Compare commits

...

No commits in common. "imports/c9/pcp-5.3.7-7.el9" and "c8" have entirely different histories.

20 changed files with 93165 additions and 212 deletions

1
.gitignore vendored
View File

@ -1 +1,2 @@
SOURCES/pcp-5.3.7.src.tar.gz
SOURCES/redhat-bugzilla-2219731-hacluster-metrics.patch

View File

@ -1 +1,2 @@
a0a05bf501b016cb859fb211ae60ce18be2bbd99 SOURCES/pcp-5.3.7.src.tar.gz
bb84c58586e078343c975ed61a8b53d4fd4393e8 SOURCES/redhat-bugzilla-2219731-hacluster-metrics.patch

View File

@ -0,0 +1,326 @@
commit c226e98096c7bdb6296a96257439724e1f68217e
Author: Nathan Scott <nathans@redhat.com>
Date: Tue Apr 26 12:00:32 2022 +1000
libpcp: optimize indom handling in fetchgroup code
Looking into some pcp-ss issues I immediately encountered a
noticeable elapsed time difference between executing just ss
versus pcp-ss. It turned out it was due to fetchgroup code
not really understanding how the underlying indom APIs work
and repeatedly refreshing shared indoms (all metrics in the
pcp-ss tool actually share one indom) once for every metric
that happens to use that indom - i.e. many times per fetch.
These changes resulted in a 5x speedup in pcp-ss and it now
completes instantly as expected. Additionally, we use alot
less memory now for the many-metrics-sharing-an-indom case.
diff -Naurp pcp-5.3.7.patched/src/libpcp/src/fetchgroup.c pcp-5.3.7/src/libpcp/src/fetchgroup.c
--- pcp-5.3.7.patched/src/libpcp/src/fetchgroup.c 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/src/libpcp/src/fetchgroup.c 2023-03-08 18:46:43.486916630 +1100
@@ -34,6 +34,19 @@
/* ------------------------------------------------------------------------ */
/*
+ * Cache to avoid unnecessary repeated calls to pmGetInDom for individual
+ * instance domains. This involves a round trip to pmcd and it does not
+ * change frequently (certainly not for processing results of one sample).
+ */
+struct __pmInDomCache {
+ pmInDom indom;
+ int *codes; /* saved from pmGetInDom */
+ char **names;
+ unsigned int size;
+ int refreshed;
+};
+
+/*
* An individual fetch-group is a linked list of requests tied to a
* particular pcp context (which the fetchgroup owns). NB: This is
* opaque to the PMAPI client.
@@ -43,8 +56,10 @@ struct __pmFetchGroup {
int wrap; /* wrap-handling flag, set at fg-create-time */
pmResult *prevResult;
struct __pmFetchGroupItem *items;
+ struct __pmInDomCache *unique_indoms;
pmID *unique_pmids;
size_t num_unique_pmids;
+ size_t num_unique_indoms;
};
/*
@@ -81,9 +96,6 @@ struct __pmFetchGroupItem {
struct {
pmID metric_pmid;
pmDesc metric_desc;
- int *indom_codes; /* saved from pmGetInDom */
- char **indom_names;
- unsigned indom_size;
struct __pmFetchGroupConversionSpec conv;
int *output_inst_codes; /* NB: may be NULL */
char **output_inst_names; /* NB: may be NULL */
@@ -168,9 +180,7 @@ pmfg_add_pmid(pmFG pmfg, pmID pmid)
/*
* Populate given pmFGI item structure based on common lookup &
* verification for pmfg inputs. Adjust instance profile to
- * include requested instance. If it's a derived metric, we
- * don't know what instance domain(s) it could involve, so we
- * clear the instance profile entirely.
+ * include requested instance.
*/
static int
pmfg_lookup_item(const char *metric, const char *instance, pmFGI item)
@@ -206,9 +216,12 @@ pmfg_lookup_item(const char *metric, con
/* Same for a whole indom. Add the whole instance profile. */
static int
-pmfg_lookup_indom(const char *metric, pmFGI item)
+pmfg_lookup_indom(pmFG pmfg, const char *metric, pmFGI item)
{
- int sts;
+ struct __pmInDomCache *indoms;
+ pmInDom indom;
+ size_t size;
+ int i, sts;
assert(item != NULL);
assert(item->type == pmfg_indom);
@@ -221,14 +234,34 @@ pmfg_lookup_indom(const char *metric, pm
return sts;
/* As a convenience to users, we also accept non-indom'd metrics */
- if (item->u.indom.metric_desc.indom == PM_INDOM_NULL)
+ if ((indom = item->u.indom.metric_desc.indom) == PM_INDOM_NULL)
return 0;
/*
+ * Insert into the instance domain cache if not seen previously
+ */
+ for (i = 0; i < pmfg->num_unique_indoms; i++) {
+ if (pmfg->unique_indoms[i].indom == indom)
+ return 0;
+ }
+
+ size = sizeof(struct __pmInDomCache) * (pmfg->num_unique_indoms + 1);
+ indoms = realloc(pmfg->unique_indoms, size);
+
+ if (indoms == NULL)
+ return -ENOMEM;
+ pmfg->unique_indoms = indoms;
+ pmfg->unique_indoms[pmfg->num_unique_indoms].indom = indom;
+ pmfg->unique_indoms[pmfg->num_unique_indoms].codes = NULL;
+ pmfg->unique_indoms[pmfg->num_unique_indoms].names = NULL;
+ pmfg->unique_indoms[pmfg->num_unique_indoms].refreshed = 0;
+ pmfg->num_unique_indoms++;
+
+ /*
* Add all instances; this will override any other past or future
* piecemeal instance requests from __pmExtendFetchGroup_lookup.
*/
- return pmAddProfile(item->u.indom.metric_desc.indom, 0, NULL);
+ return pmAddProfile(indom, 0, NULL);
}
/* Same for an event field. */
@@ -581,7 +614,7 @@ pmfg_reinit_indom(pmFGI item)
if (item->u.indom.output_inst_names)
for (i = 0; i < item->u.indom.output_maxnum; i++)
- item->u.indom.output_inst_names[i] = NULL; /* break ref into indom_names[] */
+ item->u.indom.output_inst_names[i] = NULL; /* ptr into names[] */
if (item->u.indom.output_stss)
for (i = 0; i < item->u.indom.output_maxnum; i++)
@@ -857,11 +890,11 @@ pmfg_fetch_timestamp(pmFG pmfg, pmFGI it
static void
pmfg_fetch_indom(pmFG pmfg, pmFGI item, pmResult *newResult)
{
- int sts = 0;
- int i;
- unsigned j;
- int need_indom_refresh;
+ int i, sts = 0;
+ unsigned int j, k;
+ struct __pmInDomCache *cache;
const pmValueSet *iv;
+ pmInDom indom;
assert(item != NULL);
assert(item->type == pmfg_indom);
@@ -901,36 +934,43 @@ pmfg_fetch_indom(pmFG pmfg, pmFGI item,
/*
* Analyze newResult to see whether it only contains instances we
- * already know. This is unfortunately an O(N**2) operation. It
- * could be made a bit faster if we build a pmGetInDom()- variant
- * that provides instances in sorted order.
+ * already know.
*/
- need_indom_refresh = 0;
- if (item->u.indom.output_inst_names) { /* Caller interested at all? */
- for (j = 0; j < (unsigned)iv->numval; j++) {
- unsigned k;
- for (k = 0; k < item->u.indom.indom_size; k++)
- if (item->u.indom.indom_codes[k] == iv->vlist[j].inst)
+ cache = NULL;
+ indom = item->u.indom.metric_desc.indom;
+ for (j = 0; j < pmfg->num_unique_indoms; j++) {
+ if (indom != pmfg->unique_indoms[j].indom)
+ continue;
+ cache = &pmfg->unique_indoms[j];
+ break;
+ }
+ if (cache && cache->refreshed &&
+ item->u.indom.output_inst_names) { /* Caller interested at all? */
+ for (j = 0; j < (unsigned int)iv->numval; j++) {
+ for (k = 0; k < cache->size; k++)
+ if (cache->codes[k] == iv->vlist[j].inst)
break;
- if (k >= item->u.indom.indom_size) {
- need_indom_refresh = 1;
+ if (k >= cache->size) {
+ cache->refreshed = 0;
break;
}
}
}
- if (need_indom_refresh) {
- free(item->u.indom.indom_codes);
- free(item->u.indom.indom_names);
- sts = pmGetInDom(item->u.indom.metric_desc.indom,
- &item->u.indom.indom_codes, &item->u.indom.indom_names);
+ if (cache && !cache->refreshed) {
+ cache->refreshed = 1;
+ free(cache->codes);
+ free(cache->names);
+ sts = pmGetInDom(indom, &cache->codes, &cache->names);
if (sts < 1) {
/* Need to manually clear; pmGetInDom claims they are undefined. */
- item->u.indom.indom_codes = NULL;
- item->u.indom.indom_names = NULL;
- item->u.indom.indom_size = 0;
+ cache->codes = NULL;
+ cache->names = NULL;
+ cache->size = 0;
}
else {
- item->u.indom.indom_size = sts;
+ if (sts < 0)
+ cache->refreshed = 0;
+ cache->size = sts;
}
/*
* NB: Even if the pmGetInDom failed, we can proceed with
@@ -965,18 +1005,19 @@ pmfg_fetch_indom(pmFG pmfg, pmFGI item,
* results from pmGetIndom.
*/
if (item->u.indom.output_inst_names) {
- unsigned k;
-
- for (k = 0; k < item->u.indom.indom_size; k++) {
- if (item->u.indom.indom_codes[k] == jv->inst) {
- /*
- * NB: copy the indom name char* by value.
- * The user is not supposed to modify / free this pointer,
- * or use it after a subsequent fetch or delete operation.
- */
- item->u.indom.output_inst_names[j] =
- item->u.indom.indom_names[k];
- break;
+ if (cache == NULL)
+ item->u.indom.output_inst_names[j] = NULL;
+ else {
+ for (k = 0; k < cache->size; k++) {
+ if (cache->codes[k] == jv->inst) {
+ /*
+ * NB: copy the indom name char* by value.
+ * User may not modify / free this pointer,
+ * nor use it after subsequent fetch / delete.
+ */
+ item->u.indom.output_inst_names[j] = cache->names[k];
+ break;
+ }
}
}
}
@@ -1201,8 +1242,7 @@ out:
static int
pmfg_clear_profile(pmFG pmfg)
{
- int sts;
- pmFGI item;
+ int i, sts;
sts = pmUseContext(pmfg->ctx);
if (sts != 0)
@@ -1216,10 +1256,9 @@ pmfg_clear_profile(pmFG pmfg)
* Any errors here are benign (or rather there's nothing we can do
* about 'em), so ignore pmDelProfile() return value.
*/
- for (item = pmfg->items; item; item = item->next) {
- if (item->u.item.metric_desc.indom != PM_INDOM_NULL)
- (void)pmDelProfile(item->u.item.metric_desc.indom, 0, NULL);
- }
+ for (i = 0; i < pmfg->num_unique_indoms; i++)
+ (void)pmDelProfile(pmfg->unique_indoms[i].indom, 0, NULL);
+
return 0;
}
@@ -1434,7 +1473,7 @@ pmExtendFetchGroup_indom(pmFG pmfg,
item->type = pmfg_indom;
- sts = pmfg_lookup_indom(metric, item);
+ sts = pmfg_lookup_indom(pmfg, metric, item);
if (sts != 0)
goto out;
@@ -1615,7 +1654,6 @@ pmFetchGroup(pmFG pmfg)
if (pmfg == NULL)
return -EINVAL;
-
/*
* Walk the fetchgroup, reinitializing every output spot, regardless of
* later errors.
@@ -1705,6 +1743,7 @@ int
pmClearFetchGroup(pmFG pmfg)
{
pmFGI item;
+ size_t n;
if (pmfg == NULL)
return -EINVAL;
@@ -1719,8 +1758,6 @@ pmClearFetchGroup(pmFG pmfg)
break;
case pmfg_indom:
pmfg_reinit_indom(item);
- free(item->u.indom.indom_codes);
- free(item->u.indom.indom_names);
break;
case pmfg_event:
pmfg_reinit_event(item);
@@ -1739,11 +1776,21 @@ pmClearFetchGroup(pmFG pmfg)
if (pmfg->prevResult)
pmFreeResult(pmfg->prevResult);
pmfg->prevResult = NULL;
+
if (pmfg->unique_pmids)
free(pmfg->unique_pmids);
pmfg->unique_pmids = NULL;
pmfg->num_unique_pmids = 0;
+ for (n = 0; n < pmfg->num_unique_indoms; n++) {
+ free(pmfg->unique_indoms[n].codes);
+ free(pmfg->unique_indoms[n].names);
+ }
+ if (pmfg->unique_indoms)
+ free(pmfg->unique_indoms);
+ pmfg->unique_indoms = NULL;
+ pmfg->num_unique_indoms = 0;
+
return pmfg_clear_profile(pmfg);
}

View File

@ -372,88 +372,3 @@ index 183db5afa..009a00cd9 100644
__int32_t v6only;
char skmem_str[64];
__int32_t skmem_rmem_alloc;
commit 77ba20d5e76ada83283a262dd2083b2fc284b5f8
Author: Nathan Scott <nathans@redhat.com>
Date: Thu May 5 09:33:46 2022 +1000
selinux: policy updates needed for the pmdasockets metrics
Thanks to Jan Kurík and Miloš Malík we have the additional
selinux policy requirements - without these we see QE test
failures for this agent with pcp-ss(1) on RHEL.
Related to Red Hat BZ #1981886.
diff --git a/qa/917.out.in b/qa/917.out.in
index 3bd1dc15e..6a4356a12 100644
--- a/qa/917.out.in
+++ b/qa/917.out.in
@@ -154,9 +154,9 @@ Checking policies.
# -- end logging_watch_journal_dirs(pcp_domain) expansion
allow [pcp_pmcd_t] [cluster_tmpfs_t] : [file] { write };
allow [pcp_pmcd_t] [drbd_exec_t] : [file] { execute execute_no_trans };
- allow [pcp_pmcd_t] self : [netlink_generic_socket] { bind create getattr setopt write read };
- allow [pcp_pmcd_t] [sbd_exec_t] : [file] { execute execute_no_trans };
- allow [pcp_pmcd_t] self : [netlink_tcpdiag_socket] { bind create getattr nlmsg_read setopt };
+! allow [pcp_pmcd_t] self : [netlink_generic_socket] { bind create getattr setopt write read };
+! allow [pcp_pmcd_t] [sbd_exec_t] : [file] { execute execute_no_trans };
+! allow [pcp_pmcd_t] self : [netlink_tcpdiag_socket] { append bind connect create getattr getopt ioctl lock read setattr setopt shutdown write };
allow [syslogd_t] [pcp_log_t] : [fifo_file] { open read write };
allow [pcp_pmcd_t] [etc_t] : [dir] { open read search getattr lock ioctl };
allow [pcp_pmcd_t] [shadow_t] : [file] { getattr ioctl lock open read };
diff --git a/src/selinux/GNUlocaldefs b/src/selinux/GNUlocaldefs
index 1a1b1428c..1462c5ccb 100644
--- a/src/selinux/GNUlocaldefs
+++ b/src/selinux/GNUlocaldefs
@@ -138,8 +138,8 @@ PCP_NETLINK_GENERIC_SOCKET_RULE="allow pcp_pmcd_t self:netlink_generic_socket {
endif
ifeq "$(PCP_SELINUX_NETLINK_TCPDIAG_SOCKET_CLASS)" "true"
-PCP_NETLINK_TCPDIAG_SOCKET_CLASS="class netlink_tcpdiag_socket { bind create getattr nlmsg_read setopt };"
-PCP_NETLINK_TCPDIAG_SOCKET_RULE="allow pcp_pmcd_t self:netlink_tcpdiag_socket { bind create getattr nlmsg_read setopt };"
+PCP_NETLINK_TCPDIAG_SOCKET_CLASS="class netlink_tcpdiag_socket { append bind connect create getattr getopt ioctl lock read setattr setopt shutdown write };"
+PCP_NETLINK_TCPDIAG_SOCKET_RULE="allow pcp_pmcd_t self:netlink_tcpdiag_socket { append bind connect create getattr getopt ioctl lock read setattr setopt shutdown write };"
endif
ifeq "$(PCP_SELINUX_LOCKDOWN_CLASS)" "true"
commit a6222992fe5f97f94bdddd928ce9557be1918bfd
Author: Jan Kurik <jkurik@redhat.com>
Date: Fri May 6 08:04:46 2022 +1000
selinux: fine-tune netlink_tcpdiag_socket policy for all platforms
Previous policy set did not apply correctly on ppc64le and aarch64
architectures. After some tweaking the following set of permissions
was found to work on all the supported architectures and fixes the
behavior of the sockets PMDA.
Related to Red Hat BZ #1981886.
diff --git a/qa/917.out.in b/qa/917.out.in
index 6a4356a12..723193aa2 100644
--- a/qa/917.out.in
+++ b/qa/917.out.in
@@ -156,7 +156,7 @@ Checking policies.
allow [pcp_pmcd_t] [drbd_exec_t] : [file] { execute execute_no_trans };
! allow [pcp_pmcd_t] self : [netlink_generic_socket] { bind create getattr setopt write read };
! allow [pcp_pmcd_t] [sbd_exec_t] : [file] { execute execute_no_trans };
-! allow [pcp_pmcd_t] self : [netlink_tcpdiag_socket] { append bind connect create getattr getopt ioctl lock read setattr setopt shutdown write };
+! allow [pcp_pmcd_t] self : [netlink_tcpdiag_socket] { append bind connect create getattr getopt ioctl lock nlmsg_read nlmsg_write read setattr setopt shutdown write };
allow [syslogd_t] [pcp_log_t] : [fifo_file] { open read write };
allow [pcp_pmcd_t] [etc_t] : [dir] { open read search getattr lock ioctl };
allow [pcp_pmcd_t] [shadow_t] : [file] { getattr ioctl lock open read };
diff --git a/src/selinux/GNUlocaldefs b/src/selinux/GNUlocaldefs
index 1462c5ccb..9733aead9 100644
--- a/src/selinux/GNUlocaldefs
+++ b/src/selinux/GNUlocaldefs
@@ -138,8 +138,8 @@ PCP_NETLINK_GENERIC_SOCKET_RULE="allow pcp_pmcd_t self:netlink_generic_socket {
endif
ifeq "$(PCP_SELINUX_NETLINK_TCPDIAG_SOCKET_CLASS)" "true"
-PCP_NETLINK_TCPDIAG_SOCKET_CLASS="class netlink_tcpdiag_socket { append bind connect create getattr getopt ioctl lock read setattr setopt shutdown write };"
-PCP_NETLINK_TCPDIAG_SOCKET_RULE="allow pcp_pmcd_t self:netlink_tcpdiag_socket { append bind connect create getattr getopt ioctl lock read setattr setopt shutdown write };"
+PCP_NETLINK_TCPDIAG_SOCKET_CLASS="class netlink_tcpdiag_socket { append bind connect create getattr getopt ioctl lock nlmsg_read nlmsg_write read setattr setopt shutdown write };"
+PCP_NETLINK_TCPDIAG_SOCKET_RULE="allow pcp_pmcd_t self:netlink_tcpdiag_socket { append bind connect create getattr getopt ioctl lock nlmsg_read nlmsg_write read setattr setopt shutdown write };"
endif
ifeq "$(PCP_SELINUX_LOCKDOWN_CLASS)" "true"

View File

@ -0,0 +1,20 @@
bcc included in RHEL 8.6 doesn't support the kernel_struct_has_field function.
The 4.18.x kernel in RHEL 8.6 did backport the `state` to `__state` rename (upstream:
change was in kernel v5.14+), and now we're in a situation where we can't test for
the existence of this kernel struct member and also can't rely on a kernel version check.
Therefore, let's patch it here for RHEL 8.x only:
diff --git a/src/pmdas/bcc/modules/runqlat.python b/src/pmdas/bcc/modules/runqlat.python
index 1c6c6b4b0..efc30e958 100644
--- a/src/pmdas/bcc/modules/runqlat.python
+++ b/src/pmdas/bcc/modules/runqlat.python
@@ -100,7 +100,7 @@ class PCPBCCModule(PCPBCCBase):
if (
hasattr(BPF, "kernel_struct_has_field")
and BPF.kernel_struct_has_field(b"task_struct", b"__state") == 1
- ) or self.kernel_version() >= (5, 14, 0):
+ ) or self.kernel_version() >= (4, 18, 0):
self.bpf_text = self.bpf_text.replace('STATE_FIELD', '__state')
else:
self.bpf_text = self.bpf_text.replace('STATE_FIELD', 'state')

View File

@ -0,0 +1,74 @@
diff -Naurp pcp-5.3.7.orig/src/pcp/dstat/pcp-dstat.1 pcp-5.3.7/src/pcp/dstat/pcp-dstat.1
--- pcp-5.3.7.orig/src/pcp/dstat/pcp-dstat.1 2021-05-26 17:43:26.000000000 +1000
+++ pcp-5.3.7/src/pcp/dstat/pcp-dstat.1 2022-10-20 08:57:02.176922290 +1100
@@ -1,6 +1,6 @@
'\"macro stdmacro
.\"
-.\" Copyright (c) 2018-2020 Red Hat.
+.\" Copyright (c) 2018-2022 Red Hat.
.\"
.\" This program is free software; you can redistribute it and/or modify it
.\" under the terms of the GNU General Public License as published by the
@@ -34,6 +34,7 @@
[\f3\-\-integer\f1]
[\f3\-\-nocolor\f1]
[\f3\-\-noheaders\f1]
+[\f3\-\-nomissed\f1]
[\f3\-\-noupdate\f1]
[\f3\-\-list\f1]
[\f3\-\-pidfile\f1 \f2pid-file\f1]
@@ -404,6 +405,9 @@ disable colors
\fB\-\-noheaders\fR
disable repetitive headers
.TP
+\fB\-\-nomissed\fR
+disable missed ticks warnings for intermediate samples.
+.TP
\fB\-\-noupdate\fR
disable intermediate updates when \fIdelay\fR greater than 1.
.TP
diff -Naurp pcp-5.3.7.orig/src/pcp/dstat/pcp-dstat.py pcp-5.3.7/src/pcp/dstat/pcp-dstat.py
--- pcp-5.3.7.orig/src/pcp/dstat/pcp-dstat.py 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/src/pcp/dstat/pcp-dstat.py 2022-10-20 08:57:02.176922290 +1100
@@ -455,6 +455,7 @@ class DstatTool(object):
# Internal
self.missed = 0
+ self.nomissed = False # report missed ticks by default
self.runtime = -1
self.plugins = [] # list of requested plugin names
self.allplugins = [] # list of all known plugin names
@@ -783,7 +784,8 @@ class DstatTool(object):
opts.pmSetLongOption('color', 0, '', '', 'force colors')
opts.pmSetLongOption('nocolor', 0, '', '', 'disable colors')
opts.pmSetLongOption('noheaders', 0, '', '', 'disable repetitive headers')
- opts.pmSetLongOption('noupdate', 0, '', '', 'disable intermediate headers')
+ opts.pmSetLongOption('noupdate', 0, '', '', 'disable intermediate updates')
+ opts.pmSetLongOption('nomissed', 0, '', '', 'disable missed ticks warnings')
opts.pmSetLongOption('output', 1, 'o', 'file', 'write CSV output to file')
opts.pmSetLongOption('version', 0, 'V', '', '')
opts.pmSetLongOption('debug', 1, None, '', '')
@@ -920,6 +922,8 @@ class DstatTool(object):
self.header = False
elif opt in ['noupdate']:
self.update = False
+ elif opt in ['nomissed']:
+ self.nomissed = True
elif opt in ['o', 'output']:
self.output = arg
elif opt in ['pidfile']:
@@ -1773,12 +1777,12 @@ class DstatTool(object):
outputfile = open(self.output, omode)
outputfile.write(oline)
- if self.missed > 0:
+ if self.missed > 0 and self.nomissed is False:
line = 'missed ' + str(self.missed + 1) + ' ticks'
sys.stdout.write(' ' + THEME['error'] + line + THEME['input'])
if self.output and step == self.delay:
outputfile.write(',"' + line + '"')
- self.missed = 0
+ self.missed = 0
# Finish the line
if not op.update and self.novalues is False:
sys.stdout.write('\n')

View File

@ -0,0 +1,135 @@
commit 55e8c83ee5920ab30644f54f7a525255b1de4b84
Author: Nathan Scott <nathans@redhat.com>
Date: Mon Aug 29 14:25:03 2022 +1000
docs: describe working sudoers configuration with requiretty
When /etc/sudoers is configured with 'Defaults requiretty',
pmlogctl cannot invoke pmlogger_check in the normal fashion.
Symptoms of the problem are the following system log message:
pmlogctl[PID]: sudo: sorry, you must have a tty to run sudo
pmiectl and pmie_check are similarly affected. The simplest
solution is to add an additional configuration line excluding
these commands from requiring a tty; this is the approach now
documented.
Note these PCP commands are not interactive (require no tty)
and the unprivileged 'pcp' account uses nologin(8) as a shell
anyway, so requiretty offers no advantages here. Note also
there's debate about whether requiretty is a useful security
measure in general as it can be trivially bypassed; further
details: https://bugzilla.redhat.com/show_bug.cgi?id=1020147
Resolves Red Hat BZ #2093751
diff -Naurp pcp-5.3.7.orig/man/man1/pmie_check.1 pcp-5.3.7/man/man1/pmie_check.1
--- pcp-5.3.7.orig/man/man1/pmie_check.1 2021-11-04 08:26:15.000000000 +1100
+++ pcp-5.3.7/man/man1/pmie_check.1 2022-08-31 11:17:52.362276530 +1000
@@ -406,6 +406,42 @@ no
entries are needed as the timer mechanism provided by
.B systemd
is used instead.
+.PP
+The
+.BR pmiectl (1)
+utility may invoke
+.B pmie_check
+using the
+.BR sudo (1)
+command to run it under the $PCP_USER ``pcp'' account.
+If
+.B sudo
+is configured with the non-default
+.I requiretty
+option (see below),
+.B pmie_check
+may fail to run due to not having a tty configured.
+This issue can be resolved by adding a second line
+(expand $PCP_BINADM_DIR according to your platform)
+to the
+.I /etc/sudoers
+configuration file as follows:
+.P
+.ft CW
+.nf
+.in +0.5i
+Defaults requiretty
+Defaults!$PCP_BINADM_DIR/pmie_check !requiretty
+.in
+.fi
+.ft 1
+.P
+Note that the unprivileged PCP account under which these
+commands run uses
+.I /sbin/nologin
+as the shell, so the
+.I requiretty
+option is ineffective here and safe to disable in this way.
.SH FILES
.TP 5
.I $PCP_PMIECONTROL_PATH
diff -Naurp pcp-5.3.7.orig/man/man1/pmlogger_check.1 pcp-5.3.7/man/man1/pmlogger_check.1
--- pcp-5.3.7.orig/man/man1/pmlogger_check.1 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/man/man1/pmlogger_check.1 2022-08-31 11:20:52.470086724 +1000
@@ -830,6 +830,42 @@ no
entries are needed as the timer mechanism provided by
.B systemd
is used instead.
+.PP
+The
+.BR pmlogctl (1)
+utility may invoke
+.B pmlogger_check
+using the
+.BR sudo (1)
+command to run it under the $PCP_USER ``pcp'' account.
+If
+.B sudo
+is configured with the non-default
+.I requiretty
+option (see below),
+.B pmlogger_check
+may fail to run due to not having a tty configured.
+This issue can be resolved by adding a second line
+(expand $PCP_BINADM_DIR according to your platform)
+to the
+.I /etc/sudoers
+configuration file as follows:
+.P
+.ft CW
+.nf
+.in +0.5i
+Defaults requiretty
+Defaults!$PCP_BINADM_DIR/pmlogger_check !requiretty
+.in
+.fi
+.ft 1
+.P
+Note that the unprivileged PCP account under which these
+commands run uses
+.I /sbin/nologin
+as the shell, so the
+.I requiretty
+option is ineffective here and safe to disable in this way.
.SH FILES
.TP 5
.I $PCP_PMLOGGERCONTROL_PATH
@@ -926,7 +962,7 @@ instances for
.I hostname
have been launched in the interim.
Because the cron-driven PCP archive management scripts run under
-the uid of the user ``pcp'',
+the $PCP_USER account ``pcp'',
.BI $PCP_ARCHIVE_DIR/ hostname /SaveLogs
typically needs to be owned by the user ``pcp''.
.TP
@@ -994,6 +1030,7 @@ platforms.
.BR pmlogmv (1),
.BR pmlogrewrite (1),
.BR pmsocks (1),
+.BR sudo (1),
.BR systemd (1),
.BR xz (1)
and

View File

@ -0,0 +1,103 @@
From 73c024c64f7db68fdcd224c27c1711fa6dd1d254 Mon Sep 17 00:00:00 2001
From: Nathan Scott <nathans@redhat.com>
Date: Tue, 28 Jun 2022 10:06:06 +1000
Subject: [PATCH] pmlogger_farm: add default configuration file for farm
loggers
Provide a mechanism whereby the farm loggers can be configured.
There has been reluctance in the past to sharing configuration
of the local primary logger, so these are now done separately.
Makes sense to me as the primary pmlogger may need to use more
frequent sampling, may not want to allow remote pmlc, etc.
Resolves Red Hat BZ #2101574
---
src/pmlogger/GNUmakefile | 1 +
src/pmlogger/pmlogger.defaults | 2 ++
src/pmlogger/pmlogger_check.sh | 5 +++--
src/pmlogger/pmlogger_farm.defaults | 27 +++++++++++++++++++++++++++
4 files changed, 33 insertions(+), 2 deletions(-)
create mode 100644 src/pmlogger/pmlogger_farm.defaults
diff -Naurp pcp-5.3.7.orig/src/pmlogger/GNUmakefile pcp-5.3.7/src/pmlogger/GNUmakefile
--- pcp-5.3.7.orig/src/pmlogger/GNUmakefile 2022-02-02 11:53:05.000000000 +1100
+++ pcp-5.3.7/src/pmlogger/GNUmakefile 2022-08-31 11:23:08.758672970 +1000
@@ -45,6 +45,7 @@ install:: $(SUBDIRS)
install:: default
$(INSTALL) -m 775 -o $(PCP_USER) -g $(PCP_GROUP) -d $(PCP_VAR_DIR)/config/pmlogger
+ $(INSTALL) -m 644 pmlogger_farm.defaults $(PCP_SYSCONFIG_DIR)/pmlogger_farm
$(INSTALL) -m 644 pmlogger.defaults $(PCP_SYSCONFIG_DIR)/pmlogger
$(INSTALL) -m 755 -d $(PCP_SHARE_DIR)/zeroconf
$(INSTALL) -m 644 pmlogger.zeroconf $(PCP_SHARE_DIR)/zeroconf/pmlogger
diff -Naurp pcp-5.3.7.orig/src/pmlogger/pmlogger_check.sh pcp-5.3.7/src/pmlogger/pmlogger_check.sh
--- pcp-5.3.7.orig/src/pmlogger/pmlogger_check.sh 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/src/pmlogger/pmlogger_check.sh 2022-08-31 11:23:08.758672970 +1000
@@ -1,6 +1,6 @@
#! /bin/sh
#
-# Copyright (c) 2013-2016,2018,2020-2021 Red Hat.
+# Copyright (c) 2013-2016,2018,2020-2022 Red Hat.
# Copyright (c) 1995-2000,2003 Silicon Graphics, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
PMLOGGER="$PCP_BINADM_DIR/pmlogger"
PMLOGCONF="$PCP_BINADM_DIR/pmlogconf"
PMLOGGERENVS="$PCP_SYSCONFIG_DIR/pmlogger"
+PMLOGGERFARMENVS="$PCP_SYSCONFIG_DIR/pmlogger_farm"
PMLOGGERZEROCONFENVS="$PCP_SHARE_DIR/zeroconf/pmlogger"
# error messages should go to stderr, not the GUI notifiers
@@ -972,8 +973,8 @@ END { print m }'`
continue
fi
else
+ envs=`grep -h ^PMLOGGER "$PMLOGGERFARMENVS" 2>/dev/null`
args="-h $host $args"
- envs=""
iam=""
fi
diff -Naurp pcp-5.3.7.orig/src/pmlogger/pmlogger.defaults pcp-5.3.7/src/pmlogger/pmlogger.defaults
--- pcp-5.3.7.orig/src/pmlogger/pmlogger.defaults 2022-02-03 16:11:40.000000000 +1100
+++ pcp-5.3.7/src/pmlogger/pmlogger.defaults 2022-08-31 11:23:08.758672970 +1000
@@ -1,5 +1,7 @@
# Environment variables for the primary pmlogger daemon. See also
# the pmlogger control file and pmlogconf(1) for additional details.
+# Also see separate pmlogger_farm configuration for the non-primary
+# logger configuration settings, separate to this file.
# Settings defined in this file will override any settings in the
# pmlogger zeroconf file (if present).
diff -Naurp pcp-5.3.7.orig/src/pmlogger/pmlogger_farm.defaults pcp-5.3.7/src/pmlogger/pmlogger_farm.defaults
--- pcp-5.3.7.orig/src/pmlogger/pmlogger_farm.defaults 1970-01-01 10:00:00.000000000 +1000
+++ pcp-5.3.7/src/pmlogger/pmlogger_farm.defaults 2022-08-31 11:23:08.758672970 +1000
@@ -0,0 +1,27 @@
+# Environment variables for the pmlogger farm daemons. See also
+# pmlogger control file(s) and pmlogconf(1) for additional details.
+# Also see separate pmlogger configuration for the primary logger
+# configuration settings, separate to this file.
+
+# Behaviour regarding listening on external-facing interfaces;
+# unset PMLOGGER_LOCAL to allow connections from remote hosts.
+# A value of 0 permits remote connections, 1 permits local only.
+# PMLOGGER_LOCAL=1
+
+# Max length to which the queue of pending connections may grow
+# A value of 5 is the default.
+# PMLOGGER_MAXPENDING=5
+
+# Default sampling interval pmlogger uses when no more specific
+# interval is requested. A value of 60 seconds is the default.
+# Both pmlogger command line (via control file) and also pmlogger
+# configuration file directives will override this value.
+# PMLOGGER_INTERVAL=60
+
+# The default behaviour, when pmlogger configuration comes from
+# pmlogconf(1), is to regenerate the configuration file and check for
+# changes whenever pmlogger is started from pmlogger_check(1).
+# If the PMDA configuration is stable, this is not necessary, and
+# setting PMLOGGER_CHECK_SKIP_LOGCONF to yes disables the regeneration
+# and checking.
+# PMLOGGER_CHECK_SKIP_LOGCONF=yes

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,534 @@
diff -Naurp pcp-5.3.7.orig/qa/1985 pcp-5.3.7/qa/1985
--- pcp-5.3.7.orig/qa/1985 1970-01-01 10:00:00.000000000 +1000
+++ pcp-5.3.7/qa/1985 2022-10-19 21:32:03.971832371 +1100
@@ -0,0 +1,38 @@
+#!/bin/sh
+# PCP QA Test No. 1985
+# Exercise a pmfind fix - valgrind-enabled variant.
+#
+# Copyright (c) 2022 Red Hat. All Rights Reserved.
+#
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+# get standard environment, filters and checks
+. ./common.product
+. ./common.filter
+. ./common.check
+
+_check_valgrind
+
+_cleanup()
+{
+ cd $here
+ $sudo rm -rf $tmp $tmp.*
+}
+
+status=0 # success is the default!
+$sudo rm -rf $tmp $tmp.* $seq.full
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# real QA test starts here
+export seq
+./1986 --valgrind \
+| $PCP_AWK_PROG '
+skip == 1 && $1 == "===" { skip = 0 }
+/^=== std err ===/ { skip = 1 }
+skip == 0 { print }
+skip == 1 { print >>"'$here/$seq.full'" }'
+
+# success, all done
+exit
diff -Naurp pcp-5.3.7.orig/qa/1985.out pcp-5.3.7/qa/1985.out
--- pcp-5.3.7.orig/qa/1985.out 1970-01-01 10:00:00.000000000 +1000
+++ pcp-5.3.7/qa/1985.out 2022-10-19 21:32:03.971832371 +1100
@@ -0,0 +1,11 @@
+QA output created by 1985
+QA output created by 1986 --valgrind
+=== std out ===
+SOURCE HOSTNAME
+=== filtered valgrind report ===
+Memcheck, a memory error detector
+Command: pmfind -S -m probe=127.0.0.1/32
+LEAK SUMMARY:
+definitely lost: 0 bytes in 0 blocks
+indirectly lost: 0 bytes in 0 blocks
+ERROR SUMMARY: 0 errors from 0 contexts ...
diff -Naurp pcp-5.3.7.orig/qa/1986 pcp-5.3.7/qa/1986
--- pcp-5.3.7.orig/qa/1986 1970-01-01 10:00:00.000000000 +1000
+++ pcp-5.3.7/qa/1986 2022-10-19 21:32:03.971832371 +1100
@@ -0,0 +1,62 @@
+#!/bin/sh
+# PCP QA Test No. 1986
+# Exercise libpcp_web timers pmfind regression fix.
+#
+# Copyright (c) 2022 Red Hat. All Rights Reserved.
+#
+
+if [ $# -eq 0 ]
+then
+ seq=`basename $0`
+ echo "QA output created by $seq"
+else
+ # use $seq from caller, unless not set
+ [ -n "$seq" ] || seq=`basename $0`
+ echo "QA output created by `basename $0` $*"
+fi
+
+# get standard environment, filters and checks
+. ./common.product
+. ./common.filter
+. ./common.check
+
+do_valgrind=false
+if [ "$1" = "--valgrind" ]
+then
+ _check_valgrind
+ do_valgrind=true
+fi
+
+test -x $PCP_BIN_DIR/pmfind || _notrun No support for pmfind
+
+_cleanup()
+{
+ cd $here
+ $sudo rm -rf $tmp $tmp.*
+}
+
+status=0 # success is the default!
+hostname=`hostname || echo localhost`
+$sudo rm -rf $tmp $tmp.* $seq.full
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+_filter()
+{
+ sed \
+ -e "s@$tmp@TMP@g" \
+ -e "s/ $hostname/ HOSTNAME/" \
+ -e 's/^[a-f0-9][a-f0-9]* /SOURCE /' \
+ # end
+}
+
+# real QA test starts here
+if $do_valgrind
+then
+ _run_valgrind pmfind -S -m probe=127.0.0.1/32
+else
+ pmfind -S -m probe=127.0.0.1/32
+fi \
+| _filter
+
+# success, all done
+exit
diff -Naurp pcp-5.3.7.orig/qa/1986.out pcp-5.3.7/qa/1986.out
--- pcp-5.3.7.orig/qa/1986.out 1970-01-01 10:00:00.000000000 +1000
+++ pcp-5.3.7/qa/1986.out 2022-10-19 21:32:03.971832371 +1100
@@ -0,0 +1,2 @@
+QA output created by 1986
+SOURCE HOSTNAME
diff -Naurp pcp-5.3.7.orig/qa/group pcp-5.3.7/qa/group
--- pcp-5.3.7.orig/qa/group 2022-10-19 20:49:42.638708707 +1100
+++ pcp-5.3.7/qa/group 2022-10-19 21:32:03.972832359 +1100
@@ -1974,4 +1974,6 @@ x11
1957 libpcp local valgrind
1978 atop local
1984 pmlogconf pmda.redis local
+1985 pmfind local valgrind
+1986 pmfind local
4751 libpcp threads valgrind local pcp helgrind
diff -Naurp pcp-5.3.7.orig/src/libpcp_web/src/webgroup.c pcp-5.3.7/src/libpcp_web/src/webgroup.c
--- pcp-5.3.7.orig/src/libpcp_web/src/webgroup.c 2021-11-01 13:02:26.000000000 +1100
+++ pcp-5.3.7/src/libpcp_web/src/webgroup.c 2022-10-19 21:32:03.973832346 +1100
@@ -287,11 +287,24 @@ webgroup_new_context(pmWebGroupSettings
}
static void
+webgroup_timers_stop(struct webgroups *groups)
+{
+ if (groups->active) {
+ uv_timer_stop(&groups->timer);
+ uv_close((uv_handle_t *)&groups->timer, NULL);
+ pmWebTimerRelease(groups->timerid);
+ groups->timerid = -1;
+ groups->active = 0;
+ }
+}
+
+static void
webgroup_garbage_collect(struct webgroups *groups)
{
dictIterator *iterator;
dictEntry *entry;
context_t *cp;
+ unsigned int count = 0, drops = 0;
if (pmDebugOptions.http || pmDebugOptions.libweb)
fprintf(stderr, "%s: started\n", "webgroup_garbage_collect");
@@ -308,33 +321,40 @@ webgroup_garbage_collect(struct webgroup
uv_mutex_unlock(&groups->mutex);
webgroup_drop_context(cp, groups);
uv_mutex_lock(&groups->mutex);
+ drops++;
}
+ count++;
}
dictReleaseIterator(iterator);
+
+ /* if dropping the last remaining context, do cleanup */
+ if (groups->active && drops == count) {
+ if (pmDebugOptions.http || pmDebugOptions.libweb)
+ fprintf(stderr, "%s: freezing\n", "webgroup_garbage_collect");
+ webgroup_timers_stop(groups);
+ }
uv_mutex_unlock(&groups->mutex);
}
if (pmDebugOptions.http || pmDebugOptions.libweb)
- fprintf(stderr, "%s: finished\n", "webgroup_garbage_collect");
+ fprintf(stderr, "%s: finished [%u drops from %u entries]\n",
+ "webgroup_garbage_collect", drops, count);
}
static void
refresh_maps_metrics(void *data)
{
struct webgroups *groups = (struct webgroups *)data;
+ unsigned int value;
- if (groups->metrics) {
- unsigned int value;
-
- value = dictSize(contextmap);
- mmv_set(groups->map, groups->metrics[CONTEXT_MAP_SIZE], &value);
- value = dictSize(namesmap);
- mmv_set(groups->map, groups->metrics[NAMES_MAP_SIZE], &value);
- value = dictSize(labelsmap);
- mmv_set(groups->map, groups->metrics[LABELS_MAP_SIZE], &value);
- value = dictSize(instmap);
- mmv_set(groups->map, groups->metrics[INST_MAP_SIZE], &value);
- }
+ value = contextmap? dictSize(contextmap) : 0;
+ mmv_set(groups->map, groups->metrics[CONTEXT_MAP_SIZE], &value);
+ value = namesmap? dictSize(namesmap) : 0;
+ mmv_set(groups->map, groups->metrics[NAMES_MAP_SIZE], &value);
+ value = labelsmap? dictSize(labelsmap) : 0;
+ mmv_set(groups->map, groups->metrics[LABELS_MAP_SIZE], &value);
+ value = instmap? dictSize(instmap) : 0;
+ mmv_set(groups->map, groups->metrics[INST_MAP_SIZE], &value);
}
static void
@@ -487,6 +507,7 @@ pmWebGroupDestroy(pmWebGroupSettings *se
if (pmDebugOptions.libweb)
fprintf(stderr, "%s: destroy context %p gp=%p\n", "pmWebGroupDestroy", cp, gp);
+ webgroup_deref_context(cp);
webgroup_drop_context(cp, gp);
}
sdsfree(msg);
@@ -2394,17 +2415,12 @@ pmWebGroupClose(pmWebGroupModule *module
if (groups) {
/* walk the contexts, stop timers and free resources */
- if (groups->active) {
- groups->active = 0;
- uv_timer_stop(&groups->timer);
- pmWebTimerRelease(groups->timerid);
- groups->timerid = -1;
- }
iterator = dictGetIterator(groups->contexts);
while ((entry = dictNext(iterator)) != NULL)
webgroup_drop_context((context_t *)dictGetVal(entry), NULL);
dictReleaseIterator(iterator);
dictRelease(groups->contexts);
+ webgroup_timers_stop(groups);
memset(groups, 0, sizeof(struct webgroups));
free(groups);
}
diff -Naurp pcp-5.3.7.orig/src/pmfind/source.c pcp-5.3.7/src/pmfind/source.c
--- pcp-5.3.7.orig/src/pmfind/source.c 2021-02-17 15:27:41.000000000 +1100
+++ pcp-5.3.7/src/pmfind/source.c 2022-10-19 21:32:03.973832346 +1100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Red Hat.
+ * Copyright (c) 2020,2022 Red Hat.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -25,6 +25,7 @@ static pmWebGroupSettings settings;
typedef struct {
sds source;
sds hostspec;
+ unsigned int refcount;
} context_t;
typedef struct {
@@ -38,22 +39,34 @@ typedef struct {
} sources_t;
static void
+source_release(sources_t *sp, context_t *cp, sds ctx)
+{
+ pmWebGroupDestroy(&settings, ctx, sp);
+ sdsfree(cp->hostspec);
+ sdsfree(cp->source);
+ free(cp);
+}
+
+static void
sources_release(void *arg, const struct dictEntry *entry)
{
sources_t *sp = (sources_t *)arg;
context_t *cp = (context_t *)dictGetVal(entry);
sds ctx = (sds)entry->key;
- pmWebGroupDestroy(&settings, ctx, sp);
- sdsfree(cp->hostspec);
- sdsfree(cp->source);
+ if (pmDebugOptions.discovery)
+ fprintf(stderr, "releasing context %s\n", ctx);
+
+ source_release(sp, cp, ctx);
}
static void
-sources_containers(sources_t *sp, sds id, dictEntry *uniq)
+sources_containers(sources_t *sp, context_t *cp, sds id, dictEntry *uniq)
{
uv_mutex_lock(&sp->mutex);
- sp->count++; /* issuing another PMWEBAPI request */
+ /* issuing another PMWEBAPI request */
+ sp->count++;
+ cp->refcount++;
uv_mutex_unlock(&sp->mutex);
pmWebGroupScrape(&settings, id, sp->params, sp);
@@ -75,6 +88,7 @@ on_source_context(sds id, pmWebSource *s
cp->source = sdsdup(src->source);
cp->hostspec = sdsdup(src->hostspec);
+ cp->refcount = 1;
uv_mutex_lock(&sp->mutex);
dictAdd(sp->contexts, id, cp);
@@ -84,7 +98,7 @@ on_source_context(sds id, pmWebSource *s
if (entry) { /* source just discovered */
printf("%s %s\n", src->source, src->hostspec);
if (containers)
- sources_containers(sp, id, entry);
+ sources_containers(sp, cp, id, entry);
}
}
@@ -116,7 +130,9 @@ static void
on_source_done(sds context, int status, sds message, void *arg)
{
sources_t *sp = (sources_t *)arg;
- int count = 0, release = 0;
+ context_t *cp;
+ dictEntry *he;
+ int remove = 0, count = 0, release = 0;
if (pmDebugOptions.discovery)
fprintf(stderr, "done on context %s (sts=%d)\n", context, status);
@@ -127,19 +143,26 @@ on_source_done(sds context, int status,
uv_mutex_lock(&sp->mutex);
if ((count = --sp->count) <= 0)
release = 1;
+ if ((he = dictFind(sp->contexts, context)) != NULL &&
+ (cp = (context_t *)dictGetVal(he)) != NULL &&
+ (--cp->refcount <= 0))
+ remove = 1;
uv_mutex_unlock(&sp->mutex);
+ if (remove) {
+ if (pmDebugOptions.discovery)
+ fprintf(stderr, "remove context %s\n", context);
+ source_release(sp, cp, context);
+ dictDelete(sp->contexts, context);
+ }
+
if (release) {
unsigned long cursor = 0;
-
- if (pmDebugOptions.discovery)
- fprintf(stderr, "release context %s (sts=%d)\n", context, status);
do {
cursor = dictScan(sp->contexts, cursor, sources_release, NULL, sp);
} while (cursor);
- } else {
- if (pmDebugOptions.discovery)
- fprintf(stderr, "not yet releasing (count=%d)\n", count);
+ } else if (pmDebugOptions.discovery) {
+ fprintf(stderr, "not yet releasing (count=%d)\n", count);
}
}
@@ -190,6 +213,7 @@ sources_discovery_start(uv_timer_t *arg)
}
dictRelease(dp);
+ pmWebTimerClose();
}
/*
@@ -214,8 +238,8 @@ source_discovery(int count, char **urls)
uv_mutex_init(&find.mutex);
find.urls = urls;
find.count = count; /* at least one PMWEBAPI request for each url */
- find.uniq = dictCreate(&sdsDictCallBacks, NULL);
- find.params = dictCreate(&sdsDictCallBacks, NULL);
+ find.uniq = dictCreate(&sdsKeyDictCallBacks, NULL);
+ find.params = dictCreate(&sdsOwnDictCallBacks, NULL);
dictAdd(find.params, sdsnew("name"), sdsnew("containers.state.running"));
find.contexts = dictCreate(&sdsKeyDictCallBacks, NULL);
@@ -230,6 +254,7 @@ source_discovery(int count, char **urls)
pmWebGroupSetup(&settings.module);
pmWebGroupSetEventLoop(&settings.module, loop);
+ pmWebTimerSetEventLoop(loop);
/*
* Start a one-shot timer to add a start function into the loop
@@ -244,7 +269,9 @@ source_discovery(int count, char **urls)
/*
* Finished, release all resources acquired so far
*/
+ pmWebGroupClose(&settings.module);
uv_mutex_destroy(&find.mutex);
+ dictRelease(find.uniq);
dictRelease(find.params);
dictRelease(find.contexts);
return find.status;
diff -Naurp pcp-5.3.7.orig/src/pmproxy/src/server.c pcp-5.3.7/src/pmproxy/src/server.c
--- pcp-5.3.7.orig/src/pmproxy/src/server.c 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/src/pmproxy/src/server.c 2022-10-19 21:31:43.831093354 +1100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019,2021 Red Hat.
+ * Copyright (c) 2018-2019,2021-2022 Red Hat.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
@@ -310,17 +310,21 @@ on_write_callback(uv_callback_t *handle,
struct client *client = (struct client *)request->writer.data;
int sts;
+ (void)handle;
if (pmDebugOptions.af)
fprintf(stderr, "%s: client=%p\n", "on_write_callback", client);
if (client->stream.secure == 0) {
sts = uv_write(&request->writer, (uv_stream_t *)&client->stream,
&request->buffer[0], request->nbuffers, request->callback);
- if (sts != 0)
- fprintf(stderr, "%s: ERROR uv_write failed\n", "on_write_callback");
+ if (sts != 0) {
+ pmNotifyErr(LOG_ERR, "%s: %s - uv_write failed [%s]: %s\n",
+ pmGetProgname(), "on_write_callback",
+ uv_err_name(sts), uv_strerror(sts));
+ client_close(client);
+ }
} else
secure_client_write(client, request);
- (void)handle;
return 0;
}
@@ -455,14 +459,16 @@ on_client_connection(uv_stream_t *stream
uv_handle_t *handle;
if (status != 0) {
- fprintf(stderr, "%s: client connection failed: %s\n",
- pmGetProgname(), uv_strerror(status));
+ pmNotifyErr(LOG_ERR, "%s: %s - %s failed [%s]: %s\n",
+ pmGetProgname(), "on_client_connection", "connection",
+ uv_err_name(status), uv_strerror(status));
return;
}
if ((client = calloc(1, sizeof(*client))) == NULL) {
- fprintf(stderr, "%s: out-of-memory for new client\n",
- pmGetProgname());
+ pmNotifyErr(LOG_ERR, "%s: %s - %s failed [%s]: %s\n",
+ pmGetProgname(), "on_client_connection", "calloc",
+ "ENOMEM", strerror(ENOMEM));
return;
}
if (pmDebugOptions.context | pmDebugOptions.af)
@@ -476,16 +482,18 @@ on_client_connection(uv_stream_t *stream
status = uv_tcp_init(proxy->events, &client->stream.u.tcp);
if (status != 0) {
- fprintf(stderr, "%s: client tcp init failed: %s\n",
- pmGetProgname(), uv_strerror(status));
+ pmNotifyErr(LOG_ERR, "%s: %s - %s failed [%s]: %s\n",
+ pmGetProgname(), "on_client_connection", "uv_tcp_init",
+ uv_err_name(status), uv_strerror(status));
client_put(client);
return;
}
status = uv_accept(stream, (uv_stream_t *)&client->stream.u.tcp);
if (status != 0) {
- fprintf(stderr, "%s: client tcp init failed: %s\n",
- pmGetProgname(), uv_strerror(status));
+ pmNotifyErr(LOG_ERR, "%s: %s - %s failed [%s]: %s\n",
+ pmGetProgname(), "on_client_connection", "uv_accept",
+ uv_err_name(status), uv_strerror(status));
client_put(client);
return;
}
@@ -496,8 +504,9 @@ on_client_connection(uv_stream_t *stream
status = uv_read_start((uv_stream_t *)&client->stream.u.tcp,
on_buffer_alloc, on_client_read);
if (status != 0) {
- fprintf(stderr, "%s: client read start failed: %s\n",
- pmGetProgname(), uv_strerror(status));
+ pmNotifyErr(LOG_ERR, "%s: %s - %s failed [%s]: %s\n",
+ pmGetProgname(), "on_client_connection", "uv_read_start",
+ uv_err_name(status), uv_strerror(status));
client_close(client);
}
}
@@ -530,8 +539,9 @@ open_request_port(struct proxy *proxy, s
sts = uv_listen((uv_stream_t *)&stream->u.tcp, maxpending, on_client_connection);
if (sts != 0) {
- fprintf(stderr, "%s: socket listen error %s\n",
- pmGetProgname(), uv_strerror(sts));
+ pmNotifyErr(LOG_ERR, "%s: %s - uv_listen failed [%s]: %s\n",
+ pmGetProgname(), "open_request_port",
+ uv_err_name(sts), uv_strerror(sts));
uv_close(handle, NULL);
return -ENOTCONN;
}
@@ -554,15 +564,23 @@ open_request_local(struct proxy *proxy,
uv_pipe_init(proxy->events, &stream->u.local, 0);
handle = (uv_handle_t *)&stream->u.local;
handle->data = (void *)proxy;
- uv_pipe_bind(&stream->u.local, name);
+ sts = uv_pipe_bind(&stream->u.local, name);
+ if (sts != 0) {
+ pmNotifyErr(LOG_ERR, "%s: %s - uv_pipe_bind %s failed [%s]: %s\n",
+ pmGetProgname(), "open_request_local", name,
+ uv_err_name(sts), uv_strerror(sts));
+ uv_close(handle, NULL);
+ return -ENOTCONN;
+ }
#ifdef HAVE_UV_PIPE_CHMOD
uv_pipe_chmod(&stream->u.local, UV_READABLE | UV_WRITABLE);
#endif
sts = uv_listen((uv_stream_t *)&stream->u.local, maxpending, on_client_connection);
if (sts != 0) {
- fprintf(stderr, "%s: local listen error %s\n",
- pmGetProgname(), uv_strerror(sts));
+ pmNotifyErr(LOG_ERR, "%s: %s - %s failed [%s]: %s\n",
+ pmGetProgname(), "open_request_local", "uv_listen",
+ uv_err_name(sts), uv_strerror(sts));
uv_close(handle, NULL);
return -ENOTCONN;
}

View File

@ -0,0 +1,32 @@
commit 4014e295f7b5f541439774bd3c88924d3c061325
Author: Masatake YAMATO <yamato@redhat.com>
Date: Thu Oct 20 13:55:43 2022 +0900
pmdas/snmp: install the agent specific configuration file to PMDATMPDIR
When running ./Install of the agent, the following line is printed in
/var/log/pcp/pmcd/snmp.log.
Log for pmdasnmp on pcp-netsnmp started Tue Oct 18 22:45:23 2022
opening /var/lib/pcp/pmdas/snmp/snmp.conf No such file or directory at /var/lib/pcp/pmdas/snmp/pmdasnmp.pl line 90.
As a result, pmdasnmp.pl cannot read its configuration file though it
is "./Install"ed.
Signed-off-by: Masatake YAMATO <yamato@redhat.com>
diff --git a/src/pmdas/snmp/GNUmakefile b/src/pmdas/snmp/GNUmakefile
index ce0e3e8036..7bf5471a76 100644
--- a/src/pmdas/snmp/GNUmakefile
+++ b/src/pmdas/snmp/GNUmakefile
@@ -44,7 +44,7 @@ install_pcp install: default
$(INSTALL) -m 755 -t $(PMDATMPDIR) Install Remove $(PMDAADMDIR)
$(INSTALL) -m 644 -t $(PMDATMPDIR)/pmda$(IAM).pl pmda$(IAM).pl $(PMDAADMDIR)/pmda$(IAM).pl
$(INSTALL) -m 755 -d $(PMDACONFIG)
- $(INSTALL) -m 644 snmp.conf $(PMDACONFIG)/snmp.conf
+ $(INSTALL) -m 644 -t $(PMDATMPDIR)/$(IAM).conf $(IAM).conf $(PMDACONFIG)/$(IAM).conf
@$(INSTALL_MAN)
else
build-me:

View File

@ -0,0 +1,317 @@
commit 1e216d84c6b95b4f9cb7ee6b5adf9591f8af37d5
Author: Nathan Scott <nathans@redhat.com>
Date: Wed Dec 7 11:40:43 2022 +1100
pmdaopenmetrics: validate given names before using them for metrics
Its possible pmdaopenmetrics is fed garbage accidentally, e.g. in the
case where a /metrics end point is not made visible and an HTTP error
response is returned (misconfiguration).
Failure to safeguard this results in the generation of diagnostics in
the openmetrics PMDA log file at an alarming rate until all available
filesystem space is completely consumed. It can also result in bogus
metric names being exposed to PMAPI clients.
This commit both adds new safeguards and scales back diagnostic noise
from the agent. In some places it switches to more appropriate APIs
for the level of logging being performed (esp. debug messages).
Resolves Red Hat BZ #2139325
diff -Naurp pcp-5.3.7.orig/qa/1191.out pcp-5.3.7/qa/1191.out
--- pcp-5.3.7.orig/qa/1191.out 2021-11-01 13:02:26.000000000 +1100
+++ pcp-5.3.7/qa/1191.out 2023-03-09 11:49:42.372413973 +1100
@@ -10884,13 +10884,6 @@ GC Wall Time for b5be5b9f_b0f1_47de_b436
inst [0 or "0 collector_name:Copy"] value 24462465
inst [1 or "1 collector_name:MSC"] value 49519
-openmetrics.vmware_exporter.[root@ci-vm-10-0-138-196 [vmware_host_net_usage_average]
- Data Type: double InDom: PM_INDOM_NULL 0xffffffff
- Semantics: instant Units: none
-Help:
-vmware_host_net_usage_average
-Error: Try again. Information not currently available
-
openmetrics.vmware_exporter.vmware_datastore_accessible [VMWare datastore accessible (true / false)]
Data Type: double InDom: 144.35859 0x24008c13
Semantics: instant Units: none
diff -Naurp pcp-5.3.7.orig/qa/1221.out pcp-5.3.7/qa/1221.out
--- pcp-5.3.7.orig/qa/1221.out 2021-11-01 13:02:26.000000000 +1100
+++ pcp-5.3.7/qa/1221.out 2023-03-09 11:49:42.382414004 +1100
@@ -7105,9 +7105,6 @@ openmetrics.thermostat.tms_jvm_gc_b5be5b
inst [0 or "0 collector_name:Copy"] labels {"agent":"openmetrics","collector_name":"Copy","domainname":DOMAINNAME,"groupid":NUM,"hostname":HOSTNAME,"instname":"0 collector_name:Copy","machineid":MACHINEID,"source":"thermostat","url":FILEURL,"userid":NUM}
inst [1 or "1 collector_name:MSC"] labels {"agent":"openmetrics","collector_name":"MSC","domainname":DOMAINNAME,"groupid":NUM,"hostname":HOSTNAME,"instname":"1 collector_name:MSC","machineid":MACHINEID,"source":"thermostat","url":FILEURL,"userid":NUM}
-openmetrics.vmware_exporter.[root@ci-vm-10-0-138-196
- labels {"agent":"openmetrics","domainname":DOMAINNAME,"groupid":NUM,"hostname":HOSTNAME,"machineid":MACHINEID,"source":"vmware_exporter","url":FILEURL,"userid":NUM}
-
openmetrics.vmware_exporter.vmware_datastore_accessible
labels {"agent":"openmetrics","dc_name":"ha-datacenter","domainname":DOMAINNAME,"ds_cluster":"","ds_name":"name008","groupid":NUM,"hostname":HOSTNAME,"machineid":MACHINEID,"source":"vmware_exporter","url":FILEURL,"userid":NUM}
inst [0 or "0 dc_name:ha-datacenter ds_cluster: ds_name:name026"] labels {"agent":"openmetrics","dc_name":"ha-datacenter","domainname":DOMAINNAME,"ds_cluster":"","ds_name":"name026","groupid":NUM,"hostname":HOSTNAME,"instname":"0 dc_name:ha-datacenter ds_cluster: ds_name:name026","machineid":MACHINEID,"source":"vmware_exporter","url":FILEURL,"userid":NUM}
@@ -15331,9 +15328,6 @@ openmetrics.thermostat.tms_jvm_gc_b5be5b
inst [0 or "0 collector_name:Copy"] labels {"agent":"openmetrics","collector_name":"Copy","domainname":DOMAINNAME,"groupid":NUM,"hostname":HOSTNAME,"instname":"0 collector_name:Copy","machineid":MACHINEID,"source":"thermostat","url":FILEURL,"userid":NUM}
inst [1 or "1 collector_name:MSC"] labels {"agent":"openmetrics","collector_name":"MSC","domainname":DOMAINNAME,"groupid":NUM,"hostname":HOSTNAME,"instname":"1 collector_name:MSC","machineid":MACHINEID,"source":"thermostat","url":FILEURL,"userid":NUM}
-openmetrics.vmware_exporter.[root@ci-vm-10-0-138-196
- labels {"agent":"openmetrics","domainname":DOMAINNAME,"groupid":NUM,"hostname":HOSTNAME,"machineid":MACHINEID,"source":"vmware_exporter","url":FILEURL,"userid":NUM}
-
openmetrics.vmware_exporter.vmware_datastore_accessible
labels {"agent":"openmetrics","dc_name":"ha-datacenter","domainname":DOMAINNAME,"ds_cluster":"","ds_name":"name008","groupid":NUM,"hostname":HOSTNAME,"machineid":MACHINEID,"source":"vmware_exporter","url":FILEURL,"userid":NUM}
inst [0 or "0 dc_name:ha-datacenter ds_cluster: ds_name:name026"] labels {"agent":"openmetrics","dc_name":"ha-datacenter","domainname":DOMAINNAME,"ds_cluster":"","ds_name":"name026","groupid":NUM,"hostname":HOSTNAME,"instname":"0 dc_name:ha-datacenter ds_cluster: ds_name:name026","machineid":MACHINEID,"source":"vmware_exporter","url":FILEURL,"userid":NUM}
diff -Naurp pcp-5.3.7.orig/qa/1342 pcp-5.3.7/qa/1342
--- pcp-5.3.7.orig/qa/1342 2021-02-17 15:27:41.000000000 +1100
+++ pcp-5.3.7/qa/1342 2023-03-09 11:49:42.382414004 +1100
@@ -99,6 +99,8 @@ echo == Note: check $seq.full for log en
echo == pmdaopenmetrics LOG == >>$here/$seq.full
cat $PCP_LOG_DIR/pmcd/openmetrics.log >>$here/$seq.full
+# cleanup preparing for remove (else error messages here)
+$sudo rm $PCP_PMDAS_DIR/openmetrics/config.d/simple_metric.*
_pmdaopenmetrics_remove
# success, all done
diff -Naurp pcp-5.3.7.orig/qa/1727 pcp-5.3.7/qa/1727
--- pcp-5.3.7.orig/qa/1727 2021-09-23 14:19:12.000000000 +1000
+++ pcp-5.3.7/qa/1727 2023-03-09 11:49:42.382414004 +1100
@@ -2,6 +2,7 @@
# PCP QA Test No. 1727
# Test duplicate instname labels in /metrics webapi when a context
# level label such as "hostname" is explicitly specified.
+# Test invalid OpenMetrics metric names also.
#
# Copyright (c) 2021 Red Hat. All Rights Reserved.
#
@@ -54,8 +55,17 @@ echo '# TYPE somemetric gauge'
echo 'somemetric{hostname="$MYHOST"} 1234'
EOF
-chmod 755 $tmp.script
+cat <<EOF >$tmp.badxml
+#! /bin/bash
+
+cat $here/sadist/891688-dash-time.xml
+echo '# TYPE somemetric gauge'
+echo 'somemetric{hostname="$MYHOST"} 1234'
+EOF
+
+chmod 755 $tmp.script $tmp.badxml
$sudo mv $tmp.script $PCP_PMDAS_DIR/openmetrics/config.d/duplicate_instname_label.sh
+$sudo mv $tmp.badxml $PCP_PMDAS_DIR/openmetrics/config.d/invalid_metrics_badinput.sh
_pmdaopenmetrics_install
if ! _pmdaopenmetrics_wait_for_metric openmetrics.control.calls
@@ -68,6 +78,14 @@ echo; echo === /metrics webapi listing.
curl -Gs 'http://localhost:44322/metrics?names=openmetrics.duplicate_instname_label.somemetric' \
| _filter_openmetrics_labels
+echo; echo === verify metric name validity using pminfo
+pminfo -v openmetrics
+
+# squash errors for a clean uninstall
+$sudo rm $PCP_PMDAS_DIR/openmetrics/config.d/invalid_metrics_badinput.sh
+# capture openmetrics log for posterity
+cat $PCP_LOG_DIR/pmcd/openmetrics.log >> $here/$seq.full
+
_pmdaopenmetrics_remove
# success, all done
diff -Naurp pcp-5.3.7.orig/qa/1727.out pcp-5.3.7/qa/1727.out
--- pcp-5.3.7.orig/qa/1727.out 2021-09-13 14:40:08.000000000 +1000
+++ pcp-5.3.7/qa/1727.out 2023-03-09 11:49:42.382414004 +1100
@@ -8,6 +8,8 @@ QA output created by 1727
# TYPE openmetrics_duplicate_instname_label_somemetric gauge
openmetrics_duplicate_instname_label_somemetric{hostname=HOSTNAME,instid="0",instname="0 hostname:HOSTNAME",domainname=DOMAINNAME,machineid=MACHINEID,source="duplicate_instname_label"} 1234
+=== verify metric name validity using pminfo
+
=== remove openmetrics agent ===
Culling the Performance Metrics Name Space ...
openmetrics ... done
diff -Naurp pcp-5.3.7.orig/qa/openmetrics/samples/vmware_exporter.txt pcp-5.3.7/qa/openmetrics/samples/vmware_exporter.txt
--- pcp-5.3.7.orig/qa/openmetrics/samples/vmware_exporter.txt 2021-11-01 13:02:26.000000000 +1100
+++ pcp-5.3.7/qa/openmetrics/samples/vmware_exporter.txt 2023-03-09 11:49:42.382414004 +1100
@@ -1035,25 +1035,6 @@ vmware_host_net_errorsTx_summation{clust
# HELP vmware_host_net_usage_average vmware_host_net_usage_average
# TYPE vmware_host_net_usage_average gauge
vmware_host_net_usage_average{cluster_name="",dc_name="ha-datacenter",host_name="SOMEHOSTNAME"} 3.0
-[root@ci-vm-10-0-138-196 ~]# curl -Gs http://localhost:9272/metrics > curl.txt
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]# cat curl.txt
# HELP vmware_vm_power_state VMWare VM Power state (On / Off)
# TYPE vmware_vm_power_state gauge
vmware_vm_power_state{cluster_name="",dc_name="ha-datacenter",ds_name="name026",host_name="SOMEHOSTNAME",vm_name="name023-sat61-client"} 0.0
diff -Naurp pcp-5.3.7.orig/src/pmdas/openmetrics/pmdaopenmetrics.python pcp-5.3.7/src/pmdas/openmetrics/pmdaopenmetrics.python
--- pcp-5.3.7.orig/src/pmdas/openmetrics/pmdaopenmetrics.python 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/src/pmdas/openmetrics/pmdaopenmetrics.python 2023-03-09 11:49:42.382414004 +1100
@@ -122,7 +122,7 @@ class Metric(object):
# c_units = c_api.pmUnits_int(self.munits.dimSpace, self.munits.dimTime, self.munits.dimCount,
# self.munits.scaleSpace, self.munits.scaleTime, self.munits.scaleCount)
- self.source.pmda.log("Metric: adding new metric %s pmid=%s type=%d sem=%d singular=%s mindom=0x%x labels=%s" %
+ self.source.pmda.debug("Metric: adding metric %s pmid=%s type=%d sem=%d singular=%s mindom=0x%x labels=%s" %
(name, pmContext.pmIDStr(self.pmid), self.mtype, self.msem, self.singular, self.mindom, self.labels))
self.obj = pmdaMetric(self.pmid, self.mtype, self.mindom, self.msem, self.munits)
@@ -565,6 +565,7 @@ class Source(object):
self.cluster = cluster # unique/persistent id# for nickname
self.path = path # pathname to .url or executable file
self.url = None
+ self.parse_error = False
self.parse_url_time = 0 # timestamp of config file when it was last parsed
self.is_scripted = is_scripted
self.pmda = thispmda # the shared pmda
@@ -659,6 +660,19 @@ class Source(object):
self.pmda.log("instname_labels returning %s" % naming_labels) if self.pmda.dbg else None
return naming_labels
+ def valid_metric_name(self, name):
+ '''
+ Check validity of given metric name component:
+ - only contains alphanumerics and/or underscores
+ (colon removed later, allowed through here);
+ - only starts with an alphabetic character.
+ '''
+ if not name[0].isalpha():
+ return False
+ if not re.sub('[_.:]', '', name).isalnum():
+ return False
+ return True
+
def parse_metric_line(self, line, pcpline, helpline, typeline):
'''
Parse the sample line, identify/create corresponding metric & instance.
@@ -687,8 +701,10 @@ class Source(object):
# Nb: filter pattern is applied only to the leaf component of the full metric name
if self.check_filter(sp.name, "METRIC") != "INCLUDE":
self.pmda.log("Metric %s excluded by config filters" % fullname)
- return
+ return True
else:
+ if not self.valid_metric_name(sp.name):
+ raise ValueError('invalid metric name: ' + sp.name)
# new metric
metricnum = self.pmids_table.intern_lookup_value(sp.name)
# check if the config specifies metadata for this metric
@@ -708,24 +724,32 @@ class Source(object):
else:
m.store_inst(naming_labels, sp.value)
self.pmda.set_notify_change()
+ except ValueError as e:
+ if not self.parse_error:
+ self.pmda.err("cannot parse name in %s: %s" % (line, e))
+ self.parse_error = True # one-time-only error diagnostic
+ return False
except Exception as e:
+ if self.pmda.dbg:
+ traceback.print_exc() # traceback can be handy here
self.pmda.err("cannot parse/store %s: %s" % (line, e))
- traceback.print_exc() # traceback can be handy here
-
+ return False
+ return True
def parse_lines(self, text):
- '''Refresh all the metric metadata as it is found, including creating
- new metrics. Store away metric values for subsequent
- fetch()es. Parse errors may result in exceptions.
- That's OK, we don't try heroics to parse non-compliant
- data. Return number of metrics extracted.
'''
-
+ Refresh all the metric metadata as it is found, including creating
+ new metrics. Store away metric values for subsequent fetch()es.
+ Input parse errors result in exceptions and early termination.
+ That's OK, we don't try heroics to parse non-compliant data.
+ Return number of metrics extracted.
+ '''
num_metrics = 0
lines = text.splitlines()
pcpline = None
helpline = None
typeline = None
+ badness = False
state = "metadata"
for line in lines:
self.pmda.debug("line: %s state: %s" % (line, state)) if self.pmda.dbg else None
@@ -762,9 +786,15 @@ class Source(object):
# NB: could verify helpline/typeline lp[2] matches,
# but we don't have to go out of our way to support
# non-compliant exporters.
- self.parse_metric_line(l, pcpline, helpline, typeline)
+ if not self.parse_metric_line(l, pcpline, helpline, typeline):
+ badness = True
+ break # bad metric line, skip the remainder of this file
num_metrics += 1
+ # clear one-time-only error diagnostic if the situation is now resolved
+ if not badness:
+ self.parse_error = False
+
# NB: this logic only ever -adds- Metrics to a Source. If a source
# stops supplying some metrics, then a PCP app will see a PM_ERR_INST
# coming back when it tries to fetch them. We could perhaps keep the
@@ -842,7 +872,7 @@ class Source(object):
metadata = line.split(' ')[1:]
self.metadatalist.append(metadata)
else:
- self.pmda.log('Warning: %s ignored unrecognised config entry "%s"' % (self.url, line))
+ self.pmda.err('%s ignored unrecognised config entry "%s"' % (self.url, line))
self.pmda.debug("DEBUG url: %s HEADERS: %s" % (self.url, self.headers)) if self.pmda.dbg else None
self.pmda.debug("DEBUG url: %s FILTERS: %s" % (self.url, self.filterlist)) if self.pmda.dbg else None
@@ -911,8 +941,7 @@ class Source(object):
except Exception as e:
self.pmda.stats_status[self.cluster] = 'failed to fetch URL or execute script %s: %s' % (self.path, e)
self.pmda.stats_status_code[self.cluster] = status_code
- self.pmda.debug('Warning: cannot fetch URL or execute script %s: %s' % (self.path, e)) if self.pmda.dbg else None
- return
+ self.pmda.debug('cannot fetch URL or execute script %s: %s' % (self.path, e))
def refresh2(self, timeout):
'''
@@ -950,7 +979,7 @@ class Source(object):
self.pmda.log("fetch: item=%d inst=%d m.mname=%s" % (item, inst, m.mname)) if self.pmda.dbg else None
return m.fetch_inst(inst)
except Exception as e:
- self.pmda.log("Warning: cannot fetch item %d inst %d: %s" % (item, inst, e))
+ self.pmda.debug("cannot fetch item %d inst %d: %s" % (item, inst, e))
return [c_api.PM_ERR_AGAIN, 0]
class OpenMetricsPMDA(PMDA):
@@ -1468,7 +1497,7 @@ class OpenMetricsPMDA(PMDA):
def debug(self, s):
if self.dbg:
- super(OpenMetricsPMDA, self).log("debug: " + s)
+ super(OpenMetricsPMDA, self).dbg(s)
def log(self, message):
PMDA.log(message)
@@ -1526,8 +1555,9 @@ if __name__ == '__main__':
if args.nosort:
sort_conf_list = False
- pmdadir = os.path.join(os.getenv('PCP_PMDAS_DIR'), args.root)
if not args.config.startswith("/"):
+ pmdadir = os.getenv('PCP_PMDAS_DIR') or '/'
+ pmdadir = os.path.join(pmdadir, args.root)
args.config = os.path.join(pmdadir, args.config)
# This PMDA starts up in the "notready" state, see the Install script where

View File

@ -0,0 +1,706 @@
commit f4e34cd32bc73b53c2fc94c8deb670044d9d18d6
Author: Nathan Scott <nathans@redhat.com>
Date: Wed Dec 7 12:17:19 2022 +1100
pmdanfsclient: fix srcport handling for rdma and udp mounts
This bug fix kindly brought to you by the good folk at Penn State Uni.
Regression test update by myself but the real fix came from others.
Resolves Red Hat BZ #2150889
diff --git a/qa/798.out.32 b/qa/798.out.32
index 8db1b9896..854d667a2 100644
--- a/qa/798.out.32
+++ b/qa/798.out.32
@@ -165,6 +165,110 @@ pmInDom: 62.0
dbpmda>
Log for pmdanfsclient on HOST ...
Log finished ...
+=== Test case: mountstats-el8.7-rdma.qa
+dbpmda> open pipe $PYTHON pmdanfsclient.python
+Start $PYTHON PMDA: $PYTHON pmdanfsclient.python
+dbpmda> # on some platforms this may take a while ...
+dbpmda> wait 2
+dbpmda> getdesc on
+dbpmda> desc nfsclient.export
+PMID: 62.0.1
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.mountpoint
+PMID: 62.0.2
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.options.string
+PMID: 62.1.1
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.options.proto
+PMID: 62.1.24
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.options.vers
+PMID: 62.1.6
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> fetch nfsclient.export
+PMID(s): 62.0.1
+__pmResult ... numpmid: 1
+ 62.0.1 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "example.com:/group"
+ inst [N or ???] value "example.com:/home"
+ inst [N or ???] value "example.com:/icds"
+ inst [N or ???] value "example.com:/icds-pcp-data"
+ inst [N or ???] value "example.com:/icds-prod"
+ inst [N or ???] value "example.com:/icds-test"
+ inst [N or ???] value "example.com:/work"
+dbpmda> fetch nfsclient.mountpoint
+PMID(s): 62.0.2
+__pmResult ... numpmid: 1
+ 62.0.2 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "/storage/group"
+ inst [N or ???] value "/storage/home"
+ inst [N or ???] value "/storage/icds"
+ inst [N or ???] value "/storage/pcp-data"
+ inst [N or ???] value "/storage/prod"
+ inst [N or ???] value "/storage/test"
+ inst [N or ???] value "/storage/work"
+dbpmda> fetch nfsclient.options.string
+PMID(s): 62.1.1
+__pmResult ... numpmid: 1
+ 62.1.1 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.1,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.18,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.23,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.27,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.29,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.6,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.7,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+dbpmda> fetch nfsclient.options.proto
+PMID(s): 62.1.24
+__pmResult ... numpmid: 1
+ 62.1.24 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+dbpmda> fetch nfsclient.options.vers
+PMID(s): 62.1.6
+__pmResult ... numpmid: 1
+ 62.1.6 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+dbpmda> fetch nfsclient.options.vers
+PMID(s): 62.1.6
+__pmResult ... numpmid: 1
+ 62.1.6 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+dbpmda> instance 62.0
+pmInDom: 62.0
+[ X] inst: X name: "/storage/group"
+[X+1] inst: X+1 name: "/storage/home"
+[X+2] inst: X+2 name: "/storage/icds"
+[X+3] inst: X+3 name: "/storage/pcp-data"
+[X+4] inst: X+4 name: "/storage/prod"
+[X+5] inst: X+5 name: "/storage/test"
+[X+6] inst: X+6 name: "/storage/work"
+dbpmda>
+Log for pmdanfsclient on HOST ...
+Log finished ...
=== Test case: mountstats.qa
dbpmda> open pipe $PYTHON pmdanfsclient.python
Start $PYTHON PMDA: $PYTHON pmdanfsclient.python
diff --git a/qa/798.out.64 b/qa/798.out.64
index 6b6a4b73c..860fa582e 100644
--- a/qa/798.out.64
+++ b/qa/798.out.64
@@ -165,6 +165,110 @@ pmInDom: 62.0
dbpmda>
Log for pmdanfsclient on HOST ...
Log finished ...
+=== Test case: mountstats-el8.7-rdma.qa
+dbpmda> open pipe $PYTHON pmdanfsclient.python
+Start $PYTHON PMDA: $PYTHON pmdanfsclient.python
+dbpmda> # on some platforms this may take a while ...
+dbpmda> wait 2
+dbpmda> getdesc on
+dbpmda> desc nfsclient.export
+PMID: 62.0.1
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.mountpoint
+PMID: 62.0.2
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.options.string
+PMID: 62.1.1
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.options.proto
+PMID: 62.1.24
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.options.vers
+PMID: 62.1.6
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> fetch nfsclient.export
+PMID(s): 62.0.1
+__pmResult ... numpmid: 1
+ 62.0.1 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "example.com:/group"
+ inst [N or ???] value "example.com:/home"
+ inst [N or ???] value "example.com:/icds"
+ inst [N or ???] value "example.com:/icds-pcp-data"
+ inst [N or ???] value "example.com:/icds-prod"
+ inst [N or ???] value "example.com:/icds-test"
+ inst [N or ???] value "example.com:/work"
+dbpmda> fetch nfsclient.mountpoint
+PMID(s): 62.0.2
+__pmResult ... numpmid: 1
+ 62.0.2 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "/storage/group"
+ inst [N or ???] value "/storage/home"
+ inst [N or ???] value "/storage/icds"
+ inst [N or ???] value "/storage/pcp-data"
+ inst [N or ???] value "/storage/prod"
+ inst [N or ???] value "/storage/test"
+ inst [N or ???] value "/storage/work"
+dbpmda> fetch nfsclient.options.string
+PMID(s): 62.1.1
+__pmResult ... numpmid: 1
+ 62.1.1 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.1,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.18,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.23,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.27,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.29,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.6,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.7,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+dbpmda> fetch nfsclient.options.proto
+PMID(s): 62.1.24
+__pmResult ... numpmid: 1
+ 62.1.24 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+dbpmda> fetch nfsclient.options.vers
+PMID(s): 62.1.6
+__pmResult ... numpmid: 1
+ 62.1.6 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+dbpmda> fetch nfsclient.options.vers
+PMID(s): 62.1.6
+__pmResult ... numpmid: 1
+ 62.1.6 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+dbpmda> instance 62.0
+pmInDom: 62.0
+[ X] inst: X name: "/storage/group"
+[X+1] inst: X+1 name: "/storage/home"
+[X+2] inst: X+2 name: "/storage/icds"
+[X+3] inst: X+3 name: "/storage/pcp-data"
+[X+4] inst: X+4 name: "/storage/prod"
+[X+5] inst: X+5 name: "/storage/test"
+[X+6] inst: X+6 name: "/storage/work"
+dbpmda>
+Log for pmdanfsclient on HOST ...
+Log finished ...
=== Test case: mountstats.qa
dbpmda> open pipe $PYTHON pmdanfsclient.python
Start $PYTHON PMDA: $PYTHON pmdanfsclient.python
diff --git a/qa/nfsclient/mountstats-el8.7-rdma.qa b/qa/nfsclient/mountstats-el8.7-rdma.qa
new file mode 100644
index 000000000..98dd836a1
--- /dev/null
+++ b/qa/nfsclient/mountstats-el8.7-rdma.qa
@@ -0,0 +1,437 @@
+device proc mounted on /proc with fstype proc
+device devtmpfs mounted on /dev with fstype devtmpfs
+device securityfs mounted on /sys/kernel/security with fstype securityfs
+device tmpfs mounted on /dev/shm with fstype tmpfs
+device devpts mounted on /dev/pts with fstype devpts
+device tmpfs mounted on /run with fstype tmpfs
+device tmpfs mounted on /sys/fs/cgroup with fstype tmpfs
+device cgroup mounted on /sys/fs/cgroup/systemd with fstype cgroup
+device pstore mounted on /sys/fs/pstore with fstype pstore
+device efivarfs mounted on /sys/firmware/efi/efivars with fstype efivarfs
+device bpf mounted on /sys/fs/bpf with fstype bpf
+device cgroup mounted on /sys/fs/cgroup/hugetlb with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/cpu,cpuacct with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/memory with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/freezer with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/devices with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/net_cls,net_prio with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/perf_event with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/rdma with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/cpuset with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/pids with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/blkio with fstype cgroup
+device configfs mounted on /sys/kernel/config with fstype configfs
+device rootfs mounted on / with fstype tmpfs
+device rw mounted on /.sllocal/log with fstype tmpfs
+device rpc_pipefs mounted on /var/lib/nfs/rpc_pipefs with fstype rpc_pipefs
+device hugetlbfs mounted on /dev/hugepages with fstype hugetlbfs
+device debugfs mounted on /sys/kernel/debug with fstype debugfs
+device mqueue mounted on /dev/mqueue with fstype mqueue
+device systemd-1 mounted on /proc/sys/fs/binfmt_misc with fstype autofs
+device binfmt_misc mounted on /proc/sys/fs/binfmt_misc with fstype binfmt_misc
+device fusectl mounted on /sys/fs/fuse/connections with fstype fusectl
+device /dev/sda3 mounted on /tmp with fstype xfs
+device /dev/sda1 mounted on /var/log with fstype xfs
+device /dev/sda2 mounted on /var/tmp with fstype xfs
+device tracefs mounted on /sys/kernel/debug/tracing with fstype tracefs
+device example.com:/icds-prod mounted on /storage/prod with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.30,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 3 36 0 0 3 8 44 0 0 3 0 0 0 0 3 0 0 3 0 0 0 0 0 0 0 0 0
+ bytes: 5086 0 0 0 5086 0 4 0
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 1 0 0 5 5 0 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 0 0 0
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 0 0 0
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 0 0 0 0 0 268 0 0 0 0 0 0 11 0 0 0
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 0 0 0
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 0 1 4220 0 0 28 0 0 0 0 0 0 11 1 0 0
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 0 0 0
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 1 0 4746 4746 0 72 0 0 0 0 0 0 11 1 0 0
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 0 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 18 0 19 0
+ GETATTR: 3 3 0 276 336 0 0 0 0
+ SETATTR: 0 0 0 0 0 0 0 0 0
+ LOOKUP: 8 8 0 852 1600 16 1 17 1
+ ACCESS: 7 7 0 672 840 10 1 11 0
+ READLINK: 1 1 0 92 148 5 0 5 0
+ READ: 3 3 0 312 5472 0 1 1 0
+ WRITE: 0 0 0 0 0 0 0 0 0
+ CREATE: 0 0 0 0 0 0 0 0 0
+ MKDIR: 0 0 0 0 0 0 0 0 0
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 0 0 0 0 0 0 0 0 0
+ RMDIR: 0 0 0 0 0 0 0 0 0
+ RENAME: 0 0 0 0 0 0 0 0 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 0 0 0 0 0 0 0 0 0
+ READDIRPLUS: 0 0 0 0 0 0 0 0 0
+ FSSTAT: 0 0 0 0 0 0 0 0 0
+ FSINFO: 2 2 0 184 328 6 0 6 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/icds-test mounted on /storage/test with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.24,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ bytes: 0 0 0 0 0 0 0 0
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 0 0 0
+ xprt: rdma 0 0 1 0 0 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 0 0 0
+ xprt: rdma 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ xprt: rdma 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ xprt: rdma 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ xprt: rdma 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ xprt: rdma 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ xprt: rdma 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 6 0 6 0
+ GETATTR: 0 0 0 0 0 0 0 0 0
+ SETATTR: 0 0 0 0 0 0 0 0 0
+ LOOKUP: 0 0 0 0 0 0 0 0 0
+ ACCESS: 0 0 0 0 0 0 0 0 0
+ READLINK: 0 0 0 0 0 0 0 0 0
+ READ: 0 0 0 0 0 0 0 0 0
+ WRITE: 0 0 0 0 0 0 0 0 0
+ CREATE: 0 0 0 0 0 0 0 0 0
+ MKDIR: 0 0 0 0 0 0 0 0 0
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 0 0 0 0 0 0 0 0 0
+ RMDIR: 0 0 0 0 0 0 0 0 0
+ RENAME: 0 0 0 0 0 0 0 0 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 0 0 0 0 0 0 0 0 0
+ READDIRPLUS: 0 0 0 0 0 0 0 0 0
+ FSSTAT: 0 0 0 0 0 0 0 0 0
+ FSINFO: 2 2 0 184 328 6 0 6 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/icds mounted on /storage/icds with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.25,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 2 191 0 0 3 13 207 0 0 30 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0
+ bytes: 2452 0 0 0 2555904 0 624 0
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 1 0 0 12 12 0 12 0 0 6 0 352256 352256 0 0 0 0 0 0 0 0 11 6 0 0
+ xprt: rdma 0 0 1 0 0 10 10 0 10 0 0 5 0 184320 184320 0 0 0 0 0 0 0 0 11 5 0 0
+ xprt: rdma 0 0 2 0 0 10 10 0 10 0 0 4 0 225280 225280 0 0 0 0 0 0 0 0 22 4 0 0
+ xprt: rdma 0 0 1 0 0 9 9 0 9 0 0 6 0 294912 294912 0 0 0 0 0 0 0 0 11 6 0 0
+ xprt: rdma 0 0 1 0 0 9 9 0 9 0 0 7 0 339968 339968 0 0 0 0 0 0 0 0 11 7 0 0
+ xprt: rdma 0 0 1 0 0 9 9 0 9 0 0 7 0 352256 352256 0 0 0 0 0 0 0 0 11 7 0 0
+ xprt: rdma 0 0 1 0 0 9 9 0 9 0 0 7 0 405504 405504 0 0 0 0 0 0 0 0 11 7 0 0
+ xprt: rdma 0 0 1 0 0 9 9 0 9 0 0 7 0 401408 401408 0 0 0 0 0 0 0 0 11 7 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 5 0 5 0
+ GETATTR: 2 2 0 188 224 11 0 11 0
+ SETATTR: 0 0 0 0 0 0 0 0 0
+ LOOKUP: 13 13 0 1364 1952 15 3 19 5
+ ACCESS: 9 9 0 864 1080 16 1 18 0
+ READLINK: 0 0 0 0 0 0 0 0 0
+ READ: 49 49 0 5096 2562176 0 15 17 0
+ WRITE: 0 0 0 0 0 0 0 0 0
+ CREATE: 0 0 0 0 0 0 0 0 0
+ MKDIR: 0 0 0 0 0 0 0 0 0
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 0 0 0 0 0 0 0 0 0
+ RMDIR: 0 0 0 0 0 0 0 0 0
+ RENAME: 0 0 0 0 0 0 0 0 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 0 0 0 0 0 0 0 0 0
+ READDIRPLUS: 0 0 0 0 0 0 0 0 0
+ FSSTAT: 0 0 0 0 0 0 0 0 0
+ FSINFO: 2 2 0 184 328 5 0 5 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/group mounted on /storage/group with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.1,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 359470 11669975 26864 99083 237241 104622 12074173 76552450 12372 24061401 0 184482 64402 15675 250513 4475 60207 205420 0 13655 41997395 0 0 0 0 0 0
+ bytes: 5990252963873 271047280191 0 0 1730835995062 212213971093 422603768 51862807
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 111 1 28 5001450 5001450 0 16468960 244630485 411153 4499604 2419 216488884485 216363618297 705084 2119278 0 610 0 0 0 0 4422 4913176 0 0
+ xprt: rdma 0 0 110 1 28 5000369 5000369 0 16473884 244723520 411409 4497949 2419 216448785625 216324215309 744372 2220863 0 584 0 0 0 0 4466 4911777 0 0
+ xprt: rdma 0 0 110 1 12 5001230 5001230 0 16466569 244632808 411122 4499109 2294 216463811427 216346453699 725596 2162884 0 656 0 0 0 0 4642 4912525 0 0
+ xprt: rdma 0 0 114 1 12 5000643 5000643 0 16476687 244518169 411370 4498397 2400 216437529332 216314007816 754588 2166359 0 638 0 0 0 0 4675 4912167 0 0
+ xprt: rdma 0 0 113 0 12 5001675 5001675 0 16476119 244839819 411304 4499163 2420 216552802382 216427322070 674092 2054877 0 666 0 0 0 0 4796 4912887 0 0
+ xprt: rdma 0 0 112 1 62 5001928 5001928 0 16478906 244718584 411345 4499807 2449 216598144747 216470930799 709516 2049733 0 629 0 0 0 0 4664 4913601 0 0
+ xprt: rdma 0 0 108 0 62 5000620 5000620 0 16457063 244260204 411118 4498680 2347 216410122025 216289485037 742848 2157015 0 618 0 0 0 0 4488 4912145 0 0
+ xprt: rdma 0 0 105 0 62 5001170 5001170 0 16465684 244462170 411102 4499474 2363 216499718003 216377867059 774804 2218299 0 664 0 0 0 0 4532 4912939 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 6 0 6 0
+ GETATTR: 359470 359470 0 37679148 40260640 6615 68172 77464 0
+ SETATTR: 21554 21554 0 3038404 775944 123 18549 18914 0
+ LOOKUP: 116511 116511 0 14261900 8782176 956 41688 44033 90189
+ ACCESS: 64375 64375 0 7046576 7725000 886 14014 15459 0
+ READLINK: 736 736 0 79416 104332 2 249 269 0
+ READ: 36019499 36019499 0 4279070464 1735446589952 406570 26116356 27160896 0
+ WRITE: 3304938 3304938 0 212624926960 528790080 1195907908 50317800 1246575394 0
+ CREATE: 77879 77879 0 11891764 19937024 494 299155 303286 0
+ MKDIR: 2201 2201 0 325500 545856 26 7601 7669 80
+ SYMLINK: 4 4 0 768 1024 0 13 13 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 15039 15039 0 1895992 541404 46 25825 26057 0
+ RMDIR: 1859 1859 0 223452 66924 4 3096 3127 3
+ RENAME: 6525 6525 0 1219928 287100 79 13674 13918 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 5842 5842 0 747112 19898184 14 7271 7470 0
+ READDIRPLUS: 9028 9028 0 1164116 74659892 169 17053 17575 0
+ FSSTAT: 113 113 0 11856 18984 34 126 162 0
+ FSINFO: 2 2 0 184 328 6 0 6 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/home mounted on /storage/home with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.7,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 1431711 44943044 116357 137881 1036341 97602 45725153 741968 1060 314347 7 82823 747343 132339 751136 4247 9781 660494 0 1621 659329 139 0 0 0 0 0
+ bytes: 31191656856 2345480650 0 0 13941630626 2386575218 3519989 615297
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 299 2 19 314383 314383 0 360317 2824 6031 38731 7094 2143783391 1741491091 2076748 3911658 0 48 0 0 0 0 3652 51856 0 0
+ xprt: rdma 0 0 306 2 19 314284 314284 0 359761 3339 6178 38680 7086 2138653820 1737586668 2122588 3993750 0 53 0 0 0 0 3729 51944 0 0
+ xprt: rdma 0 0 305 2 19 314534 314534 0 360342 3020 6127 39049 7491 2177604450 1749298514 2178016 4057012 0 51 0 0 0 0 3718 52667 0 0
+ xprt: rdma 0 0 304 2 19 314570 314568 2 687294 2628 6152 38977 6977 2151798321 1756744897 2311096 3945741 0 45 0 0 0 0 3707 52106 0 0
+ xprt: rdma 0 0 301 3 19 314546 314545 1 495960 2588 6045 38911 7196 2164869636 1756151804 1995156 3909706 0 48 0 0 0 0 3674 52152 0 0
+ xprt: rdma 0 0 300 2 19 314597 314597 0 361338 2792 6190 39020 6968 2150734096 1755759176 2129072 3997461 0 47 0 0 0 0 3663 52178 0 0
+ xprt: rdma 0 0 300 2 19 314532 314532 0 360982 2603 6177 38991 7215 2162670305 1752241557 2106240 4097289 0 45 0 0 0 0 3663 52383 0 0
+ xprt: rdma 0 0 299 2 19 314696 314696 0 360978 2938 6117 38938 7297 2164527924 1748297356 2332620 3915498 0 51 0 0 0 0 3652 52352 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 5 0 5 0
+ GETATTR: 1431711 1431711 0 152340376 160351352 23887 262978 318138 5
+ SETATTR: 134526 134526 0 20992092 4842936 592 62889 65101 0
+ LOOKUP: 105669 105669 0 13828636 11957600 1842 36086 41336 61002
+ ACCESS: 228039 228039 0 26382944 27364652 2814 47214 53250 7
+ READLINK: 1627 1627 0 170916 228932 15 337 391 0
+ READ: 420683 420683 0 50800636 13995575392 7785 255480 270435 0
+ WRITE: 87100 87100 0 2397805952 13936000 172750 365004 540345 0
+ CREATE: 29307 29307 0 4889564 7501712 111 49570 50330 4
+ MKDIR: 7156 7156 0 1067212 1800036 15 11511 11613 145
+ SYMLINK: 446 446 0 82020 113516 1 682 688 3
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 5852 5852 0 836620 210672 17 13008 13133 0
+ RMDIR: 2417 2417 0 277660 87012 5 3319 3357 54
+ RENAME: 2882 2882 0 542372 126808 62 26287 26407 0
+ LINK: 212 212 0 32072 26288 0 420 425 0
+ READDIR: 26 26 0 3348 156548 0 81 82 0
+ READDIRPLUS: 46977 46977 0 6063288 86387612 202 55663 58039 0
+ FSSTAT: 2784 2784 0 291028 467712 28 3651 4047 0
+ FSINFO: 2 2 0 184 328 5 0 5 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/icds mounted on /storage/icds with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.23,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 1504197 4360204878 3573 11827 998351 46870 4361262723 0 13 86345 0 0 412964 0 810302 0 0 787954 0 0 0 0 0 0 0 0 0
+ bytes: 2473882007460 0 0 0 3117996888 0 781324 0
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 9 0 15 218040 218040 0 220710 0 0 10661 757 426647800 380864004 0 2882858 0 0 0 0 0 0 99 11418 0 0
+ xprt: rdma 0 0 11 0 20 217911 217911 0 220214 0 0 10742 764 434579499 388755611 0 2968495 0 0 0 0 0 0 121 11506 0 0
+ xprt: rdma 0 0 11 0 20 218423 218423 0 220758 0 0 10858 830 439053205 388939529 0 2997186 0 0 0 0 0 0 121 11688 0 0
+ xprt: rdma 0 0 10 0 20 217656 217656 0 220142 0 0 10798 776 439744412 392148264 0 2965342 0 0 0 0 0 0 110 11574 0 0
+ xprt: rdma 0 0 10 0 20 218280 218280 0 220648 0 0 10783 764 433088272 387536212 0 2916853 0 0 0 0 0 0 110 11547 0 0
+ xprt: rdma 0 0 10 0 15 217897 217897 0 220528 0 0 10800 744 432500217 387918105 0 3035589 0 0 0 0 0 0 110 11544 0 0
+ xprt: rdma 0 0 9 0 15 218233 218233 0 220901 0 0 10876 786 437891918 390297186 0 2843850 0 0 0 0 0 0 99 11662 0 0
+ xprt: rdma 0 0 9 0 15 218287 218287 0 221066 0 0 10817 799 438124667 390593603 0 2900001 0 0 0 0 0 0 99 11616 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 5 0 5 0
+ GETATTR: 1504197 1504197 0 159348668 168470064 5823 250532 346757 0
+ SETATTR: 0 0 0 0 0 0 0 0 0
+ LOOKUP: 47852 47852 0 6159548 5950144 134 18038 23896 24837
+ ACCESS: 79076 79076 0 8750456 9487564 188 14705 18515 389
+ READLINK: 331 331 0 36048 45912 1 109 121 0
+ READ: 106156 106156 0 12595328 3131627640 2442 102180 106716 0
+ WRITE: 0 0 0 0 0 0 0 0 0
+ CREATE: 972 972 0 159400 34992 4 1815 1875 972
+ MKDIR: 130 130 0 18948 4680 0 259 260 130
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 0 0 0 0 0 0 0 0 0
+ RMDIR: 0 0 0 0 0 0 0 0 0
+ RENAME: 0 0 0 0 0 0 0 0 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 0 0 0 0 0 0 0 0 0
+ READDIRPLUS: 5863 5863 0 765784 12555964 21 6882 7339 0
+ FSSTAT: 120 120 0 13172 20160 0 86 88 0
+ FSINFO: 2 2 0 184 328 5 0 5 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/work mounted on /storage/work with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.18,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 963419 23064824 4957 23437 498893 36863 23579290 20398227 3127 191254 348 187305 207697 6261 405125 1210 0 394647 0 3923 18843784 3 0 0 0 0 0
+ bytes: 41568180225 14269941286 0 0 23191576124 14729236454 5668359 3684526
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 111 1 1 231044 231042 2 873131 366580 33899 45806 1273 2967035298 2901558270 7296576 1169614 0 47 0 0 0 0 1529 80978 0 0
+ xprt: rdma 0 0 106 0 1 231329 231328 1 717682 361947 33828 45455 1262 2952240311 2887232063 7392372 1221754 0 52 0 0 0 0 1474 80545 0 0
+ xprt: rdma 0 0 106 1 6 231078 231078 0 485645 363676 33786 45721 1320 2967925402 2899619554 7176800 1189264 0 48 0 0 0 0 1474 80827 0 0
+ xprt: rdma 0 0 109 0 6 231226 231226 0 486538 362707 33929 45482 1274 2956234946 2890484558 7294244 1190630 0 44 0 0 0 0 1507 80685 0 0
+ xprt: rdma 0 0 109 0 1 231464 231464 0 486184 367681 33875 45789 1233 2968915191 2906173167 7325356 1151195 0 42 0 0 0 0 1507 80897 0 0
+ xprt: rdma 0 0 110 0 1 231566 231566 0 485613 362833 33801 45688 1245 2967609016 2903914028 7186764 1166808 0 43 0 0 0 0 1518 80734 0 0
+ xprt: rdma 0 0 107 0 1 231285 231285 0 485205 365643 33905 45719 1299 2968468178 2901567814 7074060 1190420 0 47 0 0 0 0 1485 80923 0 0
+ xprt: rdma 0 0 109 1 1 231926 231926 0 487042 369268 33760 45696 1286 2970922269 2904447457 7343684 1204180 0 45 0 0 0 0 1507 80742 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 6 0 6 0
+ GETATTR: 963419 963419 0 101370948 107902704 15461 186184 213618 3
+ SETATTR: 8104 8104 0 1151604 291744 47 11806 11970 0
+ LOOKUP: 42796 42796 0 5399836 4859232 622 14626 15832 24620
+ ACCESS: 45471 45471 0 4949688 5456520 1051 8791 10504 1
+ READLINK: 14 14 0 1500 1908 0 3 3 0
+ READ: 371033 371033 0 43379708 23239084264 2450 276265 283119 0
+ WRITE: 390214 390214 0 14779478604 62433744 3531147 2810230 6351526 4
+ CREATE: 14579 14579 0 2254256 3731564 70 47115 47475 3
+ MKDIR: 2317 2317 0 378616 591172 17 9179 9302 9
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 903 903 0 121280 32508 7 1340 1364 0
+ RMDIR: 1 1 0 116 36 0 2 2 1
+ RENAME: 1691 1691 0 336668 74404 14 2998 3057 0
+ LINK: 71 71 0 12064 8804 0 91 92 0
+ READDIR: 4 4 0 512 15012 0 27 28 0
+ READDIRPLUS: 6573 6573 0 845980 12381640 70 7615 8194 0
+ FSSTAT: 49 49 0 5256 8232 56 41 99 0
+ FSINFO: 2 2 0 184 328 5 0 6 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/icds-prod mounted on /storage/prod with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.27,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 216327 953839 0 2 94628 41 977594 0 0 32 0 0 7792 0 176250 0 0 90732 0 0 0 0 0 0 0 0 0
+ bytes: 900620338 0 0 0 317803 0 96 0
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 1 0 120 27117 27117 0 27120 0 0 3 0 44329 44329 0 0 0 0 0 0 0 0 11 3 0 0
+ xprt: rdma 0 0 1 0 120 27117 27117 0 27121 0 0 5 1 110959 82199 0 0 0 0 0 0 0 0 11 6 0 0
+ xprt: rdma 0 0 1 0 28 27114 27114 0 27115 0 0 1 0 13215 13215 0 4239 0 0 0 0 0 0 11 1 0 0
+ xprt: rdma 0 0 1 0 28 27113 27113 0 27113 0 0 2 0 61752 61752 0 395 0 0 0 0 0 0 11 2 0 0
+ xprt: rdma 0 0 1 0 120 27115 27115 0 27118 0 0 2 1 24412 20192 0 3380 0 0 0 0 0 0 11 3 0 0
+ xprt: rdma 0 0 1 0 120 27113 27113 0 27114 0 0 4 0 24016 24016 0 1518 0 0 0 0 0 0 11 4 0 0
+ xprt: rdma 0 0 1 0 120 27115 27115 0 27119 0 0 3 1 118321 55597 0 518 0 0 0 0 0 0 11 4 0 0
+ xprt: rdma 0 0 1 0 120 27113 27113 0 27114 0 0 2 1 75257 10593 0 0 0 0 0 0 0 0 11 3 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 5 0 5 0
+ GETATTR: 216327 216327 0 19911140 24228624 790 44005 48809 0
+ SETATTR: 0 0 0 0 0 0 0 0 0
+ LOOKUP: 45 45 0 5016 7200 16 15 32 15
+ ACCESS: 494 494 0 49128 59280 17 148 169 0
+ READLINK: 1 1 0 92 148 0 0 0 0
+ READ: 32 32 0 3392 321932 1 28 30 0
+ WRITE: 0 0 0 0 0 0 0 0 0
+ CREATE: 0 0 0 0 0 0 0 0 0
+ MKDIR: 0 0 0 0 0 0 0 0 0
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 0 0 0 0 0 0 0 0 0
+ RMDIR: 0 0 0 0 0 0 0 0 0
+ RENAME: 0 0 0 0 0 0 0 0 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 0 0 0 0 0 0 0 0 0
+ READDIRPLUS: 2 2 0 236 3940 0 3 3 0
+ FSSTAT: 11 11 0 1136 1848 0 8 8 0
+ FSINFO: 2 2 0 184 328 5 0 6 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/icds-test mounted on /storage/test with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.29,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 11 226 0 0 0 6 260 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ bytes: 0 0 0 0 0 0 0 0
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 5 0 0 7 7 0 7 0 0 0 0 0 0 0 0 0 0 0 0 0 0 55 0 0 0
+ xprt: rdma 0 0 6 0 0 6 6 0 6 0 0 0 1 28932 172 0 0 0 0 0 0 0 0 66 1 0 0
+ xprt: rdma 0 0 5 0 0 5 5 0 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 55 0 0 0
+ xprt: rdma 0 0 5 0 0 5 5 0 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 55 0 0 0
+ xprt: rdma 0 0 5 0 0 5 5 0 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 55 0 0 0
+ xprt: rdma 0 0 4 0 0 4 4 0 4 0 0 0 0 0 0 0 0 0 0 0 0 0 0 44 0 0 0
+ xprt: rdma 0 0 4 0 0 4 4 0 4 0 0 0 0 0 0 0 0 0 0 0 0 0 0 44 0 0 0
+ xprt: rdma 0 0 4 0 0 4 4 0 4 0 0 0 0 0 0 0 0 0 0 0 0 0 0 44 0 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 6 0 6 0
+ GETATTR: 11 11 0 1156 1232 174 4 178 0
+ SETATTR: 0 0 0 0 0 0 0 0 0
+ LOOKUP: 6 6 0 740 192 35 2 37 6
+ ACCESS: 6 6 0 652 720 99 2 102 0
+ READLINK: 0 0 0 0 0 0 0 0 0
+ READ: 0 0 0 0 0 0 0 0 0
+ WRITE: 0 0 0 0 0 0 0 0 0
+ CREATE: 0 0 0 0 0 0 0 0 0
+ MKDIR: 0 0 0 0 0 0 0 0 0
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 0 0 0 0 0 0 0 0 0
+ RMDIR: 0 0 0 0 0 0 0 0 0
+ RENAME: 0 0 0 0 0 0 0 0 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 0 0 0 0 0 0 0 0 0
+ READDIRPLUS: 0 0 0 0 0 0 0 0 0
+ FSSTAT: 12 12 0 1240 2016 109 6 116 0
+ FSINFO: 2 2 0 184 328 5 0 6 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/icds-pcp-data mounted on /storage/pcp-data with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.6,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 6699 70747 708 20599 2762 2253 102622 720956 1 5508 0 37492 1710 163 1296 0 0 1194 0 0 720956 0 0 0 0 0 0
+ bytes: 1920695637 2068044591 0 0 1319950703 2143394964 322276 542160
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 1 0 163 11491 11491 0 11791 0 5235 2523 200 173699455 165053291 995572 0 0 0 0 0 0 0 11 7958 0 0
+ xprt: rdma 0 0 1 0 163 11490 11490 0 11783 0 5227 2523 206 174106340 165126008 1038128 0 0 0 0 0 0 0 11 7956 0 0
+ xprt: rdma 0 0 1 0 102 11490 11490 0 11795 0 5201 2531 215 174949903 165380715 1042544 0 0 0 0 0 0 0 11 7947 0 0
+ xprt: rdma 0 0 1 0 102 11490 11490 0 11803 0 5249 2530 198 173876087 165402039 1006656 2100 0 0 0 0 0 0 11 7977 0 0
+ xprt: rdma 0 0 1 0 102 11490 11490 0 11809 0 5227 2522 200 173699084 165094476 1049444 1968 0 0 0 0 0 0 11 7949 0 0
+ xprt: rdma 0 0 1 0 43 11490 11490 0 11796 0 5213 2527 208 174387760 165255764 1003696 1736 0 0 0 0 0 0 11 7948 0 0
+ xprt: rdma 0 0 1 0 43 11490 11490 0 11812 0 5229 2520 208 174061854 164930038 1022056 0 0 0 0 0 0 0 11 7957 0 0
+ xprt: rdma 0 0 1 0 43 11490 11490 0 11797 0 5218 2519 216 174643548 164999120 1041756 0 0 0 0 0 0 0 11 7953 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 5 0 5 0
+ GETATTR: 6699 6699 0 658876 750288 34 1459 1700 0
+ SETATTR: 878 878 0 112776 31608 3 378 409 0
+ LOOKUP: 1631 1631 0 188400 64096 16 372 413 1569
+ ACCESS: 952 952 0 95396 114240 20 197 241 0
+ READLINK: 0 0 0 0 0 0 0 0 0
+ READ: 20198 20198 0 2204072 1322536072 1049 7547 9069 0
+ WRITE: 57666 57666 0 2149857320 9226560 1838 115379 120554 0
+ CREATE: 826 826 0 99672 211456 3 1352 1378 0
+ MKDIR: 15 15 0 2048 3840 0 32 32 0
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 715 715 0 74796 25740 5 967 995 0
+ RMDIR: 0 0 0 0 0 0 0 0 0
+ RENAME: 2 2 0 304 88 0 3 3 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 0 0 0 0 0 0 0 0 0
+ READDIRPLUS: 700 700 0 84356 1157640 5 532 602 0
+ FSSTAT: 684 684 0 65712 114912 8 750 794 0
+ FSINFO: 2 2 0 184 328 5 0 5 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device scratch mounted on /scratch with fstype gpfs
diff --git a/src/pmdas/nfsclient/pmdanfsclient.python b/src/pmdas/nfsclient/pmdanfsclient.python
index 8e96e9031..786d95845 100644
--- a/src/pmdas/nfsclient/pmdanfsclient.python
+++ b/src/pmdas/nfsclient/pmdanfsclient.python
@@ -585,7 +585,7 @@ class NFSCLIENTPMDA(PMDA):
client.xprt.sending_u = self.longs(values[12])
client.xprt.pending_u = self.longs(values[13])
elif xprt_prot == 'udp':
- client.xprt.srcport = values[1]
+ client.xprt.srcport = self.chars(values[1])
client.xprt.bind_count = self.longs(values[2])
client.xprt.sends = self.longs(values[3])
client.xprt.recvs = self.longs(values[4])
@@ -596,7 +596,7 @@ class NFSCLIENTPMDA(PMDA):
client.xprt.sending_u = self.longs(values[9])
client.xprt.pending_u = self.longs(values[10])
elif xprt_prot == 'rdma':
- client.xprt.srcport = values[1]
+ client.xprt.srcport = self.chars(values[1])
client.xprt.bind_count = self.longs(values[2])
client.xprt.connect_count = self.longs(values[3])
client.xprt.connect_time = self.longs(values[4])

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,94 @@
diff -Naurp pcp-5.3.7.orig/src/pmdas/bcc/Upgrade pcp-5.3.7/src/pmdas/bcc/Upgrade
--- pcp-5.3.7.orig/src/pmdas/bcc/Upgrade 2021-08-16 14:12:25.000000000 +1000
+++ pcp-5.3.7/src/pmdas/bcc/Upgrade 2023-07-10 16:25:31.904767032 +1000
@@ -26,8 +26,11 @@ then
else
sed -i -e "s,^\(bcc.*binary\),\1 notready,g" $PCP_PMCDCONF_PATH
fi
- sed -i \
- -e "s,python $PCP_PMDAS_DIR/bcc/,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/bcc/,g" \
- $PCP_PMCDCONF_PATH 2>/dev/null
+ if grep -q '^bcc.*python ' "$PCP_PMCDCONF_PATH" 2>/dev/null
+ then
+ sed -i \
+ -e "s,python $PCP_PMDAS_DIR/bcc/,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/bcc/,g" \
+ $PCP_PMCDCONF_PATH 2>/dev/null
+ fi
fi
exit 0
diff -Naurp pcp-5.3.7.orig/src/pmdas/kvm/Upgrade pcp-5.3.7/src/pmdas/kvm/Upgrade
--- pcp-5.3.7.orig/src/pmdas/kvm/Upgrade 2019-02-06 17:16:29.000000000 +1100
+++ pcp-5.3.7/src/pmdas/kvm/Upgrade 2023-07-10 16:25:31.904767032 +1000
@@ -17,7 +17,7 @@
. $PCP_DIR/etc/pcp.env
-if grep -q ^kvm "$PCP_PMCDCONF_PATH" 2>/dev/null
+if grep -q '^kvm.*perl.*' "$PCP_PMCDCONF_PATH" 2>/dev/null
then
sed -i -e "s,perl $PCP_PMDAS_DIR/kvm/pmdakvm.pl,$PCP_PMDAS_DIR/kvm/pmdakvm -d 95,g" $PCP_PMCDCONF_PATH 2>/dev/null
fi
diff -Naurp pcp-5.3.7.orig/src/pmdas/mssql/Upgrade pcp-5.3.7/src/pmdas/mssql/Upgrade
--- pcp-5.3.7.orig/src/pmdas/mssql/Upgrade 2021-08-16 14:12:25.000000000 +1000
+++ pcp-5.3.7/src/pmdas/mssql/Upgrade 2023-07-10 16:25:31.904767032 +1000
@@ -17,14 +17,20 @@
. $PCP_DIR/etc/pcp.env
-if grep -q ^mssql "$PCP_PMCDCONF_PATH" 2>/dev/null
+if grep -q '^mssql.*perl ' "$PCP_PMCDCONF_PATH" 2>/dev/null
then
sed -i \
- -e "s,python $PCP_PMDAS_DIR/mssql/,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/mssql/,g" \
-e "s,perl $PCP_PMDAS_DIR/mssql/pmdamssql.pl,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/mssql/pmdamssql.python,g" \
$PCP_PMCDCONF_PATH 2>/dev/null
fi
+if grep -q '^mssql.*python ' "$PCP_PMCDCONF_PATH" 2>/dev/null
+then
+ sed -i \
+ -e "s,python $PCP_PMDAS_DIR/mssql/,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/mssql/,g" \
+ $PCP_PMCDCONF_PATH 2>/dev/null
+fi
+
perlpath=`which $PCP_PERL_PROG`
original="$PCP_PMDAS_DIR/mssql/mssql.conf"
upgraded="$PCP_PMDAS_DIR/mssql/mssql.conf.tmp"
diff -Naurp pcp-5.3.7.orig/src/pmdas/openmetrics/Upgrade pcp-5.3.7/src/pmdas/openmetrics/Upgrade
--- pcp-5.3.7.orig/src/pmdas/openmetrics/Upgrade 2021-08-16 14:12:25.000000000 +1000
+++ pcp-5.3.7/src/pmdas/openmetrics/Upgrade 2023-07-10 16:25:31.904767032 +1000
@@ -36,7 +36,7 @@ then
rm -f "$PCP_VAR_DIR/pmns/prometheus" 2>/dev/null
fi
-if grep -q ^openmetrics "$PCP_PMCDCONF_PATH" 2>/dev/null
+if grep -q '^openmetrics.*python ' "$PCP_PMCDCONF_PATH" 2>/dev/null
then
sed -i -e "s,python $PCP_PMDAS_DIR/openmetrics/,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/openmetrics/,g" $PCP_PMCDCONF_PATH 2>/dev/null
fi
diff -Naurp pcp-5.3.7.orig/src/pmdas/postgresql/Upgrade pcp-5.3.7/src/pmdas/postgresql/Upgrade
--- pcp-5.3.7.orig/src/pmdas/postgresql/Upgrade 2021-08-16 14:12:25.000000000 +1000
+++ pcp-5.3.7/src/pmdas/postgresql/Upgrade 2023-07-10 16:25:31.914767070 +1000
@@ -17,14 +17,20 @@
. $PCP_DIR/etc/pcp.env
-if grep -q ^postgresql "$PCP_PMCDCONF_PATH" 2>/dev/null
+if grep -q '^postgresql.*perl ' "$PCP_PMCDCONF_PATH" 2>/dev/null
then
sed -i \
- -e "s,python $PCP_PMDAS_DIR/postgresql/,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/postgresql/,g" \
-e "s,perl $PCP_PMDAS_DIR/postgresql/pmdapostgresql.pl,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/postgresql/pmdapostgresql.python,g" \
$PCP_PMCDCONF_PATH 2>/dev/null
fi
+if grep -q '^postgresql.*python ' "$PCP_PMCDCONF_PATH" 2>/dev/null
+then
+ sed -i \
+ -e "s,python $PCP_PMDAS_DIR/postgresql/,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/postgresql/,g" \
+ $PCP_PMCDCONF_PATH 2>/dev/null
+fi
+
perlpath=`which $PCP_PERL_PROG`
original="$PCP_PMDAS_DIR/postgresql/postgresql.conf"
upgraded="$PCP_PMDAS_DIR/postgresql/pmdapostgresql.conf"

View File

@ -0,0 +1,50 @@
diff --git a/src/pmdas/podman/.gitignore b/src/pmdas/podman/.gitignore
index a4f35e0f43..aea2c4052e 100644
--- a/src/pmdas/podman/.gitignore
+++ b/src/pmdas/podman/.gitignore
@@ -2,8 +2,6 @@ deps/
domain.h
pmdapodman
pmda_podman.so
-jsonsl.c
-jsonsl.h
help.dir
help.pag
exports
diff --git a/src/pmdas/podman/GNUmakefile b/src/pmdas/podman/GNUmakefile
index d6e58cfca5..1117a7a4e4 100644
--- a/src/pmdas/podman/GNUmakefile
+++ b/src/pmdas/podman/GNUmakefile
@@ -83,6 +83,7 @@ domain.h: ../../pmns/stdpmid
$(OBJECTS): domain.h
pmda.o: $(VERSION_SCRIPT)
pmda.o: $(TOPDIR)/src/include/pcp/libpcp.h
+podman.o: $(JSONSL_HFILES)
check:: $(CFILES) $(HFILES)
$(CLINT) $^
diff --git a/src/pmdas/root/.gitignore b/src/pmdas/root/.gitignore
index 21f507f0dd..b78b1fd28a 100644
--- a/src/pmdas/root/.gitignore
+++ b/src/pmdas/root/.gitignore
@@ -1,8 +1,6 @@
deps/
domain.h
pmdaroot
-jsonsl.c
-jsonsl.h
help.dir
help.pag
pmns
diff --git a/src/pmdas/root/GNUmakefile b/src/pmdas/root/GNUmakefile
index ed01a18fb8..b02d4ea834 100644
--- a/src/pmdas/root/GNUmakefile
+++ b/src/pmdas/root/GNUmakefile
@@ -83,6 +83,7 @@ pmns :
$(LN_S) -f root_root pmns
lxc.o root.o: $(TOPDIR)/src/include/pcp/libpcp.h
+podman.o: $(JSONSL_HFILES)
check:: $(CFILES) $(HFILES)
$(CLINT) $^

View File

@ -0,0 +1,27 @@
commit 3bde240a2acc85e63e2f7813330713dd9b59386e
Author: Nathan Scott <nathans@redhat.com>
Date: Wed Mar 27 14:51:28 2024 +1100
pmproxy: disable Redis protocol proxying by default
If a redis-server has been locked down in terms of connections,
we want to prevent pmproxy from being allowed to send arbitrary
RESP commands to it.
This protocol proxying doesn't affect PCP functionality at all,
it's more of a developer/sysadmin convenience when Redis is used in
cluster mode (relatively uncommon compared to localhost mode).
diff --git a/src/pmproxy/pmproxy.conf b/src/pmproxy/pmproxy.conf
index e54891792e..4cbc1c96af 100644
--- a/src/pmproxy/pmproxy.conf
+++ b/src/pmproxy/pmproxy.conf
@@ -29,7 +29,7 @@ pcp.enabled = true
http.enabled = true
# support Redis protocol proxying
-redis.enabled = true
+redis.enabled = false
# support SSL/TLS protocol wrapping
secure.enabled = true

View File

@ -0,0 +1,74 @@
diff -Naurp pcp-5.3.7.orig/src/selinux/pcp.fc pcp-5.3.7/src/selinux/pcp.fc
--- pcp-5.3.7.orig/src/selinux/pcp.fc 2023-11-21 13:25:11.689247531 +1100
+++ pcp-5.3.7/src/selinux/pcp.fc 2023-11-21 14:12:48.080744232 +1100
@@ -1,36 +1,32 @@
-/etc/rc\.d/init\.d/pmcd -- gen_context(system_u:object_r:pcp_pmcd_initrc_exec_t,s0)
-/etc/rc\.d/init\.d/pmlogger -- gen_context(system_u:object_r:pcp_pmlogger_initrc_exec_t,s0)
-/etc/rc\.d/init\.d/pmproxy -- gen_context(system_u:object_r:pcp_pmproxy_initrc_exec_t,s0)
-/etc/rc\.d/init\.d/pmie -- gen_context(system_u:object_r:pcp_pmie_initrc_exec_t,s0)
-
/usr/bin/pmie -- gen_context(system_u:object_r:pcp_pmie_exec_t,s0)
-/usr/bin/pmcd -- gen_context(system_u:object_r:pcp_pmcd_exec_t,s0)
/usr/bin/pmlogger -- gen_context(system_u:object_r:pcp_pmlogger_exec_t,s0)
-/usr/bin/pmproxy -- gen_context(system_u:object_r:pcp_pmproxy_exec_t,s0)
-
/usr/libexec/pcp/bin/pmcd -- gen_context(system_u:object_r:pcp_pmcd_exec_t,s0)
-/usr/libexec/pcp/bin/pmlogger -- gen_context(system_u:object_r:pcp_pmlogger_exec_t,s0)
/usr/libexec/pcp/bin/pmproxy -- gen_context(system_u:object_r:pcp_pmproxy_exec_t,s0)
-/usr/libexec/pcp/bin/pmie -- gen_context(system_u:object_r:pcp_pmie_exec_t,s0)
+
+/usr/libexec/pcp/bin/pmie_check -- gen_context(system_u:object_r:pcp_pmie_exec_t,s0)
+/usr/libexec/pcp/bin/pmie_daily -- gen_context(system_u:object_r:pcp_pmie_exec_t,s0)
+/usr/libexec/pcp/bin/pmie_farm -- gen_context(system_u:object_r:pcp_pmie_exec_t,s0)
+/usr/libexec/pcp/bin/pmlogger_check -- gen_context(system_u:object_r:pcp_pmlogger_exec_t,s0)
+/usr/libexec/pcp/bin/pmlogger_daily -- gen_context(system_u:object_r:pcp_pmlogger_exec_t,s0)
+/usr/libexec/pcp/bin/pmlogger_farm -- gen_context(system_u:object_r:pcp_pmlogger_exec_t,s0)
/usr/libexec/pcp/lib/pmcd -- gen_context(system_u:object_r:pcp_pmcd_initrc_exec_t,s0)
/usr/libexec/pcp/lib/pmlogger -- gen_context(system_u:object_r:pcp_pmlogger_initrc_exec_t,s0)
/usr/libexec/pcp/lib/pmproxy -- gen_context(system_u:object_r:pcp_pmproxy_initrc_exec_t,s0)
/usr/libexec/pcp/lib/pmie -- gen_context(system_u:object_r:pcp_pmie_initrc_exec_t,s0)
-/usr/share/pcp/lib/pmie -- gen_context(system_u:object_r:pcp_pmie_exec_t,s0)
-
-/usr/share/pcp/lib/pmlogger -- gen_context(system_u:object_r:pcp_pmlogger_exec_t,s0)
+/usr/share/pcp/lib/pmcd -- gen_context(system_u:object_r:pcp_pmcd_exec_t,s0)
+/usr/share/pcp/lib/pmproxy -- gen_context(system_u:object_r:pcp_pmproxy_exec_t,s0)
+/usr/share/pcp/lib/pmie -- gen_context(system_u:object_r:pcp_pmie_exec_t,s0)
+/usr/share/pcp/lib/pmlogger -- gen_context(system_u:object_r:pcp_pmlogger_exec_t,s0)
/var/lib/pcp(/.*)? gen_context(system_u:object_r:pcp_var_lib_t,s0)
/var/lib/pcp/pmdas/.*/Install -- gen_context(system_u:object_r:pcp_plugin_exec_t,s0)
-/var/lib/pcp/pmdas/.*/Remove -- gen_context(system_u:object_r:pcp_plugin_exec_t,s0)
+/var/lib/pcp/pmdas/.*/Remove -- gen_context(system_u:object_r:pcp_plugin_exec_t,s0)
/var/lib/pcp/pmdas/.*/Upgrade -- gen_context(system_u:object_r:pcp_plugin_exec_t,s0)
/var/log/pcp(/.*)? gen_context(system_u:object_r:pcp_log_t,s0)
/var/run/pcp(/.*)? gen_context(system_u:object_r:pcp_var_run_t,s0)
-/var/run/pmcd\.socket -- gen_context(system_u:object_r:pcp_var_run_t,s0)
-/var/run/pmlogger\.primary\.socket -l gen_context(system_u:object_r:pcp_var_run_t,s0)
diff -Naurp pcp-5.3.7.orig/src/selinux/pcp.te pcp-5.3.7/src/selinux/pcp.te
--- pcp-5.3.7.orig/src/selinux/pcp.te 2023-11-21 13:25:11.690247528 +1100
+++ pcp-5.3.7/src/selinux/pcp.te 2023-11-21 14:13:03.855770809 +1100
@@ -279,6 +279,7 @@ allow pcp_pmlogger_t pcp_pmcd_t:unix_str
allow pcp_pmlogger_t self:unix_dgram_socket create_socket_perms;
allow pcp_pmlogger_t pcp_pmlogger_exec_t:file execute_no_trans;
+allow pcp_pmlogger_t ldconfig_exec_t:file { execute execute_no_trans };
dontaudit pcp_pmlogger_t self:cap_userns { sys_ptrace };
@@ -313,6 +314,10 @@ optional_policy(`
rpm_script_signal(pcp_pmlogger_t)
')
+optional_policy(`
+ userdom_setattr_user_home_content_files(pcp_pmlogger_t)
+')
+
########################################
#
# pcp_plugin local policy

File diff suppressed because it is too large Load Diff

View File

@ -1,16 +1,33 @@
Name: pcp
Version: 5.3.7
Release: 7%{?dist}
Release: 20%{?dist}
Summary: System-level performance monitoring and performance management
License: GPLv2+ and LGPLv2+ and CC-BY
URL: https://pcp.io
%global artifactory https://performancecopilot.jfrog.io/artifactory
Source0: %{artifactory}/pcp-source-release/pcp-%{version}.src.tar.gz
Patch0: redhat-bugzilla-1981886-pmdasockets-backporting.patch
Patch1: redhat-bugzilla-2059461-pmie-systemd-fixup.patch
Patch2: redhat-bugzilla-2081262-pmdaproc-cgroups-fix.patch
Patch3: redhat-bugzilla-2059463-pmdapostfix-harden.patch
Patch0: redhat-bugzilla-2003956-pmdabcc-update-kernel-version-check-due-to-backporting.patch
Patch1: redhat-bugzilla-1981886-pmdasockets-backporting.patch
Patch2: redhat-bugzilla-2059461-pmie-systemd-fixup.patch
Patch3: redhat-bugzilla-2081262-pmdaproc-cgroups-fix.patch
Patch4: redhat-bugzilla-2059463-pmdapostfix-harden.patch
Patch5: redhat-bugzilla-2083897-dstat-missed-ticks.patch
Patch6: redhat-bugzilla-2111742-selinux-policy.patch
Patch7: redhat-bugzilla-2093751-sudoers-docs.patch
Patch8: redhat-bugzilla-2101574-farm-config.patch
Patch9: redhat-bugzilla-2135314-pmfind-fix.patch
Patch10: redhat-bugzilla-2139012-pmdasnmp-config.patch
Patch11: redhat-bugzilla-1981886-pcp-ss-fetchgroup.patch
Patch12: redhat-bugzilla-2159207-pmproxy-rollup-fixes.patch
Patch13: redhat-bugzilla-2139325-openmetrics-in-grafana.patch
Patch14: redhat-bugzilla-2150889-nfsclient-srcport.patch
Patch15: redhat-bugzilla-2219731-hacluster-metrics.patch
Patch16: redhat-bugzilla-2211263-pmcd-conf-rewrite.patch
Patch17: redhat-build-jsonsl.patch
Patch18: redhat-issues-RHEL-7507-pmdaopenmetrics-quoting.patch
Patch19: redhat-issues-RHEL-7501-pmlogger_farm-selinux-policy.patch
Patch20: redhat-issues-RHEL-30715-pmproxy-resp-proxy-disabled.patch
# The additional linker flags break out-of-tree PMDAs.
# https://bugzilla.redhat.com/show_bug.cgi?id=2043092
@ -100,7 +117,7 @@ Patch3: redhat-bugzilla-2059463-pmdapostfix-harden.patch
# support for pmdabpf, check bpf.spec for supported architectures of bpf
%if 0%{?fedora} >= 33 || 0%{?rhel} > 8
%ifarch x86_64 ppc64 ppc64le aarch64
%ifarch x86_64 %{power64} aarch64 s390x
%global disable_bpf 0
%else
%global disable_bpf 1
@ -292,40 +309,28 @@ BuildRequires: qt5-qtsvg-devel
%endif
Requires: bash xz gawk sed grep findutils which %{_hostname_executable}
%if 0%{?rhel} > 8
Requires: cyrus-sasl-scram
%endif
Requires: pcp-libs = %{version}-%{release}
%if !%{disable_selinux}
%if !%{disable_selinux}
# rpm boolean dependencies are supported since RHEL 8
%if 0%{?fedora} >= 35 || 0%{?rhel} >= 8
# This ensures that the pcp-selinux package and all it's dependencies are not pulled
# into containers and other systems that do not use SELinux
# This ensures that the pcp-selinux package and all its dependencies are
# not pulled into containers and other systems that do not use SELinux
Requires: (pcp-selinux = %{version}-%{release} if selinux-policy-targeted)
%else
Requires: pcp-selinux = %{version}-%{release}
%endif
%endif
%global _confdir %{_sysconfdir}/pcp
%global _logsdir %{_localstatedir}/log/pcp
%global _pmnsdir %{_localstatedir}/lib/pcp/pmns
%global _pmnsexecdir %{_libexecdir}/pcp/pmns
%global _tempsdir %{_localstatedir}/lib/pcp/tmp
%global _pmdasdir %{_localstatedir}/lib/pcp/pmdas
%global _pmdasexecdir %{_libexecdir}/pcp/pmdas
%global _testsdir %{_localstatedir}/lib/pcp/testsuite
%global _selinuxdir %{_localstatedir}/lib/pcp/selinux
%global _selinuxexecdir %{_libexecdir}/pcp/selinux
%global _logconfdir %{_localstatedir}/lib/pcp/config/pmlogconf
%global _ieconfigdir %{_localstatedir}/lib/pcp/config/pmie
%global _ieconfdir %{_localstatedir}/lib/pcp/config/pmieconf
%global _tapsetdir %{_datadir}/systemtap/tapset
%global _bashcompdir %{_datadir}/bash-completion/completions
%global _pixmapdir %{_datadir}/pcp-gui/pixmaps
%global _hicolordir %{_datadir}/icons/hicolor
%global _booksdir %{_datadir}/doc/pcp-doc
%global _selinuxdir %{_datadir}/selinux/packages/targeted
%if 0%{?fedora} >= 20 || 0%{?rhel} >= 8
%global _with_doc --with-docdir=%{_docdir}/%{name}
@ -446,14 +451,13 @@ else
fi
}
%global selinux_handle_policy() %{expand:
if [ %1 -ge 1 ]
%global run_pmieconf() %{expand:
if [ -w "%1" ]
then
%{_libexecdir}/pcp/bin/selinux-setup %{_selinuxdir} install %2
elif [ %1 -eq 0 ]
then
%{_libexecdir}/pcp/bin/selinux-setup %{_selinuxdir} remove %2
fi
pmieconf -c enable "%2"
else
echo "WARNING: Cannot write to %1, skipping pmieconf enable of %2." >&2
fi
}
%description
@ -2289,13 +2293,8 @@ interface rules, type enforcement and file context adjustments for an
updated policy package.
%endif
%prep
%setup -q
%patch0 -p1
%patch1 -p1
%patch2 -p1
%patch3 -p1
%autosetup -p1
%build
# the buildsubdir macro gets defined in %setup and is apparently only available in the next step (i.e. the %build step)
@ -2443,7 +2442,7 @@ total_manifest | keep 'tutorials|/html/|pcp-doc|man.*\.[1-9].*' | cull 'out' >pc
total_manifest | keep 'testsuite|etc/systemd/system|libpcp_fault|pcp/fault.h' >pcp-testsuite-files
basic_manifest | keep "$PCP_GUI|pcp-gui|applications|pixmaps|hicolor" | cull 'pmtime.h' >pcp-gui-files
basic_manifest | keep 'selinux' | cull 'tmp|GNUselinuxdefs' >pcp-selinux-files
basic_manifest | keep 'selinux' | cull 'tmp|testsuite' >pcp-selinux-files
basic_manifest | keep 'zeroconf|daily[-_]report|/sa$' >pcp-zeroconf-files
basic_manifest | grep -E -e 'pmiostat|pmrep|dstat|htop|pcp2csv' \
-e 'pcp-atop|pcp-dmcache|pcp-dstat|pcp-free|pcp-htop' \
@ -2687,6 +2686,9 @@ done
%endif
%pre testsuite
%if !%{disable_selinux}
%selinux_relabel_pre -s targeted
%endif
test -d %{_testsdir} || mkdir -p -m 755 %{_testsdir}
getent group pcpqa >/dev/null || groupadd -r pcpqa
getent passwd pcpqa >/dev/null || \
@ -2695,6 +2697,11 @@ chown -R pcpqa:pcpqa %{_testsdir} 2>/dev/null
exit 0
%post testsuite
%if !%{disable_selinux}
semodule -r pcpqa >/dev/null 2>&1 || true
%selinux_modules_install -s targeted %{_selinuxdir}/pcp-testsuite.pp.bz2
%selinux_relabel_post -s targeted
%endif
chown -R pcpqa:pcpqa %{_testsdir} 2>/dev/null
%if 0%{?rhel}
%if !%{disable_systemd}
@ -2709,6 +2716,14 @@ chown -R pcpqa:pcpqa %{_testsdir} 2>/dev/null
%endif
exit 0
%if !%{disable_selinux}
%postun testsuite
if [ $1 -eq 0 ]; then
%selinux_modules_uninstall -s targeted pcp-testsuite
%selinux_relabel_post -s targeted
fi
%endif
%pre
getent group pcp >/dev/null || groupadd -r pcp
getent passwd pcp >/dev/null || \
@ -2995,6 +3010,7 @@ fi
PCP_PMDAS_DIR=%{_pmdasdir}
PCP_SYSCONFIG_DIR=%{_sysconfdir}/sysconfig
PCP_PMCDCONF_PATH=%{_confdir}/pmcd/pmcd.conf
PCP_PMIECONFIG_DIR=%{_ieconfigdir}
# auto-install important PMDAs for RH Support (if not present already)
for PMDA in dm nfsclient openmetrics ; do
if ! grep -q "$PMDA/pmda$PMDA" "$PCP_PMCDCONF_PATH"
@ -3003,7 +3019,7 @@ for PMDA in dm nfsclient openmetrics ; do
fi
done
# auto-enable these usually optional pmie rules
pmieconf -c enable dmthin
%{run_pmieconf "$PCP_PMIECONFIG_DIR" dmthin}
%if 0%{?rhel}
%if !%{disable_systemd}
systemctl restart pmcd pmlogger pmie >/dev/null 2>&1
@ -3018,17 +3034,6 @@ pmieconf -c enable dmthin
%endif
%endif
%if !%{disable_selinux}
%post selinux
%{selinux_handle_policy "$1" "pcpupstream"}
%triggerin selinux -- docker-selinux
%{selinux_handle_policy "$1" "pcpupstream-docker"}
%triggerin selinux -- container-selinux
%{selinux_handle_policy "$1" "pcpupstream-container"}
%endif
%post
PCP_PMNS_DIR=%{_pmnsdir}
PCP_LOG_DIR=%{_logsdir}
@ -3067,14 +3072,21 @@ PCP_LOG_DIR=%{_logsdir}
%endif
%if !%{disable_selinux}
%preun selinux
%{selinux_handle_policy "$1" "pcpupstream"}
%pre selinux
%selinux_relabel_pre -s targeted
%triggerun selinux -- docker-selinux
%{selinux_handle_policy "$1" "pcpupstream-docker"}
%post selinux
semodule -r pcpupstream-container >/dev/null 2>&1 || true
semodule -r pcpupstream-docker >/dev/null 2>&1 || true
semodule -r pcpupstream >/dev/null 2>&1 || true
%selinux_modules_install -s targeted %{_selinuxdir}/pcp.pp.bz2
%selinux_relabel_post -s targeted
%triggerun selinux -- container-selinux
%{selinux_handle_policy "$1" "pcpupstream-container"}
%postun selinux
if [ $1 -eq 0 ]; then
%selinux_modules_uninstall -s targeted pcp
%selinux_relabel_post -s targeted
fi
%endif
%files -f pcp-files.rpm
@ -3093,6 +3105,7 @@ PCP_LOG_DIR=%{_logsdir}
%if !%{disable_selinux}
%files selinux -f pcp-selinux-files.rpm
%ghost %verify(not md5 size mode mtime) %{_sharedstatedir}/selinux/targeted/active/modules/200/pcp
%endif
%if !%{disable_qt}
@ -3353,6 +3366,35 @@ PCP_LOG_DIR=%{_logsdir}
%files zeroconf -f pcp-zeroconf-files.rpm
%changelog
* Wed Apr 17 2024 Nathan Scott <nathans@redhat.com> - 5.3.7-20
- Disable RESP proxying by default in pmproxy (RHEL-30715)
* Tue Nov 21 2023 Nathan Scott <nathans@redhat.com> - 5.3.7-19
- Fix OpenMetrics PMDA mishandling systemd metrics (RHEL-7507)
- Additional pmlogger_farm service SELinux policy (RHEL-7501)
* Wed Jul 05 2023 Nathan Scott <nathans@redhat.com> - 5.3.7-18
- Improve pmproxy handling large HTTP requests (BZ 2159207)
- Fix hacluster metrics with current Pacemaker (BZ 2219731)
- Ensure pmcd.conf not needlessly over written (BZ 2211263)
* Thu Mar 09 2023 Nathan Scott <nathans@redhat.com> - 5.3.7-17
- Harden pmdaopenmetrics metric name validator (BZ 2139325)
- Fix issues in pmdanfsclient srcport handling (BZ 2150889)
* Thu Nov 17 2022 Nathan Scott <nathans@redhat.com> - 5.3.7-16
- Ensure SNMP metrics config symlink installed (BZ 2139012)
* Thu Oct 27 2022 Nathan Scott <nathans@redhat.com> - 5.3.7-15
- Backport independent selinux policy rework (BZ 2111742)
- Fix invalid memory access in pmfind utility (BZ 2135314)
- Allow pcp-dstat(1) missed ticks suppression (BZ 2083897)
* Mon Sep 05 2022 Nathan Scott <nathans@redhat.com> - 5.3.7-9
- Additional selinux policy rules for pmdabcc (BZ 2050094)
- Describe working sudoers requiretty configuration (BZ 2093751)
- Separate pmlogger_farm configuration mechanism (BZ 2101574)
* Mon May 09 2022 Nathan Scott <nathans@redhat.com> - 5.3.7-7
- Additional selinux policy rules for pmdasockets (BZ 1981886)
@ -3362,88 +3404,106 @@ PCP_LOG_DIR=%{_logsdir}
* Tue May 03 2022 Nathan Scott <nathans@redhat.com> - 5.3.7-3
- Fix remaining issues in the pcp-ss(1) utility (BZ 1981886)
- Remove benign pmie systemd unit file warning (BZ 2072971)
- Remove benign warning message from pmie systemd unit file.
* Tue Apr 05 2022 Nathan Scott <nathans@redhat.com> - 5.3.7-2
- Rebase to latest stable version of PCP (BZ 2059462)
* Tue Apr 05 2022 Nathan Scott <nathans@redhat.com> - 5.3.7-1
- Fix several issues in the pcp-ss(1) utility (BZ 1981886)
- Document pmproxy archive discovery further (BZ 2026726)
- Improve SQL Server PMDA secure settings (BZ 2057615)
- Rebase to latest stable version of PCP (BZ 2059461)
* Wed Feb 02 2022 Nathan Scott <nathans@redhat.com> - 5.3.5-8
- Fix pcp-zeroconf logger interval override regression (BZ 1991764)
- Fix pcp-zeroconf logger interval override regression (BZ 1991763)
- Remove warnings from spec setup of PCP systemd units (BZ 2048024)
* Fri Jan 21 2022 Nathan Scott <nathans@redhat.com> - 5.3.5-5
- Further improve pmlogger and pmie farm services (BZ 2030138)
- Resolve a further KVM selinux issue with KVM (BZ 2006430)
- Rebuild to use latest selinux-policy for el9 (BZ 2041503)
* Thu Dec 16 2021 Andreas Gerstmayr <agerstmayr@redhat.com> - 5.3.5-6
- pmdabcc: update qa/1118 testcase to match new output (BZ 2003956)
* Fri Dec 17 2021 Nathan Scott <nathans@redhat.com> - 5.3.5-4
* Wed Dec 15 2021 Nathan Scott <nathans@redhat.com> - 5.3.5-4
- pmdabcc: resolve compilation issues of some bcc PMDA modules on
aarch64, ppc64le and s390x (BZ 2024982)
- pmdabpf: support arm and powerpc architectures (BZ 2024980)
aarch64, ppc64le and s390x (BZ 2003956)
- Further improve pmlogger service startup latency (BZ 1973833)
- Additional improvements to farm systemd services (BZ 2030138)
- Additional improvements to farm systemd services (BZ 2027753)
* Thu Dec 09 2021 Nathan Scott <nathans@redhat.com> - 5.3.5-3
- Resolve failure in the Nvidia metrics agent (BZ 2030123)
- PMDA indom cache loading performance improvements (BZ 2030122)
- Consistent user experience for new farm services (BZ 2030138)
- Resilience improvements for the pmproxy service (BZ 2030141)
- Resolve failure in the Nvidia metrics agent (BZ 2029301)
- PMDA indom cache loading performance improvements (BZ 2030121)
- Consistent user experience for new farm services (BZ 2027753)
- Resilience improvements for the pmproxy service (BZ 2030140)
* Fri Nov 26 2021 Nathan Scott <nathans@redhat.com> - 5.3.5-2
- Updates to pmlogconf persistance changes (BZ 2017632)
- Updates to pmlogconf persistence changes (BZ 2017632)
* Wed Nov 10 2021 Nathan Scott <nathans@redhat.com> - 5.3.5-1
- Update to latest PCP sources (BZ 1991764)
- Extend pmlogger(1) man page --interval option (BZ 2018083)
- Disable Avahi service advertisement by default (BZ 1899625)
- Use separate localhost and farm service cgroups (BZ 1991896)
- Update and extend the Nvidia GPU metric coverage (BZ 1690590)
- Assessment API and pmdamssql(1) share credentials (BZ 1951342)
- Improve remote pmlogger over slow network connections (BZ 1973833)
- Implement pcp-atop(1) support for Nvidia GPU reports (BZ 1984273)
- Drop DMA[32] zones when the zone is not in node 0 (BZ 1985519)
- Support kernel changes to /proc/zoneinfo metrics (BZ 1985523)
- Ensure pmlogconf persists configuration changes (BZ 2017632)
- Auto-reconnect lost redis connection in pmproxy (BZ 1989287)
- Rebase to a more recent upstream version of PCP (BZ 1991763)
* Fri Oct 15 2021 Mark Goodwin <mgoodwin@redhat.com> - 5.3.4-2
- Fix pmlogger manual start with service disabled (BZ 2013937)
- Fix pmlogger manual start with service disabled (BZ 2018011)
* Fri Oct 08 2021 Nathan Scott <nathans@redhat.com> - 5.3.4-1
- Update to latest PCP sources (BZ 1991764)
- Add pcp-atop(1) support for 'curscal' values (BZ 1984271)
- Fix pcp-atop(1) reporting perfevent 'ipc' values (BZ 1986264)
- Fix pmlogger(1) exiting on receipt of SIGALRM (BZ 2004771)
- Fix values of some hacluster metrics on s390x (BZ 2008298)
- Add new pmdads389(1) metrics for replication (BZ 1966122)
- Rebase to a more recent upstream version of PCP (BZ 1991763)
* Tue Oct 05 2021 Nathan Scott <nathans@redhat.com> - 5.3.3-1
- Resolve pmdakvm(1) selinux AVC denials (BZ 2006430)
- Related: rhbz#1991764
* Wed Sep 15 2021 Nathan Scott <nathans@redhat.com> - 5.3.3-1
- Add new pmdads389(1) metrics for replication (BZ 1966122)
- Add label support to pmdahacluster(1) metrics (BZ 1972277)
- Improve pmieconf rules for saturated processors (BZ 1994680)
- Auto-upgrade pmcd.conf for python2 to python3 (BZ 1988403)
- Document unloading pmdakvm(1) for kernel module (BZ 1977740)
- Add option to pmdalinux(1) for slabinfo metrics (BZ 1962902)
- Fix for OpenMetrics scripting generating AVCs (BZ 1985818)
- Resolve some pcp-testsuite failures (BZ 1980459, 1981686)
- Rebase to a more recent upstream version of PCP (BZ 1991763)
* Mon Aug 30 2021 Nathan Scott <nathans@redhat.com> - 5.3.1-6
- Fix pmdapodman initialization and selinux policy
- Related: rhbz#1957575
* Sat Aug 28 2021 Nathan Scott <nathans@redhat.com> - 5.3.1-5
- Fix pmdapodman initialization and selinux policy (BZ 1962019)
* Mon Aug 09 2021 Mohan Boddu <mboddu@redhat.com> - 5.3.1-5
- Rebuilt for IMA sigs, glibc 2.34, aarch64 flags
Related: rhbz#1991688
* Mon Jul 12 2021 Mark Goodwin <mgoodwin@redhat.com> - 5.3.1-4
* Fri Jul 09 2021 Mark Goodwin <mgoodwin@redhat.com> - 5.3.1-3
- Improve pmproxy and libpcp_web scalability (BZ 1975069)
- Provide a pcp-ss(1) tool in pcp-system-tools (BZ 1879350)
* Fri Jun 25 2021 Nathan Scott <nathans@redhat.com> - 5.3.1-3
* Thu Jun 24 2021 Nathan Scott <nathans@redhat.com> - 5.3.1-2
- Fix pmproxy parallel TLS requests bug (BZ 1947989)
- Backport pmrep archive creation fix (BZ 1974266)
- Related: rhbz#1957575
* Wed Jun 16 2021 Mohan Boddu <mboddu@redhat.com> - 5.3.1-2
- Rebuilt for RHEL 9 BETA for openssl 3.0
Related: rhbz#1971065
* Fri Jun 04 2021 Nathan Scott <nathans@redhat.com> - 5.3.1-1
- Improved diagnostics from pmie service scripts (BZ 1954576)
- Drop option configuration file env var setting (BZ 1967174)
- Add pmie support for thermal throttle detection (BZ 1908212)
- Annotate a couple of obscure config files in rpm (BZ 1964253)
- Rebase to a more recent upstream version of PCP (BZ 1922040)
* Wed Jun 09 2021 Nathan Scott <nathans@redhat.com> - 5.3.1-1
- Add selinux rules for pmdabcc bpf access (BZ 1952374)
- Update to latest PCP sources (BZ 1957575)
* Tue May 18 2021 Nathan Scott <nathans@redhat.com> - 5.3.0-2
* Fri May 14 2021 Nathan Scott <nathans@redhat.com> - 5.3.0-4
- Improve pmchart duplicate archive diagnostics (BZ 1615718)
- Save pmchart users' preferred archive locations (BZ 1615742)
- Remove libvarlink dependency from podman PMDA (BZ 1956608)
* Thu Apr 22 2021 Nathan Scott <nathans@redhat.com> - 5.3.0-3
- Correct pcp-testsuite file permissions (BZ 1922040)
* Tue Apr 20 2021 Nathan Scott <nathans@redhat.com> - 5.3.0-2
- Resolve pcp-testsuite linkage problems (BZ 1950263)
* Thu May 06 2021 Nathan Scott <nathans@redhat.com> - 5.3.0-1
- Added conditional lockdown policy access by pmdakvm (BZ 1940971)
- Added spec file dependency on cyrus-sasl-scram (BZ 1942879)
- Update to latest PCP sources (BZ 1957575)
* Fri Apr 16 2021 Mohan Boddu <mboddu@redhat.com> - 5.2.5-5
- Rebuilt for RHEL 9 BETA on Apr 15th 2021. Related: rhbz#1947937
* Fri Apr 16 2021 Nathan Scott <nathans@redhat.com> - 5.3.0-1
- Allow pcp-atop to filter processes on state (BZ 1807046)
- Dynamically adjust the pmproxy open file limit (BZ 1954711)
- Rebase to a more recent upstream version of PCP (BZ 1922040)
- PCP scalability and memory footprint improvements (BZ 1942292)
* Fri Feb 19 2021 Nathan Scott <nathans@redhat.com> - 5.2.5-4
- Revert removal of pmcd unit dependencies (BZs 1929556, 1930284)
@@ -3452,44 +3512,116 @@ PCP_LOG_DIR=%{_logsdir}
- Specify pmns_name in sockets PMDA Install and Remove scripts
- Add selinux rules for pmcd to use netlink tcpdiag sockets (BZ 1926756)
* Wed Feb 10 2021 Nathan Scott <nathans@redhat.com> - 5.2.5-2
- Update to latest PCP sources.
- Fix pcp-dstat(1) sample count being off-by-one (BZ 1922768)
- Add dstat(1) symlink to pcp-dstat(1) in pcp-doc (BZ 1922771)
* Mon Feb 08 2021 Andreas Gerstmayr <agerstmayr@redhat.com> - 5.2.5-2
- Fixed typo in specfile (pcp-testsuite requires pcp-pmda-hacluster
and pcp-pmda-sockets instead of pcp-pmdas-hacluster etc.)
* Tue Jan 26 2021 Fedora Release Engineering <releng@fedoraproject.org> - 5.2.3-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild
* Mon Feb 08 2021 Nathan Scott <nathans@redhat.com> - 5.2.5-1
- Extended socket statistic metrics (BZ 1491892)
- Add explicit umask to startup scripts (BZ 1833647)
- Fix pmlogger scripts for remote loggers (BZ 1919950)
- Enhance selinux policy for netcheck metrics (BZ 1897719)
- Resolve packaging issue related to tmpfiles (BZ 1919974)
- Ensure online help text exists for all metrics (BZ 1670029)
- Adds HA cluster metrics (Pacemaker, Corosync, DRBD and SBD)
- Rebase to a more recent upstream version of PCP (BZ 1854035)
* Fri Dec 18 2020 Nathan Scott <nathans@redhat.com> - 5.2.3-1
- Update to latest PCP sources.
- Adds a new pcp-htop(1) utility (BZ 1716242)
- Adds per-process network metrics to pcp-atop(1) (BZ 1733901)
- Rebase to latest upstream stable release of PCP (BZ 1854035)
- Resolves an selinux issue in the netcheck agent (BZ 1897719)
- Rebase to a more recent upstream version of PCP (BZ 1854035)
* Wed Nov 11 2020 Nathan Scott <nathans@redhat.com> - 5.2.2-1
- Update to latest PCP sources.
- Add online help text for PCP derived metrics (BZ 1670029)
- Add process accounting metrics and pcp-atop support (BZ 1814526)
- Fix pmchart recorded archive folio host names (BZ 1824403)
- Fix selinux issue with secure pmcd connections (BZ 1857396)
- Extend pmlogger_daily script compression docs (BZ 1880392)
- perfevent support for hv_24x7/hv_gpci dynamic events (BZ 1889613)
- Improve pmlogger multiple remote node setups (BZ 1892326)
- Rebase to a more recent upstream version of PCP (BZ 1854035)
* Tue Sep 29 2020 Nathan Scott <nathans@redhat.com> - 5.2.1-2
- Fix rpm spec permissions for pmie and sa directories (BZ 1882615)
- Add versioned rpm spec dependencies for pcp-zeroconf (BZ 1882664)
* Fri Sep 25 2020 Nathan Scott <nathans@redhat.com> - 5.2.1-1
- Update to latest PCP sources.
- Interactive mode in pcp-atop for time intervals (BZ 1806824)
- Add branch-to-specific-time feature in pcp-atop (BZ 1806927)
- Fix hostname written to archive folio by pmchart (BZ 1824403)
- Allow pmafm to function with compressed archives (BZ 1839666)
- Fix folio handling of archive files outside /tmp (BZ 1839698)
- pcp-pmda-lio deals with a missing kernel module (BZ 1843792)
- Windows authentication mode for SQL Server PMDA (BZ 1847808)
- Fix previous sample selection within pcp-atop (BZ 1847925)
- Make pcp-atopsar honour the -e (end time) option (BZ 1851838)
- Fix pcp-pmda-zswap crashing with namespace error (BZ 1851887)
- Fix pcp-atopsar handling of -H (header) option (BZ 1857121)
- Update pcp-free(1) to show 'shared' memory usage (BZ 1878768)
- Rebase to a more recent upstream version of PCP (BZ 1854035)
* Sat Aug 08 2020 Mark Goodwin <mgoodwin@redhat.com> - 5.2.0-1
- FHS compliance in installed /var file locations (BZ 1827441)
- pmproxy intermittently crashes at uv_timer_stop (BZ 1789312)
- Update to latest PCP sources.
- Re-enabled LTO.
* Tue Jun 23 2020 Mark Goodwin <mgoodwin@redhat.com> - 5.1.1-3
- Fix for missing runtime deps on perl Net::SNMP (BZ 1790433)
- Resolve covscan and other issues from upstream QA (BZ 1849511)
- Possible memory leak detected in pcp-atop (BZ 1846705)
- Installation of pcp-pmda-samba causes SELinux issues (BZ 1790452)
- Fix Intermittent pminfo crashes (BZ 1848995)
- Silence openmetrics PMDA warnings, add status metrics (BZ 1846711)
- Set PACKAGE_BUILD in VERSION.pcp so pmcd.build metric is correct
* Tue Jul 28 2020 Fedora Release Engineering <releng@fedoraproject.org> - 5.1.1-4
- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild
* Wed Jul 01 2020 Jeff Law <law@redhat.com> - 5.1.1-3
- Disable LTO
* Tue Jun 23 2020 Jitka Plesnikova <jplesnik@redhat.com> - 5.1.1-2
- Perl 5.32 rebuild
* Thu Jun 11 2020 Mark Goodwin <mgoodwin@redhat.com> - 5.1.1-2
- Activate pmlogger_rewrite on upgrades (BZ 1541406)
- Fix Coverity issues in pmdastatsd and pmlogconf (BZ 1792971)
- libpcp_web: ensure context is freed only after timer is fully closed
- Services: pmlogger and pmie services Want pmcd on boot
- Fix intermittent pmlogconf core dumps (BZ 1845241)
- pcp-atop: resolve potential null task pointer dereference
- pmproxy: improve diagnostics, particularly relating to http requests
- pmproxy: cleanup, remove unused flags and dead code in http encoding
- pmproxy: support the OPTIONS protocol in HTTP 1.1
- libpcp_web: add resilience to descriptor lookup paths (BZ 1837153)
* Fri May 29 2020 Mark Goodwin <mgoodwin@redhat.com> - 5.1.1-1
- Update to latest PCP sources.
- Rebuild to pick up changed HdrHistogram_c version (BZ 1831502)
- pmdakvm: handle kernel lockdown in integrity mode (BZ 1824297)
- PCP_NSSDB_DIR should not be mentioned in /etc/pcp.conf (BZ 1826020)
- pmie_daily.service runs as pcp but tries to do root things (BZ 1832262)
- pcp-testsuite-5.1.0-2.el8 package is missing pcpqa.pp file (BZ 1835422)
- GFS2 kernel trace points turning on by themselves (BZ 1825386)
- pcp-atop various fixes (BZ 1818710)
- SELinux prevents pmlogger from secure connection to remote pmcd (BZ 1826047)
- pmda-lustre fails to start since lustre 2.12 (BZ 1788937)
- Added labels support for pmrep and various pcp2xxx tools
- Update to latest pcp-5.1.1 PCP sources.
* Fri Apr 24 2020 Mark Goodwin <mgoodwin@redhat.com> - 5.1.0-1
* Fri May 08 2020 Mark Goodwin <mgoodwin@redhat.com> - 5.1.0-2
- Replace pmdads389log dep on 389-ds-base with a softdep
- Fix install deps for pcp-testsuite on pcp-pmda-mssql
- Improve pmlogger and pmie system log messages (BZ 1806428)
- Missing dep of pcp-pmda-dbping on perl-DBI (BZ 1790421)
- Missing dep of pcp-pmda-ds389log on 389-ds-base (BZ 1790422)
- Missing dep of pcp-pmda-gpsd on perl-JSON and perl-Time-HiRes (BZ 1790426)
- Missing dep of pcp-pmda-lmsensors on lm_sensors (BZ 1790427)
- Missing dep of pcp-pmda-redis on perl-autodie (BZ 1790431)
- Missing dep of pcp-pmda-smart on smartmontools (BZ 1790432)
- Missing dep of pcp-pmda-snmp on net-snmp-perl (BZ 1790433)
- SELinux issues with pcp-pmda-zimbra (BZ 1790437)
- Missing dep of pcp-pmda-pdns on perl-Time-HiRes (BZ 1790441)
- Installation of pcp-pmda-netcheck causes SELinux issue (BZ 1790450)
- Installation of pcp-pmda-samba causes SELinux issues (BZ 1790452)
- Some PMDAs are missing dependency on PCP (BZ 1790526)
- pmieconf randomly fails (BZ 1800545)
- collectl2pcp does not handle large collectl archives well (BZ 1785101)
- Missing dep of pcp-pmda-redis on perl-autodie and perl-Time-HiRes (BZ 1788519)
- Cannot install pcp-pmda-activemq, wrong location of RESTClient (BZ 1788878)
- Missing dep of pcp-pmda-bind2 on various perl packages (BZ 1790415)
- pmlogger_daily_report causing PCP upstream testsuite to fail (BZ 1805146)
- Missing selinux rules preventing pcp-pmda-named running rndc (BZ 1825663)
- SELinux is preventing PostgreSQL PMDA to collect metrics (BZ 1825957)
- pmnewlog generates inaccessible config file (BZ 1810110)
- pmnewlog is causing PCP testsuite to hang (BZ 1810118)
- pmdakvm: debugfs access is restricted (BZ 1824297)
- Error starting pmlogger; pid file not owned by root (BZ 1761962)
- Update to latest PCP sources.