pcp/SOURCES/redhat-bugzilla-1980067.patch

From 7b21619f0022b9eba7ad5b9c3c018471c4be9a93 Mon Sep 17 00:00:00 2001
From: Paul Evans <pevans@redhat.com>
Date: Mon, 8 Feb 2021 18:14:37 +0000
Subject: [PATCH 1/7] pmdahacluster: Quick README.md update
Update README.md to reflect the change of the PMNS root node to ha_cluster
---
src/pmdas/hacluster/README.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/pmdas/hacluster/README.md b/src/pmdas/hacluster/README.md
index 4d0fcabf5..709cb721d 100644
--- a/src/pmdas/hacluster/README.md
+++ b/src/pmdas/hacluster/README.md
@@ -6,7 +6,7 @@ The PMDA collects it's metric data from the following components that make up a
## General Notes
-### `hacluster.drbd.split_brain`
+### `ha_cluster.drbd.split_brain`
This metric signals if there is a split brain occurring in DRBD per instance resource:volume. The metric will return the value `1` if a split brain is detected, otherwise it will be `0`.
@@ -37,7 +37,7 @@ exposed by this PMDA.
Once the PMDA has been installed, the following command will list all of
the available metrics and their explanatory “help” text:
- # $ pminfo -fT hacluster
+ # $ pminfo -fT ha_cluster
## Installation
--
2.31.1
From f68df957286df0b0c2bb091d1025cf3c4adc2810 Mon Sep 17 00:00:00 2001
From: Ken McDonell <kenj@kenj.id.au>
Date: Sun, 4 Apr 2021 07:35:45 +1000
Subject: [PATCH 2/7] src/pmdas/hacluster/pacemaker.c: plug small mem leak on
error path in hacluster_refresh_pacemaker_resources() (covscan)
Fixes Coverity CID 366053.
---
src/pmdas/hacluster/pacemaker.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/pmdas/hacluster/pacemaker.c b/src/pmdas/hacluster/pacemaker.c
index 25d80ed41..2dc53f8cf 100644
--- a/src/pmdas/hacluster/pacemaker.c
+++ b/src/pmdas/hacluster/pacemaker.c
@@ -625,8 +625,11 @@ hacluster_refresh_pacemaker_resources(const char *instance_name, struct resource
pmsprintf(buffer, sizeof(buffer), "%s", crm_mon_command);
- if ((pf = popen(buffer, "r")) == NULL)
+ if ((pf = popen(buffer, "r")) == NULL) {
+ if (!no_node_attachment)
+ free(tofree);
return -oserror();
+ }
while(fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
--
2.31.1
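
Note: the fix above frees the strdup()'d instance buffer when popen() fails,
closing the leak Coverity flagged. Below is a minimal, self-contained sketch of
the same pattern, not the PMDA's actual code: the function and variable names
are illustrative, and plain errno stands in for PCP's oserror().

/* Sketch of the error-path cleanup pattern: any allocation made before
 * popen() must be released when popen() fails, otherwise it leaks on
 * every failed refresh.  Names here are illustrative, not the PMDA's.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
refresh_example(const char *command, const char *instance_name)
{
    char    *tofree, *str;
    FILE    *pf;

    /* duplicate the instance name so it can later be split with strsep() */
    tofree = str = strdup(instance_name);
    if (str == NULL)
        return -ENOMEM;

    if ((pf = popen(command, "r")) == NULL) {
        free(tofree);       /* release the duplicate before bailing out */
        return -errno;
    }

    /* ... parse the command output using str here ... */
    (void)str;

    pclose(pf);
    free(tofree);
    return 0;
}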
From 625e3a493611864d8785091d0f95a2e1ec293eea Mon Sep 17 00:00:00 2001
From: Paul Evans <pevans@redhat.com>
Date: Fri, 9 Apr 2021 21:10:12 +0100
Subject: [PATCH 3/7] pmdahacluster: Correct logic for
ha_cluster.pacemaker.resources.* metrics
Correct and simplify the logic for the pacemaker resources metrics in the
hacluster PMDA, fixing the issue seen under QA testing pointed out by Ken,
along with further corrections in the pacemaker resources metric
collection.
Also includes further buffer overflow guarding for most of the sscanf
cases in pacemaker.c.
Bonus: corrected spelling mistakes in the help text, also pointed out.
---
qa/1897.out | 32 ++++++++++++++++----------------
src/pmdas/hacluster/help | 28 ++++++++++++++--------------
src/pmdas/hacluster/pacemaker.c | 19 ++++---------------
3 files changed, 34 insertions(+), 45 deletions(-)
diff --git a/qa/1897.out b/qa/1897.out
index ff9c3ff6c..054b1e92f 100644
--- a/qa/1897.out
+++ b/qa/1897.out
@@ -337,7 +337,7 @@ ha_cluster.pacemaker.nodes.status.dc PMID: 155.3.8 [Whether the node status is g
Data Type: 32-bit unsigned int InDom: 155.2 0x26c00002
Semantics: instant Units: count
Help:
-Whether the disconnected status is reportered by the node in the cluster, a
+Whether the disconnected status is reported by the node in the cluster, a
value of 1 confirms the node status as disconnected.
inst [0 or "node-1"] value 0
inst [1 or "node-2"] value 1
@@ -346,7 +346,7 @@ ha_cluster.pacemaker.nodes.status.expected_up PMID: 155.3.7 [Whether the node st
Data Type: 32-bit unsigned int InDom: 155.2 0x26c00002
Semantics: instant Units: count
Help:
-Whether the expected_up status is reportered by the node in the cluster, a
+Whether the expected_up status is reported by the node in the cluster, a
value of 1 confirms the node status as expected_up.
inst [0 or "node-1"] value 1
inst [1 or "node-2"] value 1
@@ -355,7 +355,7 @@ ha_cluster.pacemaker.nodes.status.maintenance PMID: 155.3.3 [Whether the node st
Data Type: 32-bit unsigned int InDom: 155.2 0x26c00002
Semantics: instant Units: count
Help:
-Whether the maintenance status is reportered by the node in the cluster, a
+Whether the maintenance status is reported by the node in the cluster, a
value of 1 confirms the node status as online.
inst [0 or "node-1"] value 0
inst [1 or "node-2"] value 0
@@ -364,7 +364,7 @@ ha_cluster.pacemaker.nodes.status.online PMID: 155.3.0 [Whether the node status
Data Type: 32-bit unsigned int InDom: 155.2 0x26c00002
Semantics: instant Units: count
Help:
-Whether the online status is reportered by the node in the cluster, a value of
+Whether the online status is reported by the node in the cluster, a value of
1 confirms the node status as online.
inst [0 or "node-1"] value 1
inst [1 or "node-2"] value 1
@@ -373,7 +373,7 @@ ha_cluster.pacemaker.nodes.status.pending PMID: 155.3.4 [Whether the node status
Data Type: 32-bit unsigned int InDom: 155.2 0x26c00002
Semantics: instant Units: count
Help:
-Whether the pending status is reportered by the node in the cluster, a value of
+Whether the pending status is reported by the node in the cluster, a value of
1 confirms the node status as pending.
inst [0 or "node-1"] value 0
inst [1 or "node-2"] value 0
@@ -382,7 +382,7 @@ ha_cluster.pacemaker.nodes.status.shutdown PMID: 155.3.6 [Whether the node statu
Data Type: 32-bit unsigned int InDom: 155.2 0x26c00002
Semantics: instant Units: count
Help:
-Whether the shutdown status is reportered by the node in the cluster, a value
+Whether the shutdown status is reported by the node in the cluster, a value
of 1 confirms the node status as shutdown.
inst [0 or "node-1"] value 0
inst [1 or "node-2"] value 0
@@ -391,7 +391,7 @@ ha_cluster.pacemaker.nodes.status.standby PMID: 155.3.1 [Whether the node status
Data Type: 32-bit unsigned int InDom: 155.2 0x26c00002
Semantics: instant Units: count
Help:
-Whether the standby status is reportered by the node in the cluster, a value of
+Whether the standby status is reported by the node in the cluster, a value of
1 confirms the node status as standby.
inst [0 or "node-1"] value 0
inst [1 or "node-2"] value 0
@@ -400,7 +400,7 @@ ha_cluster.pacemaker.nodes.status.standby_on_fail PMID: 155.3.2 [Whether the nod
Data Type: 32-bit unsigned int InDom: 155.2 0x26c00002
Semantics: instant Units: count
Help:
-Whether the standby_on_fail status is reportered by the node in the cluster,
+Whether the standby_on_fail status is reported by the node in the cluster,
a value of 1 confirms the node status as standby_on_fail.
inst [0 or "node-1"] value 0
inst [1 or "node-2"] value 0
@@ -409,7 +409,7 @@ ha_cluster.pacemaker.nodes.status.unclean PMID: 155.3.5 [Whether the node status
Data Type: 32-bit unsigned int InDom: 155.2 0x26c00002
Semantics: instant Units: count
Help:
-Whether the unclean status is reportered by the node in the cluster, a value of
+Whether the unclean status is reported by the node in the cluster, a value of
1 confirms the node status as unclean.
inst [0 or "node-1"] value 0
inst [1 or "node-2"] value 0
@@ -466,7 +466,7 @@ ha_cluster.pacemaker.resources.managed PMID: 155.5.3 [Value is either true or fa
Semantics: instant Units: count
Help:
The value of whether the resource instance in the cluster is managed or not.
- inst [0 or "my_first_svc:node-1"] value 0
+ inst [0 or "my_first_svc:node-1"] value 1
inst [5 or "rsc_TEST:node-1"] value 1
inst [1 or "rsc_Test:node-1"] value 1
inst [2 or "rsc_Test:node-2"] value 1
@@ -489,9 +489,9 @@ ha_cluster.pacemaker.resources.status.active PMID: 155.5.5 [Whether the resource
Data Type: 32-bit unsigned int InDom: 155.4 0x26c00004
Semantics: instant Units: count
Help:
-Whether the active status is reportered by the resource in the cluster, a
+Whether the active status is reported by the resource in the cluster, a
value of 1 confirms the resource status as active.
- inst [0 or "my_first_svc:node-1"] value 0
+ inst [0 or "my_first_svc:node-1"] value 1
inst [5 or "rsc_TEST:node-1"] value 1
inst [1 or "rsc_Test:node-1"] value 1
inst [2 or "rsc_Test:node-2"] value 1
@@ -502,7 +502,7 @@ ha_cluster.pacemaker.resources.status.blocked PMID: 155.5.7 [Whether the resourc
Data Type: 32-bit unsigned int InDom: 155.4 0x26c00004
Semantics: instant Units: count
Help:
-Whether the blocked status is reportered by the resource in the cluster, a
+Whether the blocked status is reported by the resource in the cluster, a
value of 1 confirms the resource status as blocked.
inst [0 or "my_first_svc:node-1"] value 0
inst [5 or "rsc_TEST:node-1"] value 0
@@ -515,7 +515,7 @@ ha_cluster.pacemaker.resources.status.failed PMID: 155.5.8 [Whether the resource
Data Type: 32-bit unsigned int InDom: 155.4 0x26c00004
Semantics: instant Units: count
Help:
-Whether the failed status is reportered by the resource in the cluster, a
+Whether the failed status is reported by the resource in the cluster, a
value of 1 confirms the resource status as failed.
inst [0 or "my_first_svc:node-1"] value 0
inst [5 or "rsc_TEST:node-1"] value 0
@@ -528,7 +528,7 @@ ha_cluster.pacemaker.resources.status.failure_ignored PMID: 155.5.9 [Whether the
Data Type: 32-bit unsigned int InDom: 155.4 0x26c00004
Semantics: instant Units: count
Help:
-Whether the failure_ignored status is reportered by the resource in the
+Whether the failure_ignored status is reported by the resource in the
cluster, a value of 1 confirms the resource status as failure_ignored.
inst [0 or "my_first_svc:node-1"] value 0
inst [5 or "rsc_TEST:node-1"] value 0
@@ -541,7 +541,7 @@ ha_cluster.pacemaker.resources.status.orphaned PMID: 155.5.6 [Whether the resour
Data Type: 32-bit unsigned int InDom: 155.4 0x26c00004
Semantics: instant Units: count
Help:
-Whether the orphaned status is reportered by the resource in the cluster, a
+Whether the orphaned status is reported by the resource in the cluster, a
value of 1 confirms the resource status as orphaned.
inst [0 or "my_first_svc:node-1"] value 0
inst [5 or "rsc_TEST:node-1"] value 0
diff --git a/src/pmdas/hacluster/help b/src/pmdas/hacluster/help
index fa5bf9106..704e940c0 100644
--- a/src/pmdas/hacluster/help
+++ b/src/pmdas/hacluster/help
@@ -64,39 +64,39 @@ Pacemaker internals.
The membership type given to the node in the Pacemaker cluster.
@ ha_cluster.pacemaker.nodes.status.online Whether the node status is given as online
-Whether the online status is reportered by the node in the cluster, a value of
+Whether the online status is reported by the node in the cluster, a value of
1 confirms the node status as online.
@ ha_cluster.pacemaker.nodes.status.standby Whether the node status is given as standby
-Whether the standby status is reportered by the node in the cluster, a value of
+Whether the standby status is reported by the node in the cluster, a value of
1 confirms the node status as standby.
@ ha_cluster.pacemaker.nodes.status.standby_on_fail Whether the node status is given as standby_on_fail
-Whether the standby_on_fail status is reportered by the node in the cluster,
+Whether the standby_on_fail status is reported by the node in the cluster,
a value of 1 confirms the node status as standby_on_fail.
@ ha_cluster.pacemaker.nodes.status.maintenance Whether the node status is given as maintenance
-Whether the maintenance status is reportered by the node in the cluster, a
+Whether the maintenance status is reported by the node in the cluster, a
value of 1 confirms the node status as online.
@ ha_cluster.pacemaker.nodes.status.pending Whether the node status is given as pending
-Whether the pending status is reportered by the node in the cluster, a value of
+Whether the pending status is reported by the node in the cluster, a value of
1 confirms the node status as pending.
@ ha_cluster.pacemaker.nodes.status.unclean Whether the node status is given as unclean
-Whether the unclean status is reportered by the node in the cluster, a value of
+Whether the unclean status is reported by the node in the cluster, a value of
1 confirms the node status as unclean.
@ ha_cluster.pacemaker.nodes.status.shutdown Whether the node status is given as shutdown
-Whether the shutdown status is reportered by the node in the cluster, a value
+Whether the shutdown status is reported by the node in the cluster, a value
of 1 confirms the node status as shutdown.
@ ha_cluster.pacemaker.nodes.status.expected_up Whether the node status is given as expected_up
-Whether the expected_up status is reportered by the node in the cluster, a
+Whether the expected_up status is reported by the node in the cluster, a
value of 1 confirms the node status as expected_up.
@ ha_cluster.pacemaker.nodes.status.dc Whether the node status is given as disconnected
-Whether the disconnected status is reportered by the node in the cluster, a
+Whether the disconnected status is reported by the node in the cluster, a
value of 1 confirms the node status as disconnected.
@ ha_cluster.pacemaker.resources.agent The name of the resource agent for this resource
@@ -118,23 +118,23 @@ The value of whether the resource instance in the cluster is managed or not.
The value of the given role state for the resource instance in the cluster.
@ ha_cluster.pacemaker.resources.status.active Whether the resources status is given as active
-Whether the active status is reportered by the resource in the cluster, a
+Whether the active status is reported by the resource in the cluster, a
value of 1 confirms the resource status as active.
@ ha_cluster.pacemaker.resources.status.orphaned Whether the resources status is given as orphaned
-Whether the orphaned status is reportered by the resource in the cluster, a
+Whether the orphaned status is reported by the resource in the cluster, a
value of 1 confirms the resource status as orphaned.
@ ha_cluster.pacemaker.resources.status.blocked Whether the resources status is given as blocked
-Whether the blocked status is reportered by the resource in the cluster, a
+Whether the blocked status is reported by the resource in the cluster, a
value of 1 confirms the resource status as blocked.
@ ha_cluster.pacemaker.resources.status.failed Whether the resources status is given as failed
-Whether the failed status is reportered by the resource in the cluster, a
+Whether the failed status is reported by the resource in the cluster, a
value of 1 confirms the resource status as failed.
@ ha_cluster.pacemaker.resources.status.failure_ignored Whether the resources status is given as failure_ignored
-Whether the failure_ignored status is reportered by the resource in the
+Whether the failure_ignored status is reported by the resource in the
cluster, a value of 1 confirms the resource status as failure_ignored.
@ ha_cluster.corosync.quorate Value given for quorate
diff --git a/src/pmdas/hacluster/pacemaker.c b/src/pmdas/hacluster/pacemaker.c
index 2dc53f8cf..8f8e626a5 100644
--- a/src/pmdas/hacluster/pacemaker.c
+++ b/src/pmdas/hacluster/pacemaker.c
@@ -513,7 +513,7 @@ hacluster_refresh_pacemaker_nodes(const char *node_name, struct nodes *nodes)
/* Collect our node names */
if (found_nodes && strstr(buffer, node_name)) {
- sscanf(buffer, "%*s %*s %*s online=\"%[^\"]\" standby=\"%[^\"]\" standby_onfail=\"%[^\"]\" maintenance=\"%[^\"]\" pending=\"%[^\"]\" unclean=\"%[^\"]\" shutdown=\"%[^\"]\" expected_up=\"%[^\"]\" is_dc =\"%[^\"]\" %*s type=\"%[^\"]\"",
+ sscanf(buffer, "%*s %*s %*s online=\"%9[^\"]\" standby=\"%9[^\"]\" standby_onfail=\"%9[^\"]\" maintenance=\"%9[^\"]\" pending=\"%9[^\"]\" unclean=\"%9[^\"]\" shutdown=\"%9[^\"]\" expected_up=\"%9[^\"]\" is_dc =\"%9[^\"]\" %*s type=\"%9[^\"]\"",
online,
standby,
standby_on_fail,
@@ -671,19 +671,8 @@ hacluster_refresh_pacemaker_resources(const char *instance_name, struct resource
/* Collect our metrics */
if (strstr(buffer, "resource id=") && strstr(buffer, resource_id)) {
- if(strstr(resources->clone, "\0") || strstr(resources->group, "\0")) {
- sscanf(buffer, "%*s %*s resource_agent=\"%[^\"]\" role=\"%[^\"]\" active=\"%[^\"]\" orphaned=\"%[^\"]\" blocked=\"%[^\"]\" managed=\"%[^\"]\" failed=\"%[^\"]\" failure_ignored=\"%[^\"]\"",
- resources->agent,
- resources->role,
- active,
- orphaned,
- blocked,
- managed,
- failed,
- failure_ignored
- );
- } else if ((strstr(resources->clone, "\0") || strstr(resources->group, "\0")) && strstr(buffer, "target_role")) {
- sscanf(buffer, "%*s %*s resource_agent=\"%[^\"]\" role=\"%[^\"]\" %*s active=\"%[^\"]\" orphaned=\"%[^\"]\" blocked=\"%[^\"]\" managed=\"%[^\"]\" failed=\"%[^\"]\" failure_ignored=\"%[^\"]\"",
+ if (strstr(buffer, "target_role")) {
+ sscanf(buffer, "%*s %*s resource_agent=\"%[^\"]\" role=\"%[^\"]\" %*s active=\"%7[^\"]\" orphaned=\"%7[^\"]\" blocked=\"%7[^\"]\" managed=\"%7[^\"]\" failed=\"%7[^\"]\" failure_ignored=\"%7[^\"]\"",
resources->agent,
resources->role,
active,
@@ -694,7 +683,7 @@ hacluster_refresh_pacemaker_resources(const char *instance_name, struct resource
failure_ignored
);
} else {
- sscanf(buffer, "%*s %*s resource_agent=\"%[^\"]\" role=\"%[^\"]\" %*s active=\"%[^\"]\" orphaned=\"%[^\"]\" blocked=\"%[^\"]\" managed=\"%[^\"]\" failed=\"%[^\"]\" failure_ignored=\"%[^\"]\"",
+ sscanf(buffer, "%*s %*s resource_agent=\"%[^\"]\" role=\"%[^\"]\" active=\"%7[^\"]\" orphaned=\"%7[^\"]\" blocked=\"%7[^\"]\" managed=\"%7[^\"]\" failed=\"%7[^\"]\" failure_ignored=\"%7[^\"]\"",
resources->agent,
resources->role,
active,
--
2.31.1
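
Note: the buffer overflow guarding in this patch relies on sscanf() maximum
field widths: a conversion such as %9[^"] stores at most nine characters plus
the terminating NUL, matching the char name[10] destinations used in
pacemaker.c. A small standalone illustration follows; the input line is made
up for the example.

/* Each %9[^"] conversion writes at most 9 characters plus a NUL, so it
 * can never overflow the 10-byte destination buffers below.
 */
#include <stdio.h>

int
main(void)
{
    const char line[] =
        "<node name=\"node-1\" id=\"1\" online=\"true\" standby=\"false\"";
    char online[10], standby[10];

    /* an unbounded %[^"] could overflow online[]/standby[] */
    if (sscanf(line, "%*s %*s %*s online=\"%9[^\"]\" standby=\"%9[^\"]\"",
               online, standby) == 2)
        printf("online=%s standby=%s\n", online, standby);
    return 0;
}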
From 7f39eb78a86b244a046d7014c744abe21b3bef52 Mon Sep 17 00:00:00 2001
From: Paul Evans <pevans@redhat.com>
Date: Fri, 9 Apr 2021 21:12:19 +0100
Subject: [PATCH 4/7] pmdahacluster: Reduce log messages from popen()
The PMDA is designed to collect metrics on setups where not all of the
HA Cluster components are present. Suppress the logging warnings that
would otherwise appear when certain components are not available on the
host system.
---
src/pmdas/hacluster/corosync.c | 20 +++++++--------
src/pmdas/hacluster/drbd.c | 8 +++---
src/pmdas/hacluster/pacemaker.c | 14 +++++------
src/pmdas/hacluster/pmda.c | 44 +++++++++++++++++++--------------
src/pmdas/hacluster/sbd.c | 4 +--
5 files changed, 49 insertions(+), 41 deletions(-)
diff --git a/src/pmdas/hacluster/corosync.c b/src/pmdas/hacluster/corosync.c
index c7964b621..ee0052dee 100644
--- a/src/pmdas/hacluster/corosync.c
+++ b/src/pmdas/hacluster/corosync.c
@@ -139,10 +139,10 @@ hacluster_refresh_corosync_node(const char *node_name, struct member_votes *node
char *buffer_ptr;
FILE *pf;
- pmsprintf(buffer, sizeof(buffer), "%s", quorumtool_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", quorumtool_command);
if ((pf = popen(buffer, "r")) == NULL)
- return -oserror();
+ return oserror();
while(fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
if (strstr(buffer, node_name)) {
@@ -187,10 +187,10 @@ hacluster_refresh_corosync_global()
char buffer[4096], quorate[6];
FILE *pf;
- pmsprintf(buffer, sizeof(buffer), "%s", quorumtool_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", quorumtool_command);
if ((pf = popen(buffer, "r")) == NULL)
- return -oserror();
+ return oserror();
while(fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
@@ -218,10 +218,10 @@ hacluster_refresh_corosync_global()
}
pclose(pf);
- pmsprintf(buffer, sizeof(buffer), "%s", cfgtool_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", cfgtool_command);
if ((pf = popen(buffer, "r")) == NULL)
- return -oserror();
+ return oserror();
while(fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
@@ -240,10 +240,10 @@ hacluster_refresh_corosync_ring(const char *ring_name, struct rings *rings)
FILE *pf;
int ring_found = 0;
- pmsprintf(buffer, sizeof(buffer), "%s", cfgtool_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", cfgtool_command);
if ((pf = popen(buffer, "r")) == NULL)
- return -oserror();
+ return oserror();
while(fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
@@ -293,10 +293,10 @@ hacluster_refresh_corosync_ring(const char *ring_name, struct rings *rings)
}
pclose(pf);
- pmsprintf(buffer, sizeof(buffer), "%s", quorumtool_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", quorumtool_command);
if ((pf = popen(buffer, "r")) == NULL)
- return -oserror();
+ return oserror();
/*
* Check corosync-quorumtool for our node_id and ring_id values for our
diff --git a/src/pmdas/hacluster/drbd.c b/src/pmdas/hacluster/drbd.c
index 7fb3b04d0..bec83031c 100644
--- a/src/pmdas/hacluster/drbd.c
+++ b/src/pmdas/hacluster/drbd.c
@@ -156,10 +156,10 @@ hacluster_refresh_drbd_resource(const char *resource_name, struct resource *reso
int found_node = 0, found_volume = 0, nesting = 0;
- pmsprintf(buffer, sizeof(buffer), "%s", drbdsetup_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", drbdsetup_command);
if ((pf = popen(buffer, "r")) == NULL)
- return -oserror();
+ return oserror();
/*
* We need to split our combined NODE:VOLUME instance names into their
@@ -274,10 +274,10 @@ hacluster_refresh_drbd_peer_device(const char *peer_name, struct peer_device *pe
int found_node = 0, found_peer_node = 0, nesting = 0;
- pmsprintf(buffer, sizeof(buffer), "%s", drbdsetup_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", drbdsetup_command);
if ((pf = popen(buffer, "r")) == NULL)
- return -oserror();
+ return oserror();
/*
* We need to split our combined NODE:PEER_NODE_ID instance names into
diff --git a/src/pmdas/hacluster/pacemaker.c b/src/pmdas/hacluster/pacemaker.c
index 8f8e626a5..355f4df5c 100644
--- a/src/pmdas/hacluster/pacemaker.c
+++ b/src/pmdas/hacluster/pacemaker.c
@@ -360,7 +360,7 @@ hacluster_refresh_pacemaker_global()
char last_written_text[128], stonith[6];
FILE *pf;
- pmsprintf(buffer, sizeof(buffer), "%s", cibadmin_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", cibadmin_command);
if ((pf = popen(buffer, "r")) == NULL)
return -oserror();
@@ -374,7 +374,7 @@ hacluster_refresh_pacemaker_global()
}
pclose(pf);
- pmsprintf(buffer, sizeof(buffer), "%s", crm_mon_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", crm_mon_command);
if ((pf = popen(buffer, "r")) == NULL)
return -oserror();
@@ -402,7 +402,7 @@ hacluster_refresh_pacemaker_fail(const char *instance_name, struct fail_count *f
int found_node_history = 0, found_node_name = 0;
FILE *pf;
- pmsprintf(buffer, sizeof(buffer), "%s", crm_mon_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", crm_mon_command);
if ((pf = popen(buffer, "r")) == NULL)
return -oserror();
@@ -455,7 +455,7 @@ hacluster_refresh_pacemaker_constraints(const char *constraints_name, struct loc
int found_constraints = 0;
FILE *pf;
- pmsprintf(buffer, sizeof(buffer), "%s", cibadmin_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", cibadmin_command);
if ((pf = popen(buffer, "r")) == NULL)
return -oserror();
@@ -492,7 +492,7 @@ hacluster_refresh_pacemaker_nodes(const char *node_name, struct nodes *nodes)
char online[10], standby[10], standby_on_fail[10], maintenance[10], pending[10];
char unclean[10], shutdown[10], expected_up[10], dc[10];
- pmsprintf(buffer, sizeof(buffer), "%s", crm_mon_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", crm_mon_command);
if ((pf = popen(buffer, "r")) == NULL)
return -oserror();
@@ -549,7 +549,7 @@ hacluster_refresh_pacemaker_node_attribs(const char *attrib_name, struct attribu
int found_node_attributes = 0, found_node_name = 0;
FILE *pf;
- pmsprintf(buffer, sizeof(buffer), "%s", crm_mon_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", crm_mon_command);
if ((pf = popen(buffer, "r")) == NULL)
return -oserror();
@@ -623,7 +623,7 @@ hacluster_refresh_pacemaker_resources(const char *instance_name, struct resource
node = strsep(&str, ":");
}
- pmsprintf(buffer, sizeof(buffer), "%s", crm_mon_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", crm_mon_command);
if ((pf = popen(buffer, "r")) == NULL) {
if (!no_node_attachment)
diff --git a/src/pmdas/hacluster/pmda.c b/src/pmdas/hacluster/pmda.c
index 196de2e64..c7ffac6da 100644
--- a/src/pmdas/hacluster/pmda.c
+++ b/src/pmdas/hacluster/pmda.c
@@ -359,10 +359,10 @@ hacluster_pacemaker_fail_instance_refresh(void)
FILE *pf;
pmInDom indom = INDOM(PACEMAKER_FAIL_INDOM);
- pmsprintf(buffer, sizeof(buffer), "%s", crm_mon_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", crm_mon_command);
if ((pf = popen(buffer, "r")) == NULL)
- return -oserror();
+ return oserror();
while(fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
/* First we need to check whether we are in <node_history> section*/
@@ -426,11 +426,11 @@ hacluster_pacemaker_constraints_instance_refresh(void)
FILE *pf;
pmInDom indom = INDOM(PACEMAKER_CONSTRAINTS_INDOM);
- pmsprintf(buffer, sizeof(buffer), "%s", cibadmin_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", cibadmin_command);
buffer[sizeof(buffer)-1] = '\0';
if ((pf = popen(buffer, "r")) == NULL)
- return -oserror();
+ return oserror();
while(fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
/* First we need to check whether we are in <constraints> section*/
@@ -472,10 +472,10 @@ hacluster_pacemaker_nodes_instance_refresh(void)
FILE *pf;
pmInDom indom = INDOM(PACEMAKER_NODES_INDOM);
- pmsprintf(buffer, sizeof(buffer), "%s", crm_mon_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", crm_mon_command);
if ((pf = popen(buffer, "r")) == NULL)
- return -oserror();
+ return oserror();
while(fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
/* First we need to check whether we are in <nodes> section*/
@@ -525,10 +525,10 @@ hacluster_pacemaker_node_attrib_instance_refresh(void)
FILE *pf;
pmInDom indom = INDOM(PACEMAKER_NODE_ATTRIB_INDOM);
- pmsprintf(buffer, sizeof(buffer), "%s", crm_mon_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", crm_mon_command);
if ((pf = popen(buffer, "r")) == NULL)
- return -oserror();
+ return oserror();
while(fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
/* First we need to check whether we are in <node_history> section*/
@@ -598,10 +598,10 @@ hacluster_pacemaker_resources_instance_refresh(void)
FILE *pf;
pmInDom indom= INDOM(PACEMAKER_RESOURCES_INDOM);
- pmsprintf(buffer, sizeof(buffer), "%s", crm_mon_command);
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", crm_mon_command);
if ((pf = popen(buffer, "r")) == NULL)
- return -oserror();
+ return oserror();
while(fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
/* First we need to check whether we are in <resources> section*/
@@ -676,9 +676,11 @@ hacluster_corosync_node_instance_refresh(void)
* membership information section of corosync-quorumtool output
*/
pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
+
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", quorumtool_command);
- if ((pf = popen(quorumtool_command, "r")) == NULL)
- return -oserror();
+ if ((pf = popen(buffer, "r")) == NULL)
+ return oserror();
while (fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
/* Clear whitespace at start of each line */
@@ -735,8 +737,10 @@ hacluster_corosync_ring_instance_refresh(void)
*/
pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
- if ((pf = popen(cfgtool_command, "r")) == NULL)
- return -oserror();
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", cfgtool_command);
+
+ if ((pf = popen(buffer, "r")) == NULL)
+ return oserror();
while(fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
@@ -865,8 +869,10 @@ hacluster_drbd_resource_instance_refresh(void)
*/
pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
- if ((pf = popen(drbdsetup_command, "r")) == NULL)
- return -oserror();
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", drbdsetup_command);
+
+ if ((pf = popen(buffer, "r")) == NULL)
+ return oserror();
while(fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
/* Clear whitespace at start of each line */
@@ -940,8 +946,10 @@ hacluster_drbd_peer_device_instance_refresh(void)
*/
pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
- if ((pf = popen(drbdsetup_command, "r")) == NULL)
- return -oserror();
+ pmsprintf(buffer, sizeof(buffer), "%s 2>&1", drbdsetup_command);
+
+ if ((pf = popen(buffer, "r")) == NULL)
+ return oserror();
while(fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
/* Clear whitespace at start of each line */
diff --git a/src/pmdas/hacluster/sbd.c b/src/pmdas/hacluster/sbd.c
index 5f55d0734..2824f6589 100644
--- a/src/pmdas/hacluster/sbd.c
+++ b/src/pmdas/hacluster/sbd.c
@@ -74,10 +74,10 @@ hacluster_refresh_sbd_device(const char *sbd_dev, struct sbd *sbd)
char buffer[4096];
FILE *pf;
- pmsprintf(buffer, sizeof(buffer), "%s -d %s dump", sbd_command, sbd_dev);
+ pmsprintf(buffer, sizeof(buffer), "%s -d %s dump 2>&1", sbd_command, sbd_dev);
if ((pf = popen(buffer, "r")) == NULL)
- return -oserror();
+ return oserror();
strncpy(sbd->path, sbd_dev, sizeof(sbd->path));
sbd->path[sizeof(sbd->path)-1] = '\0';
--
2.31.1
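
Note: the recurring change in this patch is the trailing " 2>&1" added to each
command string before popen(), so stderr from missing cluster tools is read
back through the pipe rather than landing in the PMDA log. A simplified sketch
of the pattern follows, using snprintf() in place of pmsprintf() and a
stand-in command string.

/* Fold the command's stderr into the pipe so "command not found" style
 * diagnostics from absent HA components never reach the PMDA log.
 */
#include <errno.h>
#include <stdio.h>

static const char *quorumtool_command = "corosync-quorumtool -p";

static int
refresh_example(void)
{
    char    buffer[4096];
    FILE    *pf;

    snprintf(buffer, sizeof(buffer), "%s 2>&1", quorumtool_command);

    if ((pf = popen(buffer, "r")) == NULL)
        return errno;

    while (fgets(buffer, sizeof(buffer)-1, pf) != NULL) {
        /* parse wanted lines; error output is simply skipped */
    }

    return pclose(pf);
}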
From 91241a5f76fc83895621fe35f399b5dcd97e796e Mon Sep 17 00:00:00 2001
From: Paul Evans <pevans@redhat.com>
Date: Fri, 18 Jun 2021 00:30:39 +0100
Subject: [PATCH 5/7] pmdahacluster: Add support for labels on key metrics
Introduce a number of labels on some metrics, allowing metadata to be
exported in cases where PM_TYPE_STRING is not supported.
This allows information including Pacemaker location constraint data to
be successfully exported by pmproxy using the OpenMetrics API.
Satisfies RHBZ# 1972277: PCP - PMDA HA Cluster (pcp-pmda-hacluster) - unable
to export pacemaker location constraints data due to unsupported metric type
---
qa/1897.out | 66 ++++-
src/pmdas/hacluster/corosync.c | 7 +
src/pmdas/hacluster/corosync.h | 1 +
src/pmdas/hacluster/drbd.c | 14 ++
src/pmdas/hacluster/drbd.h | 2 +
src/pmdas/hacluster/help | 30 ++-
src/pmdas/hacluster/pacemaker.c | 21 ++
src/pmdas/hacluster/pacemaker.h | 3 +
src/pmdas/hacluster/pmda.c | 369 ++++++++++++++++++++++++++--
src/pmdas/hacluster/pmdahacluster.h | 58 +++--
src/pmdas/hacluster/pmns | 119 ++++-----
src/pmdas/hacluster/sbd.c | 7 +
src/pmdas/hacluster/sbd.h | 1 +
13 files changed, 602 insertions(+), 96 deletions(-)
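
Note: the new per-instance "*.all" metrics introduced below always carry the
value 1; the descriptive strings travel as instance labels so consumers
restricted to numeric types (for example the OpenMetrics export through
pmproxy) still see the detail. The following is a rough sketch, not code from
this patch, of how such labels could be attached in a label callback
registered with pmdaSetLabelCallBack(); the struct fields and return counts
are hypothetical.

/* Hypothetical sketch: attach location-constraint details as instance
 * labels for a constant-valued "all" metric (assumes PMDA_INTERFACE_7).
 */
#include <pcp/pmapi.h>
#include <pcp/pmda.h>

struct location_constraints {       /* illustrative fields only */
    char    node[128];
    char    resource[128];
    char    score[128];
};

static int
example_labelCallBack(pmInDom indom, unsigned int inst, pmLabelSet **lp)
{
    struct location_constraints *lc;

    if (pmdaCacheLookup(indom, inst, NULL, (void **)&lc) != PMDA_CACHE_ACTIVE)
        return 0;

    /* the metric value stays 1; the detail rides on these labels */
    pmdaAddLabels(lp, "{\"node\":\"%s\"}", lc->node);
    pmdaAddLabels(lp, "{\"resource\":\"%s\"}", lc->resource);
    pmdaAddLabels(lp, "{\"score\":\"%s\"}", lc->score);
    return 3;
}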
diff --git a/qa/1897.out b/qa/1897.out
index 054b1e92f..b757e857a 100644
--- a/qa/1897.out
+++ b/qa/1897.out
@@ -76,6 +76,14 @@ Help:
The IP address locally linked to this ring.
inst [0 or "0"] value "192.168.122.101"
+ha_cluster.corosync.rings.all PMID: 155.15.0 [Corosync rings information]
+ Data Type: 32-bit unsigned int InDom: 155.13 0x26c0000d
+ Semantics: instant Units: count
+Help:
+Value is 1 if a ring exists. The details of the corrosponding ring
+is given as label metadata values for this metric.
+ inst [0 or "0"] value 1
+
ha_cluster.corosync.rings.node_id PMID: 155.8.2 [ID of the local node]
Data Type: 64-bit unsigned int InDom: 155.6 0x26c00006
Semantics: instant Units: count
@@ -120,6 +128,14 @@ Help:
Amount of writes to the bitmap area of metadata by the DRBD resource:volume.
inst [0 or "drbd1:0"] value 0
+ha_cluster.drbd.connections.all PMID: 155.18.0 [DRBD Peer disk information]
+ Data Type: 32-bit unsigned int InDom: 155.16 0x26c00010
+ Semantics: instant Units: count
+Help:
+Value is 1 if a drbd peer connection exists. The details of the corrosponding DRBD peer
+connection is given as label metadata values for this metric.
+ inst [0 or "drbd1:1"] value 1
+
ha_cluster.drbd.connections.peer_disk_state PMID: 155.11.4 [Peer disk state]
Data Type: string InDom: 155.9 0x26c00009
Semantics: instant Units: count
@@ -218,6 +234,14 @@ Help:
Amount in KiB read by the DRBD resource:volume.
inst [0 or "drbd1:0"] value 1888160
+ha_cluster.drbd.resources.all PMID: 155.17.0 [DRBD resource information]
+ Data Type: 32-bit unsigned int InDom: 155.15 0x26c0000f
+ Semantics: instant Units: count
+Help:
+Value is 1 if a drbd resource exists. The details of the corrosponding drbd resource
+is given as label metadata values for this metric.
+ inst [0 or "drbd1:0"] value 1
+
ha_cluster.drbd.resources.disk_state PMID: 155.10.3 [Disk state]
Data Type: string InDom: 155.8 0x26c00008
Semantics: instant Units: count
@@ -285,6 +309,14 @@ The number of fail count per node and resource ID, the actual maximum value
depends on Pacemaker internals.
inst [0 or "node-1:my_first_svc"] value 0
+ha_cluster.pacemaker.location_constraints.all PMID: 155.12.0 [Location constraint information]
+ Data Type: 32-bit unsigned int InDom: 155.10 0x26c0000a
+ Semantics: instant Units: count
+Help:
+Value is 1 if a location constraint exists. The details of the location constraint
+is given as label metadata values for this metric.
+ inst [0 or "test"] value 1
+
ha_cluster.pacemaker.location_constraints.node PMID: 155.2.0 [Node of the location constraint]
Data Type: string InDom: 155.1 0x26c00001
Semantics: instant Units: count
@@ -327,12 +359,23 @@ ha_cluster.pacemaker.node_attributes PMID: 155.4.0 [Metadata used by Resource Ag
Semantics: instant Units: count
Help:
The raw values for the cluster metadata attributes and their value per node as
-used by the Resource Agents.
+used by the Resource Agents.
inst [0 or "node-1:test_clone_state"] value "PROMOTED"
inst [1 or "node-1:test_site"] value "PRIMARY_SITE_NAME"
inst [2 or "node-2:test_clone_state"] value "DEMOTED"
inst [3 or "node-2:test_site"] value "SECONDARY_SITE_NAME"
+ha_cluster.pacemaker.node_attributes_all PMID: 155.13.0 [Metadata information]
+ Data Type: 32-bit unsigned int InDom: 155.11 0x26c0000b
+ Semantics: instant Units: count
+Help:
+Value is 1 if a node metadata exists. The details of the node metadata
+is given as label metadata values for this metric.
+ inst [0 or "node-1:test_clone_state"] value 1
+ inst [1 or "node-1:test_site"] value 1
+ inst [2 or "node-2:test_clone_state"] value 1
+ inst [3 or "node-2:test_site"] value 1
+
ha_cluster.pacemaker.nodes.status.dc PMID: 155.3.8 [Whether the node status is given as disconnected]
Data Type: 32-bit unsigned int InDom: 155.2 0x26c00002
Semantics: instant Units: count
@@ -435,6 +478,19 @@ cluster.
inst [4 or "rsc_fs_TEST:node-1"] value "ocf::heartbeat:Filesystem"
inst [3 or "rsc_ip_TEST:node-1"] value "ocf::heartbeat:IPaddr2"
+ha_cluster.pacemaker.resources.all PMID: 155.14.0 [Pacemaker resources information]
+ Data Type: 32-bit unsigned int InDom: 155.12 0x26c0000c
+ Semantics: instant Units: count
+Help:
+Value is 1 if a resources exists. The details of the resource
+is given as label metadata values for this metric.
+ inst [0 or "my_first_svc:node-1"] value 1
+ inst [5 or "rsc_TEST:node-1"] value 1
+ inst [1 or "rsc_Test:node-1"] value 1
+ inst [2 or "rsc_Test:node-2"] value 1
+ inst [4 or "rsc_fs_TEST:node-1"] value 1
+ inst [3 or "rsc_ip_TEST:node-1"] value 1
+
ha_cluster.pacemaker.resources.clone PMID: 155.5.1 [The name of the clone given for this resource]
Data Type: string InDom: 155.4 0x26c00004
Semantics: instant Units: count
@@ -557,6 +613,14 @@ Help:
Value returns as to whether stonith is enabled or disabled for the cluster.
value 0
+ha_cluster.sbd.all PMID: 155.16.0 [SBD device information]
+ Data Type: 32-bit unsigned int InDom: 155.14 0x26c0000e
+ Semantics: instant Units: count
+Help:
+Value is 1 if a sbd device exists. The details of the corrosponding SBD device
+is given as label metadata values for this metric.
+ inst [0 or "/dev/vdb"] value 1
+
ha_cluster.sbd.devices.path PMID: 155.9.0 [Path of SBD device]
Data Type: string InDom: 155.7 0x26c00007
Semantics: instant Units: count
diff --git a/src/pmdas/hacluster/corosync.c b/src/pmdas/hacluster/corosync.c
index ee0052dee..5549e59b9 100644
--- a/src/pmdas/hacluster/corosync.c
+++ b/src/pmdas/hacluster/corosync.c
@@ -132,6 +132,13 @@ hacluster_corosync_ring_fetch(int item, struct rings *rings, pmAtomValue *atom)
return PMDA_FETCH_NOVALUES;
}
+int
+hacluster_corosync_ring_all_fetch(int item, pmAtomValue *atom)
+{
+ atom->ul = 1; /* Assign default exists value 1 */
+ return PMDA_FETCH_STATIC;
+}
+
int
hacluster_refresh_corosync_node(const char *node_name, struct member_votes *node)
{
diff --git a/src/pmdas/hacluster/corosync.h b/src/pmdas/hacluster/corosync.h
index f3d265d17..11c3b3b45 100644
--- a/src/pmdas/hacluster/corosync.h
+++ b/src/pmdas/hacluster/corosync.h
@@ -73,6 +73,7 @@ extern int hacluster_corosync_global_fetch(int, pmAtomValue *);
extern int hacluster_refresh_corosync_global();
extern int hacluster_corosync_ring_fetch(int, struct rings *, pmAtomValue *);
+extern int hacluster_corosync_ring_all_fetch(int, pmAtomValue *);
extern int hacluster_refresh_corosync_ring(const char *, struct rings *);
extern void corosync_stats_setup(void);
diff --git a/src/pmdas/hacluster/drbd.c b/src/pmdas/hacluster/drbd.c
index bec83031c..2c18a5fae 100644
--- a/src/pmdas/hacluster/drbd.c
+++ b/src/pmdas/hacluster/drbd.c
@@ -90,6 +90,13 @@ hacluster_drbd_resource_fetch(int item, struct resource *resource, pmAtomValue *
return PMDA_FETCH_NOVALUES;
}
+int
+hacluster_drbd_resource_all_fetch(int item, pmAtomValue *atom)
+{
+ atom->ul = 1; /* Assign default exists value 1 */
+ return PMDA_FETCH_STATIC;
+}
+
int
hacluster_drbd_peer_device_fetch(int item, struct peer_device *peer_device, pmAtomValue *atom)
{
@@ -146,6 +153,13 @@ hacluster_drbd_peer_device_fetch(int item, struct peer_device *peer_device, pmAt
return PMDA_FETCH_NOVALUES;
}
+int
+hacluster_drbd_peer_device_all_fetch(int item, pmAtomValue *atom)
+{
+ atom->ul = 1; /* Assign default exists value 1 */
+ return PMDA_FETCH_STATIC;
+}
+
int
hacluster_refresh_drbd_resource(const char *resource_name, struct resource *resource)
{
diff --git a/src/pmdas/hacluster/drbd.h b/src/pmdas/hacluster/drbd.h
index 0cb687e78..35a2a898c 100644
--- a/src/pmdas/hacluster/drbd.h
+++ b/src/pmdas/hacluster/drbd.h
@@ -83,9 +83,11 @@ struct peer_device {
};
extern int hacluster_drbd_resource_fetch(int, struct resource *, pmAtomValue *);
+extern int hacluster_drbd_resource_all_fetch(int, pmAtomValue *);
extern int hacluster_refresh_drbd_resource(const char *, struct resource *);
extern int hacluster_drbd_peer_device_fetch(int, struct peer_device *, pmAtomValue *);
+extern int hacluster_drbd_peer_device_all_fetch(int, pmAtomValue *);
extern int hacluster_refresh_drbd_peer_device(const char *, struct peer_device *);
extern void drbd_stats_setup(void);
diff --git a/src/pmdas/hacluster/help b/src/pmdas/hacluster/help
index 704e940c0..bdcd68e5f 100644
--- a/src/pmdas/hacluster/help
+++ b/src/pmdas/hacluster/help
@@ -42,7 +42,11 @@ Pacemaker cluster.
@ ha_cluster.pacemaker.node_attributes Metadata used by Resource Agents
The raw values for the cluster metadata attributes and their value per node as
-used by the Resource Agents.
+used by the Resource Agents.
+
+@ ha_cluster.pacemaker.node_attributes_all Metadata information
+Value is 1 if a node metadata exists. The details of the node metadata
+is given as label metadata values for this metric.
@ ha_cluster.pacemaker.stonith_enabled Whether stonith is enabled in the cluster
Value returns as to whether stonith is enabled or disabled for the cluster.
@@ -60,6 +64,10 @@ The resource role that the location contraint applies to, if any.
The score given to the location constraint by Pacemaker, the value depends on
Pacemaker internals.
+@ ha_cluster.pacemaker.location_constraints.all Location constraint information
+Value is 1 if a location constraint exists. The details of the location constraint
+is given as label metadata values for this metric.
+
@ ha_cluster.pacemaker.nodes.type The type given to the node
The membership type given to the node in the Pacemaker cluster.
@@ -137,6 +145,10 @@ value of 1 confirms the resource status as failed.
Whether the failure_ignored status is reported by the resource in the
cluster, a value of 1 confirms the resource status as failure_ignored.
+@ ha_cluster.pacemaker.resources.all Pacemaker resources information
+Value is 1 if a resources exists. The details of the resource
+is given as label metadata values for this metric.
+
@ ha_cluster.corosync.quorate Value given for quorate
The value represents whether or not the cluster is quorate.
@@ -180,6 +192,10 @@ The number for the Corosync ring.
@ ha_cluster.corosync.rings.ring_id Ring ID
The internal Corosync ring ID, corresponds to the first node to join.
+@ ha_cluster.corosync.rings.all Corosync rings information
+Value is 1 if a ring exists. The details of the corrosponding ring
+is given as label metadata values for this metric.
+
@ ha_cluster.sbd.devices.path Path of SBD device
The full path given to each SBD device.
@@ -187,6 +203,10 @@ The full path given to each SBD device.
The current status given for each of the SBD devices, the value is one of
healthy or unhealthy.
+@ ha_cluster.sbd.all SBD device information
+Value is 1 if a sbd device exists. The details of the corrosponding SBD device
+is given as label metadata values for this metric.
+
@ ha_cluster.sbd.timeouts.mgswait mgswait timeout value
The value threshold for msgwait timeouts for the given SBD device.
@@ -260,6 +280,10 @@ The volume number of ther resource for each resource:volume.
@ ha_cluster.drbd.resources.disk_state Disk state
The current reported disk state of for the resource:volume.
+@ ha_cluster.drbd.resources.all DRBD resource information
+Value is 1 if a drbd resource exists. The details of the corrosponding drbd resource
+is given as label metadata values for this metric.
+
@ ha_cluster.drbd.connections.resource Resource that the connection is for
The given resource that the DRBD connection is for each resource:volume.
@@ -274,3 +298,7 @@ The reported volume for the connection.
@ ha_cluster.drbd.connections.peer_disk_state Peer disk state
The reported peer disk state for the connection.
+
+@ ha_cluster.drbd.connections.all DRBD Peer disk information
+Value is 1 if a drbd peer connection exists. The details of the corrosponding DRBD peer
+connection is given as label metadata values for this metric.
diff --git a/src/pmdas/hacluster/pacemaker.c b/src/pmdas/hacluster/pacemaker.c
index 355f4df5c..beff98b83 100644
--- a/src/pmdas/hacluster/pacemaker.c
+++ b/src/pmdas/hacluster/pacemaker.c
@@ -221,6 +221,13 @@ hacluster_pacemaker_constraints_fetch(int item, struct location_constraints *loc
return PMDA_FETCH_NOVALUES;
}
+int
+hacluster_pacemaker_constraints_all_fetch(int item, pmAtomValue *atom)
+{
+ atom->ul = 1; /* Assign default exists value 1 */
+ return PMDA_FETCH_STATIC;
+}
+
int
hacluster_pacemaker_nodes_fetch(int item, struct nodes *nodes, pmAtomValue *atom)
{
@@ -297,6 +304,13 @@ hacluster_pacemaker_node_attribs_fetch(int item, struct attributes *attributes,
return PMDA_FETCH_NOVALUES;
}
+int
+hacluster_pacemaker_node_attribs_all_fetch(int item, pmAtomValue *atom)
+{
+ atom->ul = 1; /* Assign default exists value 1 */
+ return PMDA_FETCH_STATIC;
+}
+
int
hacluster_pacemaker_resources_fetch(int item, struct resources *resources, pmAtomValue *atom)
{
@@ -353,6 +367,13 @@ hacluster_pacemaker_resources_fetch(int item, struct resources *resources, pmAto
return PMDA_FETCH_NOVALUES;
}
+int
+hacluster_pacemaker_resources_all_fetch(int item, pmAtomValue *atom)
+{
+ atom->ul = 1; /* Assign default exists value 1 */
+ return PMDA_FETCH_STATIC;
+}
+
int
hacluster_refresh_pacemaker_global()
{
diff --git a/src/pmdas/hacluster/pacemaker.h b/src/pmdas/hacluster/pacemaker.h
index fe175e37f..0bd2a7e0f 100644
--- a/src/pmdas/hacluster/pacemaker.h
+++ b/src/pmdas/hacluster/pacemaker.h
@@ -124,15 +124,18 @@ extern int hacluster_pacemaker_fail_fetch(int, struct fail_count *, pmAtomValue
extern int hacluster_refresh_pacemaker_fail(const char *, struct fail_count *);
extern int hacluster_pacemaker_constraints_fetch(int, struct location_constraints *, pmAtomValue *);
+extern int hacluster_pacemaker_constraints_all_fetch(int, pmAtomValue *);
extern int hacluster_refresh_pacemaker_constraints(const char *, struct location_constraints *);
extern int hacluster_pacemaker_nodes_fetch(int, struct nodes *, pmAtomValue *);
extern int hacluster_refresh_pacemaker_nodes(const char *, struct nodes *);
extern int hacluster_pacemaker_node_attribs_fetch(int, struct attributes *, pmAtomValue *);
+extern int hacluster_pacemaker_node_attribs_all_fetch(int, pmAtomValue *);
extern int hacluster_refresh_pacemaker_node_attribs(const char *, struct attributes *);
extern int hacluster_pacemaker_resources_fetch(int, struct resources *, pmAtomValue *);
+extern int hacluster_pacemaker_resources_all_fetch(int, pmAtomValue *);
extern int hacluster_refresh_pacemaker_resources(const char *, struct resources *);
extern void pacemaker_stats_setup(void);
diff --git a/src/pmdas/hacluster/pmda.c b/src/pmdas/hacluster/pmda.c
index c7ffac6da..6c9163b25 100644
--- a/src/pmdas/hacluster/pmda.c
+++ b/src/pmdas/hacluster/pmda.c
@@ -42,6 +42,13 @@ pmdaIndom indomtable[] = {
{ .it_indom = SBD_DEVICE_INDOM },
{ .it_indom = DRBD_RESOURCE_INDOM },
{ .it_indom = DRBD_PEER_DEVICE_INDOM },
+ { .it_indom = PACEMAKER_CONSTRAINTS_ALL_INDOM },
+ { .it_indom = PACEMAKER_NODE_ATTRIB_ALL_INDOM },
+ { .it_indom = PACEMAKER_RESOURCES_ALL_INDOM },
+ { .it_indom = COROSYNC_RING_ALL_INDOM},
+ { .it_indom = SBD_DEVICE_ALL_INDOM},
+ { .it_indom = DRBD_RESOURCE_ALL_INDOM},
+ { .it_indom = DRBD_PEER_DEVICE_ALL_INDOM},
};
#define INDOM(x) (indomtable[x].it_indom)
@@ -83,6 +90,10 @@ pmdaMetric metrictable[] = {
PMDA_PMID(CLUSTER_PACEMAKER_CONSTRAINTS, PACEMAKER_CONSTRAINTS_SCORE),
PM_TYPE_STRING, PACEMAKER_CONSTRAINTS_INDOM, PM_SEM_INSTANT,
PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+ { .m_desc = {
+ PMDA_PMID(CLUSTER_PACEMAKER_CONSTRAINTS_ALL, 0),
+ PM_TYPE_U32, PACEMAKER_CONSTRAINTS_ALL_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
{ .m_desc = {
PMDA_PMID(CLUSTER_PACEMAKER_NODES, PACEMAKER_NODES_ONLINE),
PM_TYPE_U32, PACEMAKER_NODES_INDOM, PM_SEM_INSTANT,
@@ -127,6 +138,10 @@ pmdaMetric metrictable[] = {
PMDA_PMID(CLUSTER_PACEMAKER_NODE_ATTRIB, PACEMAKER_NODES_ATTRIB_VALUE),
PM_TYPE_STRING, PACEMAKER_NODE_ATTRIB_INDOM, PM_SEM_INSTANT,
PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+ { .m_desc = {
+ PMDA_PMID(CLUSTER_PACEMAKER_NODE_ATTRIB_ALL, 0),
+ PM_TYPE_U32, PACEMAKER_NODE_ATTRIB_ALL_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
{ .m_desc = {
PMDA_PMID(CLUSTER_PACEMAKER_RESOURCES, PACEMAKER_RESOURCES_AGENT),
PM_TYPE_STRING, PACEMAKER_RESOURCES_INDOM, PM_SEM_INSTANT,
@@ -139,6 +154,10 @@ pmdaMetric metrictable[] = {
PMDA_PMID(CLUSTER_PACEMAKER_RESOURCES, PACEMAKER_RESOURCES_GROUP),
PM_TYPE_STRING, PACEMAKER_RESOURCES_INDOM, PM_SEM_INSTANT,
PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+ { .m_desc = {
+ PMDA_PMID(CLUSTER_PACEMAKER_RESOURCES_ALL, 0),
+ PM_TYPE_U32, PACEMAKER_RESOURCES_ALL_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
{ .m_desc = {
PMDA_PMID(CLUSTER_PACEMAKER_RESOURCES, PACEMAKER_RESOURCES_MANAGED),
PM_TYPE_U32, PACEMAKER_RESOURCES_INDOM, PM_SEM_INSTANT,
@@ -228,6 +247,10 @@ pmdaMetric metrictable[] = {
PMDA_PMID(CLUSTER_COROSYNC_RING, COROSYNC_RINGS_RING_ID),
PM_TYPE_STRING, COROSYNC_RING_INDOM, PM_SEM_INSTANT,
PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+ { .m_desc = {
+ PMDA_PMID(CLUSTER_COROSYNC_RING_ALL, 0),
+ PM_TYPE_U32, COROSYNC_RING_ALL_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
/* SBD */
{ .m_desc = {
PMDA_PMID(CLUSTER_SBD_DEVICE, SBD_DEVICE_PATH),
@@ -237,6 +260,10 @@ pmdaMetric metrictable[] = {
PMDA_PMID(CLUSTER_SBD_DEVICE, SBD_DEVICE_STATUS),
PM_TYPE_STRING, SBD_DEVICE_INDOM, PM_SEM_INSTANT,
PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+ { .m_desc = {
+ PMDA_PMID(CLUSTER_SBD_DEVICE_ALL, 0),
+ PM_TYPE_U32, SBD_DEVICE_ALL_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
{ .m_desc = {
PMDA_PMID(CLUSTER_SBD_DEVICE, SBD_DEVICE_TIMEOUT_MSGWAIT),
PM_TYPE_U32, SBD_DEVICE_INDOM, PM_SEM_INSTANT,
@@ -270,6 +297,10 @@ pmdaMetric metrictable[] = {
PMDA_PMID(CLUSTER_DRBD_RESOURCE, DRBD_RESOURCE_DISK_STATE),
PM_TYPE_STRING, DRBD_RESOURCE_INDOM, PM_SEM_INSTANT,
PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+ { .m_desc ={
+ PMDA_PMID(CLUSTER_DRBD_RESOURCE_ALL, 0),
+ PM_TYPE_U32, DRBD_RESOURCE_ALL_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
{ .m_desc = {
PMDA_PMID(CLUSTER_DRBD_RESOURCE, DRBD_RESOURCE_WRITTEN),
PM_TYPE_U32, DRBD_RESOURCE_INDOM, PM_SEM_INSTANT,
@@ -318,6 +349,10 @@ pmdaMetric metrictable[] = {
PMDA_PMID(CLUSTER_DRBD_PEER_DEVICE, DRBD_PEER_DEVICE_PEER_DISK_STATE),
PM_TYPE_STRING, DRBD_PEER_DEVICE_INDOM, PM_SEM_INSTANT,
PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+ { .m_desc = {
+ PMDA_PMID(CLUSTER_DRBD_PEER_DEVICE_ALL, 0),
+ PM_TYPE_U32, DRBD_PEER_DEVICE_ALL_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
{ .m_desc = {
PMDA_PMID(CLUSTER_DRBD_PEER_DEVICE, DRBD_PEER_DEVICE_CONNECTIONS_SYNC),
PM_TYPE_FLOAT, DRBD_PEER_DEVICE_INDOM, PM_SEM_INSTANT,
@@ -359,6 +394,12 @@ hacluster_pacemaker_fail_instance_refresh(void)
FILE *pf;
pmInDom indom = INDOM(PACEMAKER_FAIL_INDOM);
+ /*
+ * Update indom cache based off the reading of crm_mon listed in
+ * the output from crm_mon
+ */
+ pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
+
pmsprintf(buffer, sizeof(buffer), "%s 2>&1", crm_mon_command);
if ((pf = popen(buffer, "r")) == NULL)
@@ -425,6 +466,14 @@ hacluster_pacemaker_constraints_instance_refresh(void)
int found_constraints = 0;
FILE *pf;
pmInDom indom = INDOM(PACEMAKER_CONSTRAINTS_INDOM);
+ pmInDom indom_all = INDOM(PACEMAKER_CONSTRAINTS_ALL_INDOM);
+
+ /*
+ * Update indom cache based off the reading of cibadmin listed in
+ * the output from cibadmin
+ */
+ pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
+ pmdaCacheOp(indom_all, PMDA_CACHE_INACTIVE);
pmsprintf(buffer, sizeof(buffer), "%s 2>&1", cibadmin_command);
buffer[sizeof(buffer)-1] = '\0';
@@ -446,6 +495,7 @@ hacluster_pacemaker_constraints_instance_refresh(void)
struct pacemaker_constraints *constraints;
sts = pmdaCacheLookupName(indom, constraint_name, NULL, (void **)&constraints);
+ pmdaCacheLookupName(indom_all, constraint_name, NULL, NULL);
if (sts == PM_ERR_INST || (sts >=0 && constraints == NULL)) {
constraints = calloc(1, sizeof(struct pacemaker_constraints));
if (constraints == NULL) {
@@ -457,6 +507,7 @@ hacluster_pacemaker_constraints_instance_refresh(void)
continue;
pmdaCacheStore(indom, PMDA_CACHE_ADD, constraint_name, (void *)constraints);
+ pmdaCacheStore(indom_all, PMDA_CACHE_ADD, constraint_name, NULL);
}
}
pclose(pf);
@@ -472,6 +523,12 @@ hacluster_pacemaker_nodes_instance_refresh(void)
FILE *pf;
pmInDom indom = INDOM(PACEMAKER_NODES_INDOM);
+ /*
+ * Update indom cache based off the reading of crm_mon listed in
+ * the output from crm_mon
+ */
+ pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
+
pmsprintf(buffer, sizeof(buffer), "%s 2>&1", crm_mon_command);
if ((pf = popen(buffer, "r")) == NULL)
@@ -524,6 +581,14 @@ hacluster_pacemaker_node_attrib_instance_refresh(void)
int found_node_attributes = 0, found_node_name = 0;
FILE *pf;
pmInDom indom = INDOM(PACEMAKER_NODE_ATTRIB_INDOM);
+ pmInDom indom_all = INDOM(PACEMAKER_NODE_ATTRIB_ALL_INDOM);
+
+ /*
+ * Update indom cache based off the reading of crm_mon listed in
+ * the output from crm_mon
+ */
+ pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
+ pmdaCacheOp(indom_all, PMDA_CACHE_INACTIVE);
pmsprintf(buffer, sizeof(buffer), "%s 2>&1", crm_mon_command);
@@ -571,6 +636,7 @@ hacluster_pacemaker_node_attrib_instance_refresh(void)
struct pacemaker_node_attrib *node_attrib;
sts = pmdaCacheLookupName(indom, instance_name, NULL, (void **)&node_attrib);
+ pmdaCacheLookupName(indom_all, instance_name, NULL, NULL);
if (sts == PM_ERR_INST || (sts >=0 && node_attrib == NULL)) {
node_attrib = calloc(1, sizeof(struct pacemaker_node_attrib));
if (node_attrib == NULL) {
@@ -582,6 +648,7 @@ hacluster_pacemaker_node_attrib_instance_refresh(void)
continue;
pmdaCacheStore(indom, PMDA_CACHE_ADD, instance_name, (void *)node_attrib);
+ pmdaCacheStore(indom_all, PMDA_CACHE_ADD, instance_name, NULL);
}
}
}
@@ -597,6 +664,14 @@ hacluster_pacemaker_resources_instance_refresh(void)
int found_resources = 0;
FILE *pf;
pmInDom indom= INDOM(PACEMAKER_RESOURCES_INDOM);
+ pmInDom indom_all = INDOM(PACEMAKER_RESOURCES_ALL_INDOM);
+
+ /*
+ * Update indom cache based off the reading of crm_mon listed in
+ * the output from crm_mon
+ */
+ pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
+ pmdaCacheOp(indom_all, PMDA_CACHE_INACTIVE);
pmsprintf(buffer, sizeof(buffer), "%s 2>&1", crm_mon_command);
@@ -641,6 +716,7 @@ hacluster_pacemaker_resources_instance_refresh(void)
struct pacemaker_resources *pace_resources;
sts = pmdaCacheLookupName(indom, instance_name, NULL, (void **)&pace_resources);
+ pmdaCacheLookupName(indom_all, instance_name, NULL, NULL);
if (sts == PM_ERR_INST || (sts >=0 && pace_resources == NULL)) {
pace_resources = calloc(1, sizeof(struct pacemaker_resources));
if (pace_resources == NULL) {
@@ -652,6 +728,7 @@ hacluster_pacemaker_resources_instance_refresh(void)
continue;
pmdaCacheStore(indom, PMDA_CACHE_ADD, instance_name, (void *)pace_resources);
+ pmdaCacheStore(indom_all, PMDA_CACHE_ADD, instance_name, NULL);
/* Clear node name in the event that a resource has not got a node attachment */
memset(node_name, '\0', sizeof(node_name));
@@ -730,12 +807,14 @@ hacluster_corosync_ring_instance_refresh(void)
char buffer[4096], ring_name[128];
FILE *pf;
pmInDom indom = INDOM(COROSYNC_RING_INDOM);
+ pmInDom indom_all = INDOM(COROSYNC_RING_ALL_INDOM);
/*
* Update indom cache based off number of nodes listed in the
* membership information section of corosync-quorumtool output
*/
pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
+ pmdaCacheOp(indom_all, PMDA_CACHE_INACTIVE);
pmsprintf(buffer, sizeof(buffer), "%s 2>&1", cfgtool_command);
@@ -763,6 +842,7 @@ hacluster_corosync_ring_instance_refresh(void)
struct corosync_ring *ring;
sts = pmdaCacheLookupName(indom, ring_name, NULL, (void **)&ring);
+ pmdaCacheLookupName(indom_all, ring_name, NULL, NULL);
if (sts == PM_ERR_INST || (sts >=0 && ring == NULL)) {
ring = calloc(1, sizeof(struct corosync_ring));
if (ring == NULL) {
@@ -774,6 +854,7 @@ hacluster_corosync_ring_instance_refresh(void)
continue;
pmdaCacheStore(indom, PMDA_CACHE_ADD, ring_name, (void *)ring);
+ pmdaCacheStore(indom_all, PMDA_CACHE_ADD, ring_name, NULL);
}
}
pclose(pf);
@@ -789,12 +870,14 @@ hacluster_sbd_device_instance_refresh(void)
char *buffer_ptr;
FILE *fp;
pmInDom indom = INDOM(SBD_DEVICE_INDOM);
+ pmInDom indom_all = INDOM(SBD_DEVICE_ALL_INDOM);
/*
* Update indom cache based off number of nodes listed in the
* membership information section of corosync-quorumtool output
*/
pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
+ pmdaCacheOp(indom_all, PMDA_CACHE_INACTIVE);
if ((fp = fopen(sbd_path, "r")) == NULL)
/*
@@ -831,6 +914,7 @@ hacluster_sbd_device_instance_refresh(void)
struct sbd_device *sbd;
sts = pmdaCacheLookupName(indom, dev_name, NULL, (void **)&sbd);
+ pmdaCacheLookupName(indom_all, dev_name, NULL, NULL);
if (sts == PM_ERR_INST || (sts >=0 && sbd == NULL)) {
sbd = calloc(1, sizeof(struct sbd_device));
if (sbd == NULL) {
@@ -842,6 +926,7 @@ hacluster_sbd_device_instance_refresh(void)
continue;
pmdaCacheStore(indom, PMDA_CACHE_ADD, dev_name, (void *)sbd);
+ pmdaCacheStore(indom_all, PMDA_CACHE_ADD, dev_name, NULL);
}
}
}
@@ -860,6 +945,7 @@ hacluster_drbd_resource_instance_refresh(void)
char *buffer_ptr;
FILE *pf;
pmInDom indom = INDOM(DRBD_RESOURCE_INDOM);
+ pmInDom indom_all = INDOM(DRBD_RESOURCE_ALL_INDOM);
int found_node = 0, found_volume = 0, nesting = 0;
@@ -868,6 +954,7 @@ hacluster_drbd_resource_instance_refresh(void)
* the json output from drbdsetup
*/
pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
+ pmdaCacheOp(indom_all, PMDA_CACHE_INACTIVE);
pmsprintf(buffer, sizeof(buffer), "%s 2>&1", drbdsetup_command);
@@ -910,6 +997,7 @@ hacluster_drbd_resource_instance_refresh(void)
struct drbd_resource *resource;
sts = pmdaCacheLookupName(indom, resource_name, NULL, (void **)&resource);
+ pmdaCacheLookupName(indom_all, resource_name, NULL, NULL);
if (sts == PM_ERR_INST || (sts >=0 && resource == NULL)) {
resource = calloc(1, sizeof(struct drbd_resource));
if (resource == NULL) {
@@ -921,6 +1009,7 @@ hacluster_drbd_resource_instance_refresh(void)
continue;
pmdaCacheStore(indom, PMDA_CACHE_ADD, resource_name, (void *)resource);
+ pmdaCacheStore(indom_all, PMDA_CACHE_ADD, resource_name, NULL);
found_volume = 0;
}
}
@@ -937,6 +1026,7 @@ hacluster_drbd_peer_device_instance_refresh(void)
char *buffer_ptr;
FILE *pf;
pmInDom indom = INDOM(DRBD_PEER_DEVICE_INDOM);
+ pmInDom indom_all = INDOM(DRBD_PEER_DEVICE_ALL_INDOM);
int found_node = 0, found_peer_node = 0, nesting = 0;
@@ -945,6 +1035,7 @@ hacluster_drbd_peer_device_instance_refresh(void)
* the json output from drbdsetup
*/
pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
+ pmdaCacheOp(indom_all, PMDA_CACHE_INACTIVE);
pmsprintf(buffer, sizeof(buffer), "%s 2>&1", drbdsetup_command);
@@ -987,6 +1078,7 @@ hacluster_drbd_peer_device_instance_refresh(void)
struct drbd_peer_device *peer_device;
sts = pmdaCacheLookupName(indom, peer_name, NULL, (void **)&peer_device);
+ pmdaCacheLookupName(indom_all, peer_name, NULL, NULL);
if (sts == PM_ERR_INST || (sts >=0 && peer_device == NULL)) {
peer_device = calloc(1, sizeof(struct drbd_peer_device));
if (peer_device == NULL) {
@@ -998,6 +1090,7 @@ hacluster_drbd_peer_device_instance_refresh(void)
continue;
pmdaCacheStore(indom, PMDA_CACHE_ADD, peer_name, (void *)peer_device);
+ pmdaCacheStore(indom_all, PMDA_CACHE_ADD, peer_name, NULL);
found_peer_node = 0;
}
}
@@ -1088,7 +1181,8 @@ hacluster_fetch_refresh(pmdaExt *pmda, int *need_refresh)
if (!pmdaCacheLookup(INDOM(PACEMAKER_CONSTRAINTS_INDOM), i, &constraint_name, (void **)&constraints) || !constraints)
continue;
- if (need_refresh[CLUSTER_PACEMAKER_CONSTRAINTS])
+ if (need_refresh[CLUSTER_PACEMAKER_CONSTRAINTS] ||
+ need_refresh[CLUSTER_PACEMAKER_CONSTRAINTS_ALL])
hacluster_refresh_pacemaker_constraints(constraint_name, &constraints->location_constraints);
}
@@ -1108,7 +1202,8 @@ hacluster_fetch_refresh(pmdaExt *pmda, int *need_refresh)
if (!pmdaCacheLookup(INDOM(PACEMAKER_NODE_ATTRIB_INDOM), i, &attrib_name, (void **)&node_attribs) || !node_attribs)
continue;
- if (need_refresh[CLUSTER_PACEMAKER_NODE_ATTRIB])
+ if (need_refresh[CLUSTER_PACEMAKER_NODE_ATTRIB] ||
+ need_refresh[CLUSTER_PACEMAKER_NODE_ATTRIB_ALL])
hacluster_refresh_pacemaker_node_attribs(attrib_name, &node_attribs->attributes);
}
@@ -1118,7 +1213,8 @@ hacluster_fetch_refresh(pmdaExt *pmda, int *need_refresh)
if (!pmdaCacheLookup(INDOM(PACEMAKER_RESOURCES_INDOM), i, &pace_resource_name, (void **)&pace_resources) || !pace_resources)
continue;
- if (need_refresh[CLUSTER_PACEMAKER_RESOURCES])
+ if (need_refresh[CLUSTER_PACEMAKER_RESOURCES] ||
+ need_refresh[CLUSTER_PACEMAKER_RESOURCES_ALL])
hacluster_refresh_pacemaker_resources(pace_resource_name, &pace_resources->resources);
}
@@ -1141,7 +1237,8 @@ hacluster_fetch_refresh(pmdaExt *pmda, int *need_refresh)
if (!pmdaCacheLookup(INDOM(COROSYNC_RING_INDOM), i, &ring_name, (void **)&ring) || !ring)
continue;
- if (need_refresh[CLUSTER_COROSYNC_RING])
+ if (need_refresh[CLUSTER_COROSYNC_RING] ||
+ need_refresh[CLUSTER_COROSYNC_RING_ALL])
hacluster_refresh_corosync_ring(ring_name, &ring->rings);
}
@@ -1151,7 +1248,8 @@ hacluster_fetch_refresh(pmdaExt *pmda, int *need_refresh)
if (!pmdaCacheLookup(INDOM(SBD_DEVICE_INDOM), i, &sbd_dev, (void **)&sbd) || !sbd)
continue;
- if (need_refresh[CLUSTER_SBD_DEVICE])
+ if (need_refresh[CLUSTER_SBD_DEVICE] ||
+ need_refresh[CLUSTER_SBD_DEVICE_ALL])
hacluster_refresh_sbd_device(sbd_dev, &sbd->sbd);
}
@@ -1161,7 +1259,8 @@ hacluster_fetch_refresh(pmdaExt *pmda, int *need_refresh)
if (!pmdaCacheLookup(INDOM(DRBD_RESOURCE_INDOM), i, &resource_name, (void **)&resource) || !resource)
continue;
- if (need_refresh[CLUSTER_DRBD_RESOURCE])
+ if (need_refresh[CLUSTER_DRBD_RESOURCE] ||
+ need_refresh[CLUSTER_DRBD_RESOURCE_ALL])
hacluster_refresh_drbd_resource(resource_name, &resource->resource);
}
@@ -1171,7 +1270,8 @@ hacluster_fetch_refresh(pmdaExt *pmda, int *need_refresh)
if (!pmdaCacheLookup(INDOM(DRBD_PEER_DEVICE_INDOM), i, &peer_device, (void **)&peer) || !peer)
continue;
- if (need_refresh[CLUSTER_DRBD_PEER_DEVICE])
+ if (need_refresh[CLUSTER_DRBD_PEER_DEVICE] ||
+ need_refresh[CLUSTER_DRBD_PEER_DEVICE_ALL])
hacluster_refresh_drbd_peer_device(peer_device, &peer->peer_device);
}
@@ -1228,6 +1328,9 @@ hacluster_fetchCallBack(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom)
return sts;
return hacluster_pacemaker_constraints_fetch(item, &constraints->location_constraints, atom);
+ case CLUSTER_PACEMAKER_CONSTRAINTS_ALL:
+ return hacluster_pacemaker_constraints_all_fetch(item, atom);
+
case CLUSTER_PACEMAKER_NODES:
sts = pmdaCacheLookup(INDOM(PACEMAKER_NODES_INDOM), inst, NULL, (void **)&pace_nodes);
if (sts < 0)
@@ -1239,6 +1342,9 @@ hacluster_fetchCallBack(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom)
if (sts < 0)
return sts;
return hacluster_pacemaker_node_attribs_fetch(item, &pace_attribs->attributes, atom);
+
+ case CLUSTER_PACEMAKER_NODE_ATTRIB_ALL:
+ return hacluster_pacemaker_node_attribs_all_fetch(item, atom);
case CLUSTER_PACEMAKER_RESOURCES:
sts = pmdaCacheLookup(INDOM(PACEMAKER_RESOURCES_INDOM), inst, NULL, (void **)&pace_resources);
@@ -1246,6 +1352,9 @@ hacluster_fetchCallBack(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom)
return sts;
return hacluster_pacemaker_resources_fetch(item, &pace_resources->resources, atom);
+ case CLUSTER_PACEMAKER_RESOURCES_ALL:
+ return hacluster_pacemaker_resources_all_fetch(item, atom);
+
case CLUSTER_COROSYNC_NODE:
sts = pmdaCacheLookup(INDOM(COROSYNC_NODE_INDOM), inst, NULL, (void **)&node);
if (sts < 0)
@@ -1260,18 +1369,27 @@ hacluster_fetchCallBack(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom)
if (sts < 0)
return sts;
return hacluster_corosync_ring_fetch(item, &ring->rings, atom);
+
+ case CLUSTER_COROSYNC_RING_ALL:
+ return hacluster_corosync_ring_all_fetch(item, atom);
case CLUSTER_SBD_DEVICE:
sts = pmdaCacheLookup(INDOM(SBD_DEVICE_INDOM), inst, NULL, (void **)&sbd);
if (sts < 0)
return sts;
return hacluster_sbd_device_fetch(item, &sbd->sbd, atom);
+
+ case CLUSTER_SBD_DEVICE_ALL:
+ return hacluster_sbd_device_all_fetch(item, atom);
case CLUSTER_DRBD_RESOURCE:
sts = pmdaCacheLookup(INDOM(DRBD_RESOURCE_INDOM), inst, NULL, (void **)&resource);
if (sts < 0)
return sts;
- return hacluster_drbd_resource_fetch(item, &resource->resource, atom);
+ return hacluster_drbd_resource_fetch(item, &resource->resource, atom);
+
+ case CLUSTER_DRBD_RESOURCE_ALL:
+ return hacluster_drbd_resource_all_fetch(item, atom);
case CLUSTER_DRBD_PEER_DEVICE:
sts = pmdaCacheLookup(INDOM(DRBD_PEER_DEVICE_INDOM), inst, NULL, (void **)&peer);
@@ -1279,6 +1397,9 @@ hacluster_fetchCallBack(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom)
return sts;
return hacluster_drbd_peer_device_fetch(item, &peer->peer_device, atom);
+ case CLUSTER_DRBD_PEER_DEVICE_ALL:
+ return hacluster_drbd_peer_device_all_fetch(item, atom);
+
default:
return PM_ERR_PMID;
}
@@ -1286,6 +1407,220 @@ hacluster_fetchCallBack(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom)
return PMDA_FETCH_STATIC;
}
+static int
+hacluster_labelInDom(pmID pmid, pmLabelSet **lp)
+{
+ unsigned int cluster = pmID_cluster(pmid);
+
+ switch (cluster) {
+ case CLUSTER_PACEMAKER_CONSTRAINTS_ALL:
+ pmdaAddLabels(lp, "{\"constraint\":\"constraint\"}");
+ pmdaAddLabels(lp, "{\"node\":\"node name\"}");
+ pmdaAddLabels(lp, "{\"resource\":\"resource name\"}");
+ pmdaAddLabels(lp, "{\"role\":\"role of node\"}");
+ pmdaAddLabels(lp, "{\"score\":\"score\"}");
+ return 1;
+
+ case CLUSTER_PACEMAKER_NODE_ATTRIB_ALL:
+ pmdaAddLabels(lp, "{\"name\":\"attribute name\"}");
+ pmdaAddLabels(lp, "{\"node\":\"node name\"}");
+ pmdaAddLabels(lp, "{\"value\":\"value\"}");
+ return 1;
+
+ case CLUSTER_PACEMAKER_RESOURCES_ALL:
+ pmdaAddLabels(lp, "{\"agent\":\"agent\"}");
+ pmdaAddLabels(lp, "{\"clone\":\"clone\"}");
+ pmdaAddLabels(lp, "{\"group\":\"group\"}");
+ pmdaAddLabels(lp, "{\"managed\":\"managed\"}");
+ pmdaAddLabels(lp, "{\"node\":\"node name\"}");
+ pmdaAddLabels(lp, "{\"resource\":\"resource name\"}");
+ pmdaAddLabels(lp, "{\"role\":\"role\"}");
+ return 1;
+
+ case CLUSTER_COROSYNC_RING_ALL:
+ pmdaAddLabels(lp, "{\"address\":\"ip address\"}");
+ pmdaAddLabels(lp, "{\"node_id\":\"id of node\"}");
+ pmdaAddLabels(lp, "{\"number\":\"ring number\"}");
+ pmdaAddLabels(lp, "{\"ring_id\":\"id of ring\"}");
+ return 1;
+
+ case CLUSTER_SBD_DEVICE_ALL:
+ pmdaAddLabels(lp, "{\"device\":\"device\"}");
+ pmdaAddLabels(lp, "{\"status\":\"status\"}");
+ return 1;
+
+ case CLUSTER_DRBD_RESOURCE_ALL:
+ pmdaAddLabels(lp, "{\"disk_state\":\"disk state\"}");
+ pmdaAddLabels(lp, "{\"resource\":\"resource name\"}");
+ pmdaAddLabels(lp, "{\"role\":\"role\"}");
+ pmdaAddLabels(lp, "{\"volume\":\"volume\"}");
+ return 1;
+
+ case CLUSTER_DRBD_PEER_DEVICE_ALL:
+ pmdaAddLabels(lp, "{\"peer_disk_state\":\"peer disk state\"}");
+ pmdaAddLabels(lp, "{\"peer_node_id\":\"peer node id\"}");
+ pmdaAddLabels(lp, "{\"peer_role\":\"peer role\"}");
+ pmdaAddLabels(lp, "{\"resource\":\"resource\"}");
+ pmdaAddLabels(lp, "{\"volume\":\"volume\"}");
+ return 1;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int
+hacluster_label(int ident, int type, pmLabelSet **lpp, pmdaExt *pmda)
+{
+ int sts;
+
+ switch (type) {
+ case PM_LABEL_ITEM:
+ if ((sts = hacluster_labelInDom((pmID)ident, lpp)) <0)
+ return sts;
+ break;
+
+ default:
+ break;
+ }
+ return pmdaLabel(ident, type, lpp, pmda);
+}
+
+static int
+hacluster_labelCallBack(pmInDom indom, unsigned int inst, pmLabelSet **lp)
+{
+ struct location_constraints *constraints;
+ struct attributes *attributes;
+ struct resources *resources;
+ struct rings *ring;
+ struct sbd *sbd;
+ struct resource *resource;
+ struct peer_device *peer_device;
+
+ int sts;
+ int no_node_attachment = 0;
+ char *name;
+ char *node_name, *attribute_name, *node, *resource_id, *tofree, *str;
+
+ if (indom == PM_INDOM_NULL)
+ return 0;
+
+ switch (pmInDom_serial(indom)) {
+ case PACEMAKER_CONSTRAINTS_ALL_INDOM:
+ sts = pmdaCacheLookup(INDOM(PACEMAKER_CONSTRAINTS_INDOM), inst, &name, (void **)&constraints);
+ if (sts < 0 || sts == PMDA_CACHE_INACTIVE)
+ return 0;
+ return pmdaAddLabels(lp, "{\"constraint\":\"%s\", \"node\":\"%s\", \"resource\":\"%s\", \"role\":\"%s\", \"score\":\"%s\"}",
+ name,
+ constraints->node,
+ constraints->resource,
+ constraints->role,
+ constraints->score
+ );
+
+ case PACEMAKER_NODE_ATTRIB_ALL_INDOM:
+ sts = pmdaCacheLookup(INDOM(PACEMAKER_NODE_ATTRIB_INDOM), inst, &name, (void **)&attributes);
+ if (sts < 0 || sts == PMDA_CACHE_INACTIVE)
+ return 0;
+ /*
+ * We need to split our combined NODE:ATTRIBUTE_NAME instance names into their
+ * separated NODE and ATTRIBUTE_NAME fields
+ */
+ tofree = str = strdup(name);
+ attribute_name = strsep(&str, ":");
+ node_name = strsep(&str, ":");
+
+ sts = pmdaAddLabels(lp, "{\"name\":\"%s\", \"node\":\"%s\", \"value\":\"%s\"}",
+ node_name,
+ attribute_name,
+ attributes->value
+ );
+ free(tofree);
+ return sts;
+
+ case PACEMAKER_RESOURCES_ALL_INDOM:
+ sts = pmdaCacheLookup(INDOM(PACEMAKER_RESOURCES_INDOM), inst, &name, (void **)&resources);
+ if (sts < 0 || sts == PMDA_CACHE_INACTIVE)
+ return 0;
+
+ if (strchr(name, ':') == NULL) {
+ node = (char*)name;
+ no_node_attachment = 1;
+ resource_id = "";
+ } else {
+ tofree = str = strdup(name);
+ node = strsep(&str, ":");
+ resource_id = strsep(&str, ":");
+ }
+
+ sts = pmdaAddLabels(lp, "{\"agent\":\"%s\", \"clone\":\"%s\", \"group\":\"%s\", \"managed\":%u, \"node\":\"%s\", \"resource\":\"%s\", \"role\":\"%s\"}",
+ resources->agent,
+ resources->clone,
+ resources->group,
+ resources->managed,
+ resource_id,
+ node,
+ resources->role
+ );
+
+ if (!no_node_attachment)
+ free(tofree);
+ return sts;
+
+ case COROSYNC_RING_ALL_INDOM:
+ sts = pmdaCacheLookup(INDOM(COROSYNC_RING_INDOM), inst, &name, (void **)&ring);
+ if (sts <0 || sts == PMDA_CACHE_INACTIVE)
+ return 0;
+
+ return pmdaAddLabels(lp, "{\"address\":\"%s\", \"node_id\":%"PRIu64", \"number\":%u, \"ring_id\":\"%s\"}",
+ ring->address,
+ ring->node_id,
+ ring->number,
+ ring->ring_id
+ );
+
+ case SBD_DEVICE_ALL_INDOM:
+ sts = pmdaCacheLookup(INDOM(SBD_DEVICE_INDOM), inst, &name, (void**)&sbd);
+ if (sts <0 || sts == PMDA_CACHE_INACTIVE)
+ return 0;
+
+ return pmdaAddLabels(lp, "{\"device\":\"%s\", \"status\":\"%s\"}",
+ sbd->path,
+ sbd->status
+ );
+
+ case DRBD_RESOURCE_ALL_INDOM:
+ sts = pmdaCacheLookup(INDOM(DRBD_RESOURCE_INDOM), inst, &name, (void**)&resource);
+ if (sts <0 || sts == PMDA_CACHE_INACTIVE)
+ return 0;
+
+ return pmdaAddLabels(lp, "{\"disk_state\":\"%s\", \"resource\":\"%s\", \"role\":\"%s\", \"volume\":\"%s\"}",
+ resource->disk_state,
+ resource->resource,
+ resource->role,
+ resource->volume
+ );
+
+ case DRBD_PEER_DEVICE_ALL_INDOM:
+ sts = pmdaCacheLookup(INDOM(DRBD_PEER_DEVICE_INDOM), inst, &name, (void**)&peer_device);
+ if (sts <0 || sts == PMDA_CACHE_INACTIVE)
+ return 0;
+
+ return pmdaAddLabels(lp, "{\"peer_disk_state\":\"%s\", \"peer_node_id\":\"%s\", \"peer_role\":\"%s\", \"resource\":\"%s\", \"volume\":%"PRIu32"}",
+ peer_device->peer_disk_state,
+ peer_device->peer_node_id,
+ peer_device->peer_role,
+ peer_device->resource,
+ peer_device->volume
+ );
+
+ default:
+ break;
+ }
+ return 0;
+}
+
void
hacluster_inst_setup(void)
{
@@ -1378,7 +1713,7 @@ hacluster_init(pmdaInterface *dp)
int sep = pmPathSeparator();
pmsprintf(helppath, sizeof(helppath), "%s%c" "hacluster" "%c" "help",
pmGetConfig("PCP_PMDAS_DIR"), sep, sep);
- pmdaDSO(dp, PMDA_INTERFACE_4, "HACLUSTER DSO", helppath);
+ pmdaDSO(dp, PMDA_INTERFACE_7, "HACLUSTER DSO", helppath);
}
if (dp->status != 0)
@@ -1391,13 +1726,15 @@ hacluster_init(pmdaInterface *dp)
sbd_stats_setup();
drbd_stats_setup();
- dp->version.four.instance = hacluster_instance;
- dp->version.four.fetch = hacluster_fetch;
- dp->version.four.text = hacluster_text;
- dp->version.four.pmid = hacluster_pmid;
- dp->version.four.name = hacluster_name;
- dp->version.four.children = hacluster_children;
+ dp->version.seven.instance = hacluster_instance;
+ dp->version.seven.fetch = hacluster_fetch;
+ dp->version.seven.text = hacluster_text;
+ dp->version.seven.pmid = hacluster_pmid;
+ dp->version.seven.name = hacluster_name;
+ dp->version.seven.children = hacluster_children;
+ dp->version.seven.label = hacluster_label;
pmdaSetFetchCallBack(dp, hacluster_fetchCallBack);
+ pmdaSetLabelCallBack(dp, hacluster_labelCallBack);
pmdaSetFlags(dp, PMDA_EXT_FLAG_HASHED);
pmdaInit(dp, indomtable, nindoms, metrictable, nmetrics);
@@ -1428,7 +1765,7 @@ main(int argc, char **argv)
pmSetProgname(argv[0]);
pmsprintf(helppath, sizeof(helppath), "%s%c" "hacluster" "%c" "help",
pmGetConfig("PCP_PMDAS_DIR"), sep, sep);
- pmdaDaemon(&dispatch, PMDA_INTERFACE_4, pmGetProgname(), HACLUSTER, "hacluster.log", helppath);
+ pmdaDaemon(&dispatch, PMDA_INTERFACE_7, pmGetProgname(), HACLUSTER, "hacluster.log", helppath);
pmdaGetOptions(argc, argv, &opts, &dispatch);
if (opts.errors) {
diff --git a/src/pmdas/hacluster/pmdahacluster.h b/src/pmdas/hacluster/pmdahacluster.h
index 9d3c243a4..a813ccb02 100644
--- a/src/pmdas/hacluster/pmdahacluster.h
+++ b/src/pmdas/hacluster/pmdahacluster.h
@@ -23,32 +23,46 @@
#include "drbd.h"
enum {
- CLUSTER_PACEMAKER_GLOBAL = 0, /* 0 -- NULL INDOM */
- CLUSTER_PACEMAKER_FAIL, /* 1 -- PACEMAKER_FAIL_INDOM */
- CLUSTER_PACEMAKER_CONSTRAINTS, /* 2 -- PACEMAKER_CONSTRAINTS_INDOM */
- CLUSTER_PACEMAKER_NODES, /* 3 -- PACEMAKER_NODES_IDOM*/
- CLUSTER_PACEMAKER_NODE_ATTRIB, /* 4 -- PACEMAKER_NODE_ATRRIB_INDOM */
- CLUSTER_PACEMAKER_RESOURCES, /* 5 -- PACEMAKER_RESOURCES_INDOM */
- CLUSTER_COROSYNC_NODE, /* 6 -- COROSYNC_NODE_INDOM */
- CLUSTER_COROSYNC_GLOBAL, /* 7 -- NULL INDOM */
- CLUSTER_COROSYNC_RING, /* 8 -- COROSYNC_RING INDOM */
- CLUSTER_SBD_DEVICE, /* 9 -- SBD_DEVICES_INDOM */
- CLUSTER_DRBD_RESOURCE, /* 10 -- DRBD_RESOURCE_INDOM */
- CLUSTER_DRBD_PEER_DEVICE, /* 11 -- DRBD_PEER_DEVICE_INDOM */
+ CLUSTER_PACEMAKER_GLOBAL = 0, /* 0 -- NULL INDOM */
+ CLUSTER_PACEMAKER_FAIL, /* 1 -- PACEMAKER_FAIL_INDOM */
+ CLUSTER_PACEMAKER_CONSTRAINTS, /* 2 -- PACEMAKER_CONSTRAINTS_INDOM */
+ CLUSTER_PACEMAKER_NODES, /* 3 -- PACEMAKER_NODES_IDOM*/
+ CLUSTER_PACEMAKER_NODE_ATTRIB, /* 4 -- PACEMAKER_NODE_ATRRIB_INDOM */
+ CLUSTER_PACEMAKER_RESOURCES, /* 5 -- PACEMAKER_RESOURCES_INDOM */
+ CLUSTER_COROSYNC_NODE, /* 6 -- COROSYNC_NODE_INDOM */
+ CLUSTER_COROSYNC_GLOBAL, /* 7 -- NULL INDOM */
+ CLUSTER_COROSYNC_RING, /* 8 -- COROSYNC_RING INDOM */
+ CLUSTER_SBD_DEVICE, /* 9 -- SBD_DEVICES_INDOM */
+ CLUSTER_DRBD_RESOURCE, /* 10 -- DRBD_RESOURCE_INDOM */
+ CLUSTER_DRBD_PEER_DEVICE, /* 11 -- DRBD_PEER_DEVICE_INDOM */
+ CLUSTER_PACEMAKER_CONSTRAINTS_ALL, /* 12 -- PACEMAKER_CONSTRAINTS_ALL_INDOM */
+ CLUSTER_PACEMAKER_NODE_ATTRIB_ALL, /* 13 -- PACEMAKER_NODE_ATTRIB_ALL_INDOM */
+ CLUSTER_PACEMAKER_RESOURCES_ALL, /* 14 -- PACEMAKER_RESOURCES_ALL_INDOM */
+ CLUSTER_COROSYNC_RING_ALL, /* 15 -- COROSYNC_RING_ALL_INDOM */
+ CLUSTER_SBD_DEVICE_ALL, /* 16 -- SBD_DEVICES_ALL_INDOM */
+ CLUSTER_DRBD_RESOURCE_ALL, /* 17 -- DRBD_RESOURCE_ALL_INDOM */
+ CLUSTER_DRBD_PEER_DEVICE_ALL, /* 18 -- DRBD_PEER_DEVICE_ALL_INDOM */
NUM_CLUSTERS
};
enum {
- PACEMAKER_FAIL_INDOM = 0, /* 0 -- Pacemaker failure/migrations */
- PACEMAKER_CONSTRAINTS_INDOM, /* 1 -- Pacemaker location constraints */
- PACEMAKER_NODES_INDOM, /* 2 -- Pacemaker nodes data */
- PACEMAKER_NODE_ATTRIB_INDOM, /* 3 -- Pacemaker node attributes */
- PACEMAKER_RESOURCES_INDOM, /* 4 -- Pacemaker resources */
- COROSYNC_NODE_INDOM, /* 5 -- Corosync available nodes */
- COROSYNC_RING_INDOM, /* 6 -- Corosync available rings */
- SBD_DEVICE_INDOM, /* 7 -- SBD available devices */
- DRBD_RESOURCE_INDOM, /* 8 -- DRBD Resources */
- DRBD_PEER_DEVICE_INDOM, /* 9 -- DRBD Peer Devices */
+ PACEMAKER_FAIL_INDOM = 0, /* 0 -- Pacemaker failure/migrations */
+ PACEMAKER_CONSTRAINTS_INDOM, /* 1 -- Pacemaker location constraints */
+ PACEMAKER_NODES_INDOM, /* 2 -- Pacemaker nodes data */
+ PACEMAKER_NODE_ATTRIB_INDOM, /* 3 -- Pacemaker node attributes */
+ PACEMAKER_RESOURCES_INDOM, /* 4 -- Pacemaker resources */
+ COROSYNC_NODE_INDOM, /* 5 -- Corosync available nodes */
+ COROSYNC_RING_INDOM, /* 6 -- Corosync available rings */
+ SBD_DEVICE_INDOM, /* 7 -- SBD available devices */
+ DRBD_RESOURCE_INDOM, /* 8 -- DRBD Resources */
+ DRBD_PEER_DEVICE_INDOM, /* 9 -- DRBD Peer Devices */
+ PACEMAKER_CONSTRAINTS_ALL_INDOM, /* 10 -- Pacemaker location constraints all (labels) */
+ PACEMAKER_NODE_ATTRIB_ALL_INDOM, /* 11 -- Pacemaker node attributes all (labels) */
+ PACEMAKER_RESOURCES_ALL_INDOM, /* 12 -- Pacemaker resources all (labels) */
+ COROSYNC_RING_ALL_INDOM, /* 13 -- Corosync available rings all (labels) */
+ SBD_DEVICE_ALL_INDOM, /* 14 -- SBD available devices all (labels) */
+ DRBD_RESOURCE_ALL_INDOM, /* 15 -- DRBD Resources all (labels) */
+ DRBD_PEER_DEVICE_ALL_INDOM, /* 16 -- DRBD Peer Devices all (labels) */
NUM_INDOMS
};
diff --git a/src/pmdas/hacluster/pmns b/src/pmdas/hacluster/pmns
index 75fc1ea4c..274d5f2ac 100644
--- a/src/pmdas/hacluster/pmns
+++ b/src/pmdas/hacluster/pmns
@@ -21,131 +21,138 @@ ha_cluster {
ha_cluster.pacemaker {
config_last_change HACLUSTER:0:0
- fail_count HACLUSTER:1:0
+ fail_count HACLUSTER:1:0
location_constraints
migration_threshold HACLUSTER:1:1
nodes
node_attributes HACLUSTER:4:0
+ node_attributes_all HACLUSTER:13:0
resources
stonith_enabled HACLUSTER:0:1
}
ha_cluster.pacemaker.location_constraints {
- node HACLUSTER:2:0
- resource HACLUSTER:2:1
- role HACLUSTER:2:2
- score HACLUSTER:2:3
+ node HACLUSTER:2:0
+ resource HACLUSTER:2:1
+ role HACLUSTER:2:2
+ score HACLUSTER:2:3
+ all HACLUSTER:12:0
}
ha_cluster.pacemaker.nodes {
status
- type HACLUSTER:3:9
+ type HACLUSTER:3:9
}
ha_cluster.pacemaker.nodes.status {
- online HACLUSTER:3:0
- standby HACLUSTER:3:1
+ online HACLUSTER:3:0
+ standby HACLUSTER:3:1
standby_on_fail HACLUSTER:3:2
- maintenance HACLUSTER:3:3
- pending HACLUSTER:3:4
- unclean HACLUSTER:3:5
- shutdown HACLUSTER:3:6
- expected_up HACLUSTER:3:7
- dc HACLUSTER:3:8
+ maintenance HACLUSTER:3:3
+ pending HACLUSTER:3:4
+ unclean HACLUSTER:3:5
+ shutdown HACLUSTER:3:6
+ expected_up HACLUSTER:3:7
+ dc HACLUSTER:3:8
}
ha_cluster.pacemaker.resources {
- agent HACLUSTER:5:0
- clone HACLUSTER:5:1
- group HACLUSTER:5:2
- managed HACLUSTER:5:3
- role HACLUSTER:5:4
+ agent HACLUSTER:5:0
+ clone HACLUSTER:5:1
+ group HACLUSTER:5:2
+ managed HACLUSTER:5:3
+ role HACLUSTER:5:4
status
+ all HACLUSTER:14:0
}
ha_cluster.pacemaker.resources.status {
- active HACLUSTER:5:5
- orphaned HACLUSTER:5:6
- blocked HACLUSTER:5:7
- failed HACLUSTER:5:8
+ active HACLUSTER:5:5
+ orphaned HACLUSTER:5:6
+ blocked HACLUSTER:5:7
+ failed HACLUSTER:5:8
failure_ignored HACLUSTER:5:9
}
ha_cluster.corosync {
member_votes
- quorate HACLUSTER:7:0
+ quorate HACLUSTER:7:0
quorum_votes
- ring_errors HACLUSTER:7:5
+ ring_errors HACLUSTER:7:5
rings
}
ha_cluster.corosync.member_votes {
- votes HACLUSTER:6:0
- local HACLUSTER:6:1
- node_id HACLUSTER:6:2
+ votes HACLUSTER:6:0
+ local HACLUSTER:6:1
+ node_id HACLUSTER:6:2
}
ha_cluster.corosync.quorum_votes {
expected_votes HACLUSTER:7:1
highest_expected HACLUSTER:7:2
- total_votes HACLUSTER:7:3
- quorum HACLUSTER:7:4
+ total_votes HACLUSTER:7:3
+ quorum HACLUSTER:7:4
}
ha_cluster.corosync.rings {
- status HACLUSTER:8:0
- address HACLUSTER:8:1
- node_id HACLUSTER:8:2
- number HACLUSTER:8:3
- ring_id HACLUSTER:8:4
+ status HACLUSTER:8:0
+ address HACLUSTER:8:1
+ node_id HACLUSTER:8:2
+ number HACLUSTER:8:3
+ ring_id HACLUSTER:8:4
+ all HACLUSTER:15:0
}
ha_cluster.sbd {
devices
timeouts
+ all HACLUSTER:16:0
}
ha_cluster.sbd.devices {
- path HACLUSTER:9:0
- status HACLUSTER:9:1
+ path HACLUSTER:9:0
+ status HACLUSTER:9:1
}
ha_cluster.sbd.timeouts {
- mgswait HACLUSTER:9:2
- allocate HACLUSTER:9:3
- loop HACLUSTER:9:4
- watchdog HACLUSTER:9:5
+ mgswait HACLUSTER:9:2
+ allocate HACLUSTER:9:3
+ loop HACLUSTER:9:4
+ watchdog HACLUSTER:9:5
}
ha_cluster.drbd {
- resources
- written HACLUSTER:10:4
- read HACLUSTER:10:5
- al_writes HACLUSTER:10:6
- bm_writes HACLUSTER:10:7
+ resources
+ written HACLUSTER:10:4
+ read HACLUSTER:10:5
+ al_writes HACLUSTER:10:6
+ bm_writes HACLUSTER:10:7
upper_pending HACLUSTER:10:8
lower_pending HACLUSTER:10:9
- quorum HACLUSTER:10:10
+ quorum HACLUSTER:10:10
connections
connections_sync HACLUSTER:11:5
- connections_received HACLUSTER:11:6
+ connections_received HACLUSTER:11:6
connections_sent HACLUSTER:11:7
connections_pending HACLUSTER:11:8
connections_unacked HACLUSTER:11:9
- split_brain HACLUSTER:10:11
+ split_brain HACLUSTER:10:11
}
ha_cluster.drbd.resources {
- resource HACLUSTER:10:0
- role HACLUSTER:10:1
- volume HACLUSTER:10:2
- disk_state HACLUSTER:10:3
+ resource HACLUSTER:10:0
+ role HACLUSTER:10:1
+ volume HACLUSTER:10:2
+ disk_state HACLUSTER:10:3
+ all HACLUSTER:17:0
}
ha_cluster.drbd.connections {
- resource HACLUSTER:11:0
+ resource HACLUSTER:11:0
peer_node_id HACLUSTER:11:1
- peer_role HACLUSTER:11:2
- volume HACLUSTER:11:3
+ peer_role HACLUSTER:11:2
+ volume HACLUSTER:11:3
peer_disk_state HACLUSTER:11:4
+ all HACLUSTER:18:0
}
diff --git a/src/pmdas/hacluster/sbd.c b/src/pmdas/hacluster/sbd.c
index 2824f6589..05f55ad52 100644
--- a/src/pmdas/hacluster/sbd.c
+++ b/src/pmdas/hacluster/sbd.c
@@ -68,6 +68,13 @@ hacluster_sbd_device_fetch(int item, struct sbd *sbd, pmAtomValue *atom)
return PMDA_FETCH_NOVALUES;
}
+int
+hacluster_sbd_device_all_fetch(int item, pmAtomValue *atom)
+{
+ atom->ul = 1; /* Assign default exists value 1 */
+ return PMDA_FETCH_STATIC;
+}
+
int
hacluster_refresh_sbd_device(const char *sbd_dev, struct sbd *sbd)
{
diff --git a/src/pmdas/hacluster/sbd.h b/src/pmdas/hacluster/sbd.h
index 0db22e7a0..0a6971d0e 100644
--- a/src/pmdas/hacluster/sbd.h
+++ b/src/pmdas/hacluster/sbd.h
@@ -37,6 +37,7 @@ struct sbd {
};
extern int hacluster_sbd_device_fetch(int, struct sbd *, pmAtomValue *);
+extern int hacluster_sbd_device_all_fetch(int, pmAtomValue *);
extern int hacluster_refresh_sbd_device(const char *, struct sbd *);
extern void sbd_stats_setup(void);
--
2.31.1
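For readers less familiar with PMDA_INTERFACE_7, the per-instance labels added
above hang off an indom label callback. The following is a minimal sketch of
that plumbing only, not the PMDA's actual code: the example_* names are
invented, while pmdaCacheLookup, pmdaAddLabels, pmdaLabel and
pmdaSetLabelCallBack are the same libpcp_pmda calls used in the diff above.

    #include <pcp/pmapi.h>
    #include <pcp/pmda.h>

    /* Sketch only: attach a JSON label to every active instance of an
     * indom; all example_ names are hypothetical. */
    static int
    example_labelCallBack(pmInDom indom, unsigned int inst, pmLabelSet **lp)
    {
        char *name;
        int sts;

        sts = pmdaCacheLookup(indom, inst, &name, NULL);
        if (sts < 0 || sts == PMDA_CACHE_INACTIVE)
            return 0;
        /* expose the instance name as label metadata */
        return pmdaAddLabels(lp, "{\"device\":\"%s\"}", name);
    }

    static int
    example_label(int ident, int type, pmLabelSet **lpp, pmdaExt *pmda)
    {
        /* cluster- or item-specific labels would be added here before
         * falling through to the generic handler */
        return pmdaLabel(ident, type, lpp, pmda);
    }

    static void
    example_label_setup(pmdaInterface *dp)
    {
        dp->version.seven.label = example_label;
        pmdaSetLabelCallBack(dp, example_labelCallBack);
    }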
From 96e746ac389999ee5a67f1b1d5621f62cff70bcb Mon Sep 17 00:00:00 2001
From: Nathan Scott <nathans@redhat.com>
Date: Sat, 19 Jun 2021 16:32:38 +1000
Subject: [PATCH 6/7] docs: typo fixes in hacluster help text
---
qa/1897.out | 22 +++++++++++-----------
src/pmdas/hacluster/help | 22 +++++++++++-----------
2 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/qa/1897.out b/qa/1897.out
index b757e857a..a0b53a998 100644
--- a/qa/1897.out
+++ b/qa/1897.out
@@ -41,7 +41,7 @@ Help:
The number of expected quorum votes for the cluster.
value 2
-ha_cluster.corosync.quorum_votes.highest_expected PMID: 155.7.2 [Hightest expected vote count]
+ha_cluster.corosync.quorum_votes.highest_expected PMID: 155.7.2 [Highest expected vote count]
Data Type: 32-bit unsigned int InDom: PM_INDOM_NULL 0xffffffff
Semantics: instant Units: count
Help:
@@ -80,7 +80,7 @@ ha_cluster.corosync.rings.all PMID: 155.15.0 [Corosync rings information]
Data Type: 32-bit unsigned int InDom: 155.13 0x26c0000d
Semantics: instant Units: count
Help:
-Value is 1 if a ring exists. The details of the corrosponding ring
+Value is 1 if a ring exists. The details of the corresponding ring
is given as label metadata values for this metric.
inst [0 or "0"] value 1
@@ -132,7 +132,7 @@ ha_cluster.drbd.connections.all PMID: 155.18.0 [DRBD Peer disk information]
Data Type: 32-bit unsigned int InDom: 155.16 0x26c00010
Semantics: instant Units: count
Help:
-Value is 1 if a drbd peer connection exists. The details of the corrosponding DRBD peer
+Value is 1 if a drbd peer connection exists. The details of the corresponding DRBD peer
connection is given as label metadata values for this metric.
inst [0 or "drbd1:1"] value 1
@@ -219,7 +219,7 @@ The number of open requests to the local I/O subsystem by DRBD for the
resource:volume.
inst [0 or "drbd1:0"] value 0
-ha_cluster.drbd.quorum PMID: 155.10.10 [Quorum satus of DRBD resource:volume]
+ha_cluster.drbd.quorum PMID: 155.10.10 [Quorum status of DRBD resource:volume]
Data Type: 32-bit unsigned int InDom: 155.8 0x26c00008
Semantics: instant Units: count
Help:
@@ -238,7 +238,7 @@ ha_cluster.drbd.resources.all PMID: 155.17.0 [DRBD resource information]
Data Type: 32-bit unsigned int InDom: 155.15 0x26c0000f
Semantics: instant Units: count
Help:
-Value is 1 if a drbd resource exists. The details of the corrosponding drbd resource
+Value is 1 if a drbd resource exists. The details of the corresponding drbd resource
is given as label metadata values for this metric.
inst [0 or "drbd1:0"] value 1
@@ -260,21 +260,21 @@ ha_cluster.drbd.resources.role PMID: 155.10.1 [Role of the resource]
Data Type: string InDom: 155.8 0x26c00008
Semantics: instant Units: count
Help:
-The resported role for ther DRBD resource for each resource:volume.
+The reported role for the DRBD resource for each resource:volume.
inst [0 or "drbd1:0"] value "Primary"
ha_cluster.drbd.resources.volume PMID: 155.10.2 [Volume of the resource]
Data Type: string InDom: 155.8 0x26c00008
Semantics: instant Units: count
Help:
-The volume number of ther resource for each resource:volume.
+The volume number of the resource for each resource:volume.
inst [0 or "drbd1:0"] value "0"
ha_cluster.drbd.split_brain PMID: 155.10.11 [Signal for split brain detection.]
Data Type: 32-bit unsigned int InDom: 155.8 0x26c00008
Semantics: instant Units: count
Help:
-This metric signals if there has been a split brain occuring in DRBD for the
+This metric signals if there has been a split brain occurring in DRBD for the
resource:volume, value is 1 is a split brain has been detected.
inst [0 or "drbd1:0"] value 1
@@ -293,7 +293,7 @@ Help:
Amount in KiB written to the DRBD resource:volume.
inst [0 or "drbd1:0"] value 0
-ha_cluster.pacemaker.config_last_change PMID: 155.0.0 [Unix timestamp corresponding to last Pacmaker configuration change]
+ha_cluster.pacemaker.config_last_change PMID: 155.0.0 [Unix timestamp corresponding to last Pacemaker configuration change]
Data Type: 64-bit unsigned int InDom: PM_INDOM_NULL 0xffffffff
Semantics: instant Units: count
Help:
@@ -335,7 +335,7 @@ ha_cluster.pacemaker.location_constraints.role PMID: 155.2.2 [Resource role of t
Data Type: string InDom: 155.1 0x26c00001
Semantics: instant Units: count
Help:
-The resource role that the location contraint applies to, if any.
+The resource role that the location constraint applies to, if any.
inst [0 or "test"] value "Started"
ha_cluster.pacemaker.location_constraints.score PMID: 155.2.3 [Score of the location constraint]
@@ -617,7 +617,7 @@ ha_cluster.sbd.all PMID: 155.16.0 [SBD device information]
Data Type: 32-bit unsigned int InDom: 155.14 0x26c0000e
Semantics: instant Units: count
Help:
-Value is 1 if a sbd device exists. The details of the corrosponding SBD device
+Value is 1 if a sbd device exists. The details of the corresponding SBD device
is given as label metadata values for this metric.
inst [0 or "/dev/vdb"] value 1
diff --git a/src/pmdas/hacluster/help b/src/pmdas/hacluster/help
index bdcd68e5f..417bfd4cf 100644
--- a/src/pmdas/hacluster/help
+++ b/src/pmdas/hacluster/help
@@ -28,7 +28,7 @@
#
@ HACLUSTER.0 Instance domain for High Availability Cluster component metrics
-@ ha_cluster.pacemaker.config_last_change Unix timestamp corresponding to last Pacmaker configuration change
+@ ha_cluster.pacemaker.config_last_change Unix timestamp corresponding to last Pacemaker configuration change
Unix timestamp in seconds corresponding to the last time that the Pacemaker
configuration was changed on the system.
@@ -58,7 +58,7 @@ The node that the location constraint applies to in the cluster.
The resource that the location constraint applies to in the cluster.
@ ha_cluster.pacemaker.location_constraints.role Resource role of the location constraint
-The resource role that the location contraint applies to, if any.
+The resource role that the location constraint applies to, if any.
@ ha_cluster.pacemaker.location_constraints.score Score of the location constraint
The score given to the location constraint by Pacemaker, the value depends on
@@ -167,7 +167,7 @@ The full corosync ID for the nodes in the cluster.
@ ha_cluster.corosync.quorum_votes.expected_votes Expected vote count
The number of expected quorum votes for the cluster.
-@ ha_cluster.corosync.quorum_votes.highest_expected Hightest expected vote count
+@ ha_cluster.corosync.quorum_votes.highest_expected Highest expected vote count
The highest number of expected quorum votes for the cluster.
@ ha_cluster.corosync.quorum_votes.total_votes Total number of votes
@@ -193,7 +193,7 @@ The number for the Corosync ring.
The internal Corosync ring ID, corresponds to the first node to join.
@ ha_cluster.corosync.rings.all Corosync rings information
-Value is 1 if a ring exists. The details of the corrosponding ring
+Value is 1 if a ring exists. The details of the corresponding ring
is given as label metadata values for this metric.
@ ha_cluster.sbd.devices.path Path of SBD device
@@ -204,7 +204,7 @@ The current status given for each of the SBD devices, the value is one of
healthy or unhealthy.
@ ha_cluster.sbd.all SBD device information
-Value is 1 if a sbd device exists. The details of the corrosponding SBD device
+Value is 1 if a sbd device exists. The details of the corresponding SBD device
is given as label metadata values for this metric.
@ ha_cluster.sbd.timeouts.mgswait mgswait timeout value
@@ -240,7 +240,7 @@ for the resource:volume.
The number of open requests to the local I/O subsystem by DRBD for the
resource:volume.
-@ ha_cluster.drbd.quorum Quorum satus of DRBD resource:volume
+@ ha_cluster.drbd.quorum Quorum status of DRBD resource:volume
The Quorum status of the DRBD resource according to resource:volume, 1 is
quorate and 0 is non-quorate.
@@ -265,23 +265,23 @@ The number of requests sent which have not yet been acknowledged by DRBD for
the resource:volume.
@ ha_cluster.drbd.split_brain Signal for split brain detection.
-This metric signals if there has been a split brain occuring in DRBD for the
+This metric signals if there has been a split brain occurring in DRBD for the
resource:volume, value is 1 is a split brain has been detected.
@ ha_cluster.drbd.resources.resource Name of the resource
The name given for the DRBD resource for each resource:volume.
@ ha_cluster.drbd.resources.role Role of the resource
-The resported role for ther DRBD resource for each resource:volume.
+The reported role for the DRBD resource for each resource:volume.
@ ha_cluster.drbd.resources.volume Volume of the resource
-The volume number of ther resource for each resource:volume.
+The volume number of the resource for each resource:volume.
@ ha_cluster.drbd.resources.disk_state Disk state
The current reported disk state of for the resource:volume.
@ ha_cluster.drbd.resources.all DRBD resource information
-Value is 1 if a drbd resource exists. The details of the corrosponding drbd resource
+Value is 1 if a drbd resource exists. The details of the corresponding drbd resource
is given as label metadata values for this metric.
@ ha_cluster.drbd.connections.resource Resource that the connection is for
@@ -300,5 +300,5 @@ The reported volume for the connection.
The reported peer disk state for the connection.
@ ha_cluster.drbd.connections.all DRBD Peer disk information
-Value is 1 if a drbd peer connection exists. The details of the corrosponding DRBD peer
+Value is 1 if a drbd peer connection exists. The details of the corresponding DRBD peer
connection is given as label metadata values for this metric.
--
2.31.1
From b17119354e2b044a7ba5c9cd36cb6da59ab9ed05 Mon Sep 17 00:00:00 2001
From: Paul Evans <pevans@redhat.com>
Date: Mon, 21 Jun 2021 18:46:32 +0100
Subject: [PATCH 7/7] pmdahacluster: Remove un-needed pmdaCacheLookupName()
calls
Update the instance refresh code for the label-supported metrics to
remove the superfluous pmdaCacheLookupName() calls.
The label-supported indoms are updated in lockstep with the non-label
indoms, so there is no need to look each instance up again immediately
after it has just been refreshed for the non-label indom.
This also fixes the Coverity defects reported by Nathan for unchecked returns
on these calls.
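As a rough sketch of the resulting pattern (the example_* names and structure
are hypothetical; pmdaCacheLookupName and pmdaCacheStore are the real cache
primitives used in pmda.c): a single lookup against the primary indom is
enough, and the matching *_ALL label indom is simply stored alongside it.

    #include <stdlib.h>
    #include <pcp/pmapi.h>
    #include <pcp/pmda.h>

    struct example { int placeholder; };   /* stand-in private data */

    /* Sketch only: refresh one instance in the primary indom and keep
     * the label-only *_ALL indom in lockstep, without a second lookup. */
    static int
    example_instance_refresh(pmInDom indom, pmInDom indom_all, const char *name)
    {
        struct example *ep;
        int sts;

        sts = pmdaCacheLookupName(indom, name, NULL, (void **)&ep);
        if (sts == PM_ERR_INST || (sts >= 0 && ep == NULL)) {
            if ((ep = calloc(1, sizeof(*ep))) == NULL)
                return -oserror();
        }
        else if (sts < 0)
            return sts;

        pmdaCacheStore(indom, PMDA_CACHE_ADD, name, (void *)ep);
        /* the *_ALL indom carries no private data, it exists only to
         * hang per-instance labels off */
        pmdaCacheStore(indom_all, PMDA_CACHE_ADD, name, NULL);
        return 0;
    }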
---
src/pmdas/hacluster/pmda.c | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/src/pmdas/hacluster/pmda.c b/src/pmdas/hacluster/pmda.c
index 6c9163b25..5be6d3668 100644
--- a/src/pmdas/hacluster/pmda.c
+++ b/src/pmdas/hacluster/pmda.c
@@ -495,7 +495,6 @@ hacluster_pacemaker_constraints_instance_refresh(void)
struct pacemaker_constraints *constraints;
sts = pmdaCacheLookupName(indom, constraint_name, NULL, (void **)&constraints);
- pmdaCacheLookupName(indom_all, constraint_name, NULL, NULL);
if (sts == PM_ERR_INST || (sts >=0 && constraints == NULL)) {
constraints = calloc(1, sizeof(struct pacemaker_constraints));
if (constraints == NULL) {
@@ -636,7 +635,6 @@ hacluster_pacemaker_node_attrib_instance_refresh(void)
struct pacemaker_node_attrib *node_attrib;
sts = pmdaCacheLookupName(indom, instance_name, NULL, (void **)&node_attrib);
- pmdaCacheLookupName(indom_all, instance_name, NULL, NULL);
if (sts == PM_ERR_INST || (sts >=0 && node_attrib == NULL)) {
node_attrib = calloc(1, sizeof(struct pacemaker_node_attrib));
if (node_attrib == NULL) {
@@ -716,7 +714,6 @@ hacluster_pacemaker_resources_instance_refresh(void)
struct pacemaker_resources *pace_resources;
sts = pmdaCacheLookupName(indom, instance_name, NULL, (void **)&pace_resources);
- pmdaCacheLookupName(indom_all, instance_name, NULL, NULL);
if (sts == PM_ERR_INST || (sts >=0 && pace_resources == NULL)) {
pace_resources = calloc(1, sizeof(struct pacemaker_resources));
if (pace_resources == NULL) {
@@ -842,7 +839,6 @@ hacluster_corosync_ring_instance_refresh(void)
struct corosync_ring *ring;
sts = pmdaCacheLookupName(indom, ring_name, NULL, (void **)&ring);
- pmdaCacheLookupName(indom_all, ring_name, NULL, NULL);
if (sts == PM_ERR_INST || (sts >=0 && ring == NULL)) {
ring = calloc(1, sizeof(struct corosync_ring));
if (ring == NULL) {
@@ -914,7 +910,6 @@ hacluster_sbd_device_instance_refresh(void)
struct sbd_device *sbd;
sts = pmdaCacheLookupName(indom, dev_name, NULL, (void **)&sbd);
- pmdaCacheLookupName(indom_all, dev_name, NULL, NULL);
if (sts == PM_ERR_INST || (sts >=0 && sbd == NULL)) {
sbd = calloc(1, sizeof(struct sbd_device));
if (sbd == NULL) {
@@ -997,7 +992,6 @@ hacluster_drbd_resource_instance_refresh(void)
struct drbd_resource *resource;
sts = pmdaCacheLookupName(indom, resource_name, NULL, (void **)&resource);
- pmdaCacheLookupName(indom_all, resource_name, NULL, NULL);
if (sts == PM_ERR_INST || (sts >=0 && resource == NULL)) {
resource = calloc(1, sizeof(struct drbd_resource));
if (resource == NULL) {
@@ -1010,6 +1004,7 @@ hacluster_drbd_resource_instance_refresh(void)
pmdaCacheStore(indom, PMDA_CACHE_ADD, resource_name, (void *)resource);
pmdaCacheStore(indom_all, PMDA_CACHE_ADD, resource_name, NULL);
+
found_volume = 0;
}
}
@@ -1078,7 +1073,6 @@ hacluster_drbd_peer_device_instance_refresh(void)
struct drbd_peer_device *peer_device;
sts = pmdaCacheLookupName(indom, peer_name, NULL, (void **)&peer_device);
- pmdaCacheLookupName(indom_all, peer_name, NULL, NULL);
if (sts == PM_ERR_INST || (sts >=0 && peer_device == NULL)) {
peer_device = calloc(1, sizeof(struct drbd_peer_device));
if (peer_device == NULL) {
@@ -1091,6 +1085,7 @@ hacluster_drbd_peer_device_instance_refresh(void)
pmdaCacheStore(indom, PMDA_CACHE_ADD, peer_name, (void *)peer_device);
pmdaCacheStore(indom_all, PMDA_CACHE_ADD, peer_name, NULL);
+
found_peer_node = 0;
}
}
--
2.31.1