pacemaker/010-probe-pending.patch
Ken Gaillot e32fe21465 Backport selected fixes from upstream 2.1.1-rc1 release
- Avoid selecting wrong device when dynamic-list fencing is used with host map
- Show better error messages in crm_resource with invalid resource types
- Do not schedule probes of unmanaged resources on pending nodes
- Fix regressions in crm_attribute and crm_master argument handling
- Resolves: rhbz1978013
- Resolves: rhbz1983196
- Resolves: rhbz1983197
- Resolves: rhbz1984130
2021-07-30 10:27:21 -05:00

From b0347f7b8e609420a7055d5fe537cc40ac0d1bb2 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Fri, 16 Jul 2021 11:08:05 -0500
Subject: [PATCH 1/3] Fix: scheduler: don't schedule probes of unmanaged
resources on pending nodes
Previously, custom_action() would set an action's optional or runnable flag in
the same, exclusive if-else sequence. This meant that if an action should be
optional *and* unrunnable, only one of the two would be set. In particular, if
a resource is unmanaged *and* its allocated node is pending, any probe would be
marked optional but not unrunnable, and the controller could wrongly attempt
the probe before the join completed.

Now, optional is checked separately.
---
lib/pengine/utils.c | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index 5ef742e..965824b 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -541,6 +541,20 @@ custom_action(pe_resource_t * rsc, char *key, const char *task,
FALSE, data_set);
}
+ // Make the action optional if its resource is unmanaged
+ if (!pcmk_is_set(action->flags, pe_action_pseudo)
+ && (action->node != NULL)
+ && !pcmk_is_set(action->rsc->flags, pe_rsc_managed)
+ && (g_hash_table_lookup(action->meta,
+ XML_LRM_ATTR_INTERVAL_MS) == NULL)) {
+ pe_rsc_debug(rsc, "%s on %s is optional (%s is unmanaged)",
+ action->uuid, action->node->details->uname,
+ action->rsc->id);
+ pe__set_action_flags(action, pe_action_optional);
+ // We shouldn't clear runnable here because ... something
+ }
+
+ // Make the action runnable or unrunnable as appropriate
if (pcmk_is_set(action->flags, pe_action_pseudo)) {
/* leave untouched */
@@ -549,14 +563,6 @@ custom_action(pe_resource_t * rsc, char *key, const char *task,
action->uuid);
pe__clear_action_flags(action, pe_action_runnable);
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
- && g_hash_table_lookup(action->meta,
- XML_LRM_ATTR_INTERVAL_MS) == NULL) {
- pe_rsc_debug(rsc, "%s on %s is optional (%s is unmanaged)",
- action->uuid, action->node->details->uname, rsc->id);
- pe__set_action_flags(action, pe_action_optional);
- //pe__clear_action_flags(action, pe_action_runnable);
-
} else if (!pcmk_is_set(action->flags, pe_action_dc)
&& !(action->node->details->online)
&& (!pe__is_guest_node(action->node)
--
1.8.3.1
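
To make the control-flow change above easier to follow, here is a minimal,
self-contained C sketch. It is not the real Pacemaker code: the flag values,
struct action, and check_action_*() helpers below are hypothetical stand-ins
that only illustrate why an exclusive if/else chain can apply at most one of
two independent flag updates, and how hoisting the optional check out lets
both be applied.

/*
 * Simplified illustration of the bug described in the commit message; the
 * names here are editorial inventions, not the Pacemaker API.
 */
#include <stdbool.h>
#include <stdio.h>

#define FLAG_OPTIONAL  0x01
#define FLAG_RUNNABLE  0x02

struct action {
    unsigned int flags;
    bool rsc_managed;   /* resource is managed */
    bool node_online;   /* allocated node has completed its join */
};

/* Old logic: one exclusive if/else chain decides *either* optionality *or*
 * runnability, so an action that should be optional AND unrunnable only
 * receives the first matching treatment. */
static void check_action_old(struct action *a)
{
    if (!a->rsc_managed) {
        a->flags |= FLAG_OPTIONAL;      /* probe of unmanaged resource */
    } else if (!a->node_online) {
        a->flags &= ~FLAG_RUNNABLE;     /* never reached when unmanaged */
    }
}

/* New logic: optionality is decided separately, then runnability. */
static void check_action_new(struct action *a)
{
    if (!a->rsc_managed) {
        a->flags |= FLAG_OPTIONAL;
    }
    if (!a->node_online) {
        a->flags &= ~FLAG_RUNNABLE;
    }
}

int main(void)
{
    /* Unmanaged resource whose allocated node is still pending. */
    struct action old_a = { FLAG_RUNNABLE, false, false };
    struct action new_a = { FLAG_RUNNABLE, false, false };

    check_action_old(&old_a);
    check_action_new(&new_a);

    /* Old: optional but still runnable, so the probe may be attempted on a
     * pending node.  New: optional and unrunnable. */
    printf("old: optional=%d runnable=%d\n",
           !!(old_a.flags & FLAG_OPTIONAL), !!(old_a.flags & FLAG_RUNNABLE));
    printf("new: optional=%d runnable=%d\n",
           !!(new_a.flags & FLAG_OPTIONAL), !!(new_a.flags & FLAG_RUNNABLE));
    return 0;
}

Compiled with any C99 compiler, the "old" variant prints optional=1
runnable=1 while the "new" variant prints optional=1 runnable=0, matching the
behavior change the patch describes.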
From 520303b90eb707f5b7a9afa9b106e4a38b90f0f9 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Wed, 14 Jul 2021 17:18:44 -0500
Subject: [PATCH 2/3] Test: scheduler: update existing tests for probe
scheduling change
This is an improvement. Looking at bundle-probe-order-2, for example, the
bundle's first instance starts with this status:
* Replica[0]
* galera (ocf::heartbeat:galera): Stopped (unmanaged)
* galera-bundle-docker-0 (ocf::heartbeat:docker): Started centos2 (unmanaged)
* galera-bundle-0 (ocf::pacemaker:remote): Started centos2 (unmanaged)
After the changes, we now schedule recurring monitors for
galera-bundle-docker-0 and galera-bundle-0 on centos2, and a probe of galera:0
on galera-bundle-0, all of which are possible.
---
cts/scheduler/dot/bundle-probe-order-2.dot | 3 ++
cts/scheduler/dot/bundle-probe-order-3.dot | 1 +
cts/scheduler/exp/bundle-probe-order-2.exp | 33 ++++++++++++++++++++--
cts/scheduler/exp/bundle-probe-order-3.exp | 21 ++++++++++----
cts/scheduler/summary/bundle-probe-order-2.summary | 3 ++
cts/scheduler/summary/bundle-probe-order-3.summary | 1 +
6 files changed, 53 insertions(+), 9 deletions(-)
diff --git a/cts/scheduler/dot/bundle-probe-order-2.dot b/cts/scheduler/dot/bundle-probe-order-2.dot
index 0cce3fd..7706195 100644
--- a/cts/scheduler/dot/bundle-probe-order-2.dot
+++ b/cts/scheduler/dot/bundle-probe-order-2.dot
@@ -1,6 +1,9 @@
digraph "g" {
+"galera-bundle-0_monitor_30000 centos2" [ style=bold color="green" fontcolor="black"]
+"galera-bundle-docker-0_monitor_60000 centos2" [ style=bold color="green" fontcolor="black"]
"galera-bundle-docker-1_monitor_0 centos2" [ style=bold color="green" fontcolor="black"]
"galera-bundle-docker-2_monitor_0 centos1" [ style=bold color="green" fontcolor="black"]
"galera-bundle-docker-2_monitor_0 centos2" [ style=bold color="green" fontcolor="black"]
"galera-bundle-docker-2_monitor_0 centos3" [ style=bold color="green" fontcolor="black"]
+"galera:0_monitor_0 galera-bundle-0" [ style=bold color="green" fontcolor="black"]
}
diff --git a/cts/scheduler/dot/bundle-probe-order-3.dot b/cts/scheduler/dot/bundle-probe-order-3.dot
index a4b109f..53a384b 100644
--- a/cts/scheduler/dot/bundle-probe-order-3.dot
+++ b/cts/scheduler/dot/bundle-probe-order-3.dot
@@ -2,6 +2,7 @@
"galera-bundle-0_monitor_0 centos1" [ style=bold color="green" fontcolor="black"]
"galera-bundle-0_monitor_0 centos2" [ style=bold color="green" fontcolor="black"]
"galera-bundle-0_monitor_0 centos3" [ style=bold color="green" fontcolor="black"]
+"galera-bundle-docker-0_monitor_60000 centos2" [ style=bold color="green" fontcolor="black"]
"galera-bundle-docker-1_monitor_0 centos2" [ style=bold color="green" fontcolor="black"]
"galera-bundle-docker-2_monitor_0 centos1" [ style=bold color="green" fontcolor="black"]
"galera-bundle-docker-2_monitor_0 centos2" [ style=bold color="green" fontcolor="black"]
diff --git a/cts/scheduler/exp/bundle-probe-order-2.exp b/cts/scheduler/exp/bundle-probe-order-2.exp
index d6174e7..5b28050 100644
--- a/cts/scheduler/exp/bundle-probe-order-2.exp
+++ b/cts/scheduler/exp/bundle-probe-order-2.exp
@@ -1,6 +1,33 @@
<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
<synapse id="0">
<action_set>
+ <rsc_op id="14" operation="monitor" operation_key="galera:0_monitor_0" on_node="galera-bundle-0" on_node_uuid="galera-bundle-0" router_node="centos2">
+ <primitive id="galera" long-id="galera:0" class="ocf" provider="heartbeat" type="galera"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="galera-bundle-0" CRM_meta_on_node_uuid="galera-bundle-0" CRM_meta_op_target_rc="7" CRM_meta_physical_host="centos2" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="30000" cluster_host_map="centos1:centos1;centos2:centos2;centos3:centos3" enable_creation="true" wsrep_cluster_address="gcomm://centos1,centos2,centos3"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="16" operation="monitor" operation_key="galera-bundle-docker-0_monitor_60000" on_node="centos2" on_node_uuid="2">
+ <primitive id="galera-bundle-docker-0" class="ocf" provider="heartbeat" type="docker"/>
+ <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="centos2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="docker.io/tripleoupstream/centos-binary-mariadb:latest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/galera-bundle-0" reuse="false" run_cmd="/usr/sbin/pacemaker_remoted" run_opts=" --restart=no -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3123 -v /foo:/etc/libqb/force-filesystem-sockets:ro -v /etc/my.cnf.d/galera.cnf:/etc/my.cnf.d/galera.cnf:ro -v /var/lib/mysql:/var/lib/mysql:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/galera-bundle-0:/var/log --user=root --log-driver=journald "/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="18" operation="monitor" operation_key="galera-bundle-0_monitor_30000" on_node="centos2" on_node_uuid="2">
+ <primitive id="galera-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="galera-bundle-docker-0" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="centos2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="30000" addr="centos2" port="3123"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
<rsc_op id="7" operation="monitor" operation_key="galera-bundle-docker-1_monitor_0" on_node="centos2" on_node_uuid="2">
<primitive id="galera-bundle-docker-1" class="ocf" provider="heartbeat" type="docker"/>
<attributes CRM_meta_on_node="centos2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="docker.io/tripleoupstream/centos-binary-mariadb:latest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/galera-bundle-1" reuse="false" run_cmd="/usr/sbin/pacemaker_remoted" run_opts=" --restart=no -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3123 -v /foo:/etc/libqb/force-filesystem-sockets:ro -v /etc/my.cnf.d/galera.cnf:/etc/my.cnf.d/galera.cnf:ro -v /var/lib/mysql:/var/lib/mysql:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/galera-bundle-1:/var/log --user=root --log-driver=journald "/>
@@ -8,7 +35,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="1">
+ <synapse id="4">
<action_set>
<rsc_op id="12" operation="monitor" operation_key="galera-bundle-docker-2_monitor_0" on_node="centos3" on_node_uuid="3">
<primitive id="galera-bundle-docker-2" class="ocf" provider="heartbeat" type="docker"/>
@@ -17,7 +44,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="2">
+ <synapse id="5">
<action_set>
<rsc_op id="9" operation="monitor" operation_key="galera-bundle-docker-2_monitor_0" on_node="centos2" on_node_uuid="2">
<primitive id="galera-bundle-docker-2" class="ocf" provider="heartbeat" type="docker"/>
@@ -26,7 +53,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="3">
+ <synapse id="6">
<action_set>
<rsc_op id="5" operation="monitor" operation_key="galera-bundle-docker-2_monitor_0" on_node="centos1" on_node_uuid="1">
<primitive id="galera-bundle-docker-2" class="ocf" provider="heartbeat" type="docker"/>
diff --git a/cts/scheduler/exp/bundle-probe-order-3.exp b/cts/scheduler/exp/bundle-probe-order-3.exp
index e1f60e7..69140a4 100644
--- a/cts/scheduler/exp/bundle-probe-order-3.exp
+++ b/cts/scheduler/exp/bundle-probe-order-3.exp
@@ -1,6 +1,15 @@
<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
<synapse id="0">
<action_set>
+ <rsc_op id="16" operation="monitor" operation_key="galera-bundle-docker-0_monitor_60000" on_node="centos2" on_node_uuid="2">
+ <primitive id="galera-bundle-docker-0" class="ocf" provider="heartbeat" type="docker"/>
+ <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="centos2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="docker.io/tripleoupstream/centos-binary-mariadb:latest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/galera-bundle-0" reuse="false" run_cmd="/usr/sbin/pacemaker_remoted" run_opts=" --restart=no -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3123 -v /foo:/etc/libqb/force-filesystem-sockets:ro -v /etc/my.cnf.d/galera.cnf:/etc/my.cnf.d/galera.cnf:ro -v /var/lib/mysql:/var/lib/mysql:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/galera-bundle-0:/var/log --user=root --log-driver=journald "/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="1">
+ <action_set>
<rsc_op id="11" operation="monitor" operation_key="galera-bundle-0_monitor_0" on_node="centos3" on_node_uuid="3">
<primitive id="galera-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
<attributes CRM_meta_container="galera-bundle-docker-0" CRM_meta_on_node="centos3" CRM_meta_on_node_uuid="3" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="centos2" port="3123"/>
@@ -8,7 +17,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="1">
+ <synapse id="2">
<action_set>
<rsc_op id="6" operation="monitor" operation_key="galera-bundle-0_monitor_0" on_node="centos2" on_node_uuid="2">
<primitive id="galera-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -17,7 +26,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="2">
+ <synapse id="3">
<action_set>
<rsc_op id="3" operation="monitor" operation_key="galera-bundle-0_monitor_0" on_node="centos1" on_node_uuid="1">
<primitive id="galera-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -26,7 +35,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="3">
+ <synapse id="4">
<action_set>
<rsc_op id="7" operation="monitor" operation_key="galera-bundle-docker-1_monitor_0" on_node="centos2" on_node_uuid="2">
<primitive id="galera-bundle-docker-1" class="ocf" provider="heartbeat" type="docker"/>
@@ -35,7 +44,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="4">
+ <synapse id="5">
<action_set>
<rsc_op id="13" operation="monitor" operation_key="galera-bundle-docker-2_monitor_0" on_node="centos3" on_node_uuid="3">
<primitive id="galera-bundle-docker-2" class="ocf" provider="heartbeat" type="docker"/>
@@ -44,7 +53,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="5">
+ <synapse id="6">
<action_set>
<rsc_op id="9" operation="monitor" operation_key="galera-bundle-docker-2_monitor_0" on_node="centos2" on_node_uuid="2">
<primitive id="galera-bundle-docker-2" class="ocf" provider="heartbeat" type="docker"/>
@@ -53,7 +62,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="6">
+ <synapse id="7">
<action_set>
<rsc_op id="4" operation="monitor" operation_key="galera-bundle-docker-2_monitor_0" on_node="centos1" on_node_uuid="1">
<primitive id="galera-bundle-docker-2" class="ocf" provider="heartbeat" type="docker"/>
diff --git a/cts/scheduler/summary/bundle-probe-order-2.summary b/cts/scheduler/summary/bundle-probe-order-2.summary
index 681d607..024c472 100644
--- a/cts/scheduler/summary/bundle-probe-order-2.summary
+++ b/cts/scheduler/summary/bundle-probe-order-2.summary
@@ -13,6 +13,9 @@ Current cluster status:
Transition Summary:
Executing Cluster Transition:
+ * Resource action: galera:0 monitor on galera-bundle-0
+ * Resource action: galera-bundle-docker-0 monitor=60000 on centos2
+ * Resource action: galera-bundle-0 monitor=30000 on centos2
* Resource action: galera-bundle-docker-1 monitor on centos2
* Resource action: galera-bundle-docker-2 monitor on centos3
* Resource action: galera-bundle-docker-2 monitor on centos2
diff --git a/cts/scheduler/summary/bundle-probe-order-3.summary b/cts/scheduler/summary/bundle-probe-order-3.summary
index f089618..331bd87 100644
--- a/cts/scheduler/summary/bundle-probe-order-3.summary
+++ b/cts/scheduler/summary/bundle-probe-order-3.summary
@@ -12,6 +12,7 @@ Current cluster status:
Transition Summary:
Executing Cluster Transition:
+ * Resource action: galera-bundle-docker-0 monitor=60000 on centos2
* Resource action: galera-bundle-0 monitor on centos3
* Resource action: galera-bundle-0 monitor on centos2
* Resource action: galera-bundle-0 monitor on centos1
--
1.8.3.1
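
The recurring monitors added to the expected transitions above stay
schedulable because the optional check introduced in patch 1 applies only to
operations with no interval, i.e. probes and other one-shot actions. The
following minimal C sketch restates that guard with a hypothetical
probe_is_optional() helper; it is an assumption-laden illustration, not the
real custom_action() internals.

/* Editorial sketch only: probe_is_optional() and its arguments are
 * hypothetical stand-ins for the guard inside custom_action(). */
#include <stdbool.h>
#include <stdio.h>

/* An action on an unmanaged resource is made optional only when it carries
 * no interval (a probe or other one-shot operation); recurring monitors keep
 * their normal scheduling. */
static bool probe_is_optional(bool rsc_managed, const char *interval_ms)
{
    return !rsc_managed && (interval_ms == NULL);
}

int main(void)
{
    /* Probe (no interval) of an unmanaged resource: optional, so it can be
     * suppressed while its node is still pending. */
    printf("probe   -> optional=%d\n", probe_is_optional(false, NULL));

    /* 60-second recurring monitor of the same unmanaged resource: not made
     * optional, which is why bundle-probe-order-2 now schedules
     * galera-bundle-docker-0_monitor_60000 on centos2. */
    printf("monitor -> optional=%d\n", probe_is_optional(false, "60000"));
    return 0;
}

The same distinction is consistent with the empty transition graph in the
probe-pending-node test added by the next patch, where only interval-less
probes would otherwise have been scheduled.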
From cb9c294a7ef22916866e0e42e51e88c2b1a61c2e Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Wed, 14 Jul 2021 17:23:11 -0500
Subject: [PATCH 3/3] Test: scheduler: add test for probe of unmanaged resource
on pending node
No probes should be scheduled in this case: all resources are unmanaged and
the target node is still pending.
---
cts/cts-scheduler.in | 1 +
cts/scheduler/dot/probe-pending-node.dot | 2 +
cts/scheduler/exp/probe-pending-node.exp | 1 +
cts/scheduler/scores/probe-pending-node.scores | 61 ++++++
cts/scheduler/summary/probe-pending-node.summary | 55 +++++
cts/scheduler/xml/probe-pending-node.xml | 247 +++++++++++++++++++++++
6 files changed, 367 insertions(+)
create mode 100644 cts/scheduler/dot/probe-pending-node.dot
create mode 100644 cts/scheduler/exp/probe-pending-node.exp
create mode 100644 cts/scheduler/scores/probe-pending-node.scores
create mode 100644 cts/scheduler/summary/probe-pending-node.summary
create mode 100644 cts/scheduler/xml/probe-pending-node.xml
diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index fc9790b..7ba2415 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -110,6 +110,7 @@ TESTS = [
[ "probe-2", "Correctly re-probe cloned groups" ],
[ "probe-3", "Probe (pending node)" ],
[ "probe-4", "Probe (pending node + stopped resource)" ],
+ [ "probe-pending-node", "Probe (pending node + unmanaged resource)" ],
[ "standby", "Standby" ],
[ "comments", "Comments" ],
],
diff --git a/cts/scheduler/dot/probe-pending-node.dot b/cts/scheduler/dot/probe-pending-node.dot
new file mode 100644
index 0000000..d8f1c9f
--- /dev/null
+++ b/cts/scheduler/dot/probe-pending-node.dot
@@ -0,0 +1,2 @@
+ digraph "g" {
+}
diff --git a/cts/scheduler/exp/probe-pending-node.exp b/cts/scheduler/exp/probe-pending-node.exp
new file mode 100644
index 0000000..56e315f
--- /dev/null
+++ b/cts/scheduler/exp/probe-pending-node.exp
@@ -0,0 +1 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0"/>
diff --git a/cts/scheduler/scores/probe-pending-node.scores b/cts/scheduler/scores/probe-pending-node.scores
new file mode 100644
index 0000000..020a1a0
--- /dev/null
+++ b/cts/scheduler/scores/probe-pending-node.scores
@@ -0,0 +1,61 @@
+
+pcmk__clone_allocate: fs_UC5_SAPMNT-clone allocation score on gcdoubwap01: 0
+pcmk__clone_allocate: fs_UC5_SAPMNT-clone allocation score on gcdoubwap02: 0
+pcmk__clone_allocate: fs_UC5_SAPMNT:0 allocation score on gcdoubwap01: 0
+pcmk__clone_allocate: fs_UC5_SAPMNT:0 allocation score on gcdoubwap02: 0
+pcmk__clone_allocate: fs_UC5_SAPMNT:1 allocation score on gcdoubwap01: 0
+pcmk__clone_allocate: fs_UC5_SAPMNT:1 allocation score on gcdoubwap02: 0
+pcmk__clone_allocate: fs_UC5_SYS-clone allocation score on gcdoubwap01: 0
+pcmk__clone_allocate: fs_UC5_SYS-clone allocation score on gcdoubwap02: 0
+pcmk__clone_allocate: fs_UC5_SYS:0 allocation score on gcdoubwap01: 0
+pcmk__clone_allocate: fs_UC5_SYS:0 allocation score on gcdoubwap02: 0
+pcmk__clone_allocate: fs_UC5_SYS:1 allocation score on gcdoubwap01: 0
+pcmk__clone_allocate: fs_UC5_SYS:1 allocation score on gcdoubwap02: 0
+pcmk__group_allocate: fs_UC5_ascs allocation score on gcdoubwap01: 0
+pcmk__group_allocate: fs_UC5_ascs allocation score on gcdoubwap02: 0
+pcmk__group_allocate: fs_UC5_ers allocation score on gcdoubwap01: 0
+pcmk__group_allocate: fs_UC5_ers allocation score on gcdoubwap02: 0
+pcmk__group_allocate: grp_UC5_ascs allocation score on gcdoubwap01: 0
+pcmk__group_allocate: grp_UC5_ascs allocation score on gcdoubwap02: 0
+pcmk__group_allocate: grp_UC5_ers allocation score on gcdoubwap01: 0
+pcmk__group_allocate: grp_UC5_ers allocation score on gcdoubwap02: 0
+pcmk__group_allocate: rsc_sap_UC5_ASCS11 allocation score on gcdoubwap01: 0
+pcmk__group_allocate: rsc_sap_UC5_ASCS11 allocation score on gcdoubwap02: 0
+pcmk__group_allocate: rsc_sap_UC5_ERS12 allocation score on gcdoubwap01: 0
+pcmk__group_allocate: rsc_sap_UC5_ERS12 allocation score on gcdoubwap02: 0
+pcmk__group_allocate: rsc_vip_gcp_ascs allocation score on gcdoubwap01: INFINITY
+pcmk__group_allocate: rsc_vip_gcp_ascs allocation score on gcdoubwap02: 0
+pcmk__group_allocate: rsc_vip_gcp_ers allocation score on gcdoubwap01: 0
+pcmk__group_allocate: rsc_vip_gcp_ers allocation score on gcdoubwap02: 0
+pcmk__group_allocate: rsc_vip_init_ers allocation score on gcdoubwap01: 0
+pcmk__group_allocate: rsc_vip_init_ers allocation score on gcdoubwap02: 0
+pcmk__group_allocate: rsc_vip_int_ascs allocation score on gcdoubwap01: 0
+pcmk__group_allocate: rsc_vip_int_ascs allocation score on gcdoubwap02: 0
+pcmk__native_allocate: fs_UC5_SAPMNT:0 allocation score on gcdoubwap01: 0
+pcmk__native_allocate: fs_UC5_SAPMNT:0 allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: fs_UC5_SAPMNT:1 allocation score on gcdoubwap01: 0
+pcmk__native_allocate: fs_UC5_SAPMNT:1 allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: fs_UC5_SYS:0 allocation score on gcdoubwap01: 0
+pcmk__native_allocate: fs_UC5_SYS:0 allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: fs_UC5_SYS:1 allocation score on gcdoubwap01: 0
+pcmk__native_allocate: fs_UC5_SYS:1 allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: fs_UC5_ascs allocation score on gcdoubwap01: 0
+pcmk__native_allocate: fs_UC5_ascs allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: fs_UC5_ers allocation score on gcdoubwap01: -INFINITY
+pcmk__native_allocate: fs_UC5_ers allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: rsc_sap_UC5_ASCS11 allocation score on gcdoubwap01: -INFINITY
+pcmk__native_allocate: rsc_sap_UC5_ASCS11 allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: rsc_sap_UC5_ERS12 allocation score on gcdoubwap01: -INFINITY
+pcmk__native_allocate: rsc_sap_UC5_ERS12 allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: rsc_vip_gcp_ascs allocation score on gcdoubwap01: -INFINITY
+pcmk__native_allocate: rsc_vip_gcp_ascs allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: rsc_vip_gcp_ers allocation score on gcdoubwap01: -INFINITY
+pcmk__native_allocate: rsc_vip_gcp_ers allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: rsc_vip_init_ers allocation score on gcdoubwap01: 0
+pcmk__native_allocate: rsc_vip_init_ers allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: rsc_vip_int_ascs allocation score on gcdoubwap01: INFINITY
+pcmk__native_allocate: rsc_vip_int_ascs allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: stonith_gcdoubwap01 allocation score on gcdoubwap01: -INFINITY
+pcmk__native_allocate: stonith_gcdoubwap01 allocation score on gcdoubwap02: 0
+pcmk__native_allocate: stonith_gcdoubwap02 allocation score on gcdoubwap01: 0
+pcmk__native_allocate: stonith_gcdoubwap02 allocation score on gcdoubwap02: -INFINITY
diff --git a/cts/scheduler/summary/probe-pending-node.summary b/cts/scheduler/summary/probe-pending-node.summary
new file mode 100644
index 0000000..208186b
--- /dev/null
+++ b/cts/scheduler/summary/probe-pending-node.summary
@@ -0,0 +1,55 @@
+Using the original execution date of: 2021-06-11 13:55:24Z
+
+ *** Resource management is DISABLED ***
+ The cluster will not attempt to start, stop or recover services
+
+Current cluster status:
+ * Node List:
+ * Node gcdoubwap02: pending
+ * Online: [ gcdoubwap01 ]
+
+ * Full List of Resources:
+ * stonith_gcdoubwap01 (stonith:fence_gce): Stopped (unmanaged)
+ * stonith_gcdoubwap02 (stonith:fence_gce): Stopped (unmanaged)
+ * Clone Set: fs_UC5_SAPMNT-clone [fs_UC5_SAPMNT] (unmanaged):
+ * Stopped: [ gcdoubwap01 gcdoubwap02 ]
+ * Clone Set: fs_UC5_SYS-clone [fs_UC5_SYS] (unmanaged):
+ * Stopped: [ gcdoubwap01 gcdoubwap02 ]
+ * Resource Group: grp_UC5_ascs (unmanaged):
+ * rsc_vip_int_ascs (ocf:heartbeat:IPaddr2): Stopped (unmanaged)
+ * rsc_vip_gcp_ascs (ocf:heartbeat:gcp-vpc-move-vip): Started gcdoubwap01 (unmanaged)
+ * fs_UC5_ascs (ocf:heartbeat:Filesystem): Stopped (unmanaged)
+ * rsc_sap_UC5_ASCS11 (ocf:heartbeat:SAPInstance): Stopped (unmanaged)
+ * Resource Group: grp_UC5_ers (unmanaged):
+ * rsc_vip_init_ers (ocf:heartbeat:IPaddr2): Stopped (unmanaged)
+ * rsc_vip_gcp_ers (ocf:heartbeat:gcp-vpc-move-vip): Stopped (unmanaged)
+ * fs_UC5_ers (ocf:heartbeat:Filesystem): Stopped (unmanaged)
+ * rsc_sap_UC5_ERS12 (ocf:heartbeat:SAPInstance): Stopped (unmanaged)
+
+Transition Summary:
+
+Executing Cluster Transition:
+Using the original execution date of: 2021-06-11 13:55:24Z
+
+Revised Cluster Status:
+ * Node List:
+ * Node gcdoubwap02: pending
+ * Online: [ gcdoubwap01 ]
+
+ * Full List of Resources:
+ * stonith_gcdoubwap01 (stonith:fence_gce): Stopped (unmanaged)
+ * stonith_gcdoubwap02 (stonith:fence_gce): Stopped (unmanaged)
+ * Clone Set: fs_UC5_SAPMNT-clone [fs_UC5_SAPMNT] (unmanaged):
+ * Stopped: [ gcdoubwap01 gcdoubwap02 ]
+ * Clone Set: fs_UC5_SYS-clone [fs_UC5_SYS] (unmanaged):
+ * Stopped: [ gcdoubwap01 gcdoubwap02 ]
+ * Resource Group: grp_UC5_ascs (unmanaged):
+ * rsc_vip_int_ascs (ocf:heartbeat:IPaddr2): Stopped (unmanaged)
+ * rsc_vip_gcp_ascs (ocf:heartbeat:gcp-vpc-move-vip): Started gcdoubwap01 (unmanaged)
+ * fs_UC5_ascs (ocf:heartbeat:Filesystem): Stopped (unmanaged)
+ * rsc_sap_UC5_ASCS11 (ocf:heartbeat:SAPInstance): Stopped (unmanaged)
+ * Resource Group: grp_UC5_ers (unmanaged):
+ * rsc_vip_init_ers (ocf:heartbeat:IPaddr2): Stopped (unmanaged)
+ * rsc_vip_gcp_ers (ocf:heartbeat:gcp-vpc-move-vip): Stopped (unmanaged)
+ * fs_UC5_ers (ocf:heartbeat:Filesystem): Stopped (unmanaged)
+ * rsc_sap_UC5_ERS12 (ocf:heartbeat:SAPInstance): Stopped (unmanaged)
diff --git a/cts/scheduler/xml/probe-pending-node.xml b/cts/scheduler/xml/probe-pending-node.xml
new file mode 100644
index 0000000..9f55c92
--- /dev/null
+++ b/cts/scheduler/xml/probe-pending-node.xml
@@ -0,0 +1,247 @@
+<cib crm_feature_set="3.0.14" validate-with="pacemaker-2.10" epoch="395" num_updates="30" admin_epoch="0" cib-last-written="Thu Jun 10 18:01:13 2021" update-origin="gcdoubwap01" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1" execution-date="1623419724">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.19-8.el7_6.5-c3c624ea3d"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="ascscluster"/>
+ <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="true"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1622815036"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="gcdoubwap01"/>
+ <node id="2" uname="gcdoubwap02"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="stonith_gcdoubwap01" type="fence_gce">
+ <instance_attributes id="stonith_gcdoubwap01-instance_attributes">
+ <nvpair id="stonith_gcdoubwap01-instance_attributes-project" name="project" value="pj-uat-do-nane1-01"/>
+ <nvpair id="stonith_gcdoubwap01-instance_attributes-zone" name="zone" value="northamerica-northeast1-b"/>
+ </instance_attributes>
+ <operations>
+ <op id="stonith_gcdoubwap01-monitor-interval-60s" interval="60s" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive class="stonith" id="stonith_gcdoubwap02" type="fence_gce">
+ <instance_attributes id="stonith_gcdoubwap02-instance_attributes">
+ <nvpair id="stonith_gcdoubwap02-instance_attributes-project" name="project" value="pj-uat-do-nane1-01"/>
+ <nvpair id="stonith_gcdoubwap02-instance_attributes-zone" name="zone" value="northamerica-northeast1-c"/>
+ </instance_attributes>
+ <operations>
+ <op id="stonith_gcdoubwap02-monitor-interval-60s" interval="60s" name="monitor"/>
+ </operations>
+ </primitive>
+ <clone id="fs_UC5_SAPMNT-clone">
+ <primitive class="ocf" id="fs_UC5_SAPMNT" provider="heartbeat" type="Filesystem">
+ <instance_attributes id="fs_UC5_SAPMNT-instance_attributes">
+ <nvpair id="fs_UC5_SAPMNT-instance_attributes-device" name="device" value="uatdoelfs.igmfinancial.net:UC5_sapmnt/root"/>
+ <nvpair id="fs_UC5_SAPMNT-instance_attributes-directory" name="directory" value="/sapmnt/UC5"/>
+ <nvpair id="fs_UC5_SAPMNT-instance_attributes-fstype" name="fstype" value="nfs"/>
+ </instance_attributes>
+ <operations>
+ <op id="fs_UC5_SAPMNT-monitor-interval-20s" interval="20s" name="monitor" timeout="40s"/>
+ <op id="fs_UC5_SAPMNT-notify-interval-0s" interval="0s" name="notify" timeout="60s"/>
+ <op id="fs_UC5_SAPMNT-start-interval-0s" interval="0s" name="start" timeout="60s"/>
+ <op id="fs_UC5_SAPMNT-stop-interval-0s" interval="0s" name="stop" timeout="60s"/>
+ </operations>
+ </primitive>
+ <meta_attributes id="fs_UC5_SAPMNT-clone-meta_attributes">
+ <nvpair id="fs_UC5_SAPMNT-clone-meta_attributes-interleave" name="interleave" value="true"/>
+ </meta_attributes>
+ </clone>
+ <clone id="fs_UC5_SYS-clone">
+ <primitive class="ocf" id="fs_UC5_SYS" provider="heartbeat" type="Filesystem">
+ <instance_attributes id="fs_UC5_SYS-instance_attributes">
+ <nvpair id="fs_UC5_SYS-instance_attributes-device" name="device" value="uatdoelfs.igmfinancial.net:UC5_SYS/root"/>
+ <nvpair id="fs_UC5_SYS-instance_attributes-directory" name="directory" value="/usr/sap/UC5/SYS"/>
+ <nvpair id="fs_UC5_SYS-instance_attributes-fstype" name="fstype" value="nfs"/>
+ </instance_attributes>
+ <operations>
+ <op id="fs_UC5_SYS-monitor-interval-20s" interval="20s" name="monitor" timeout="40s"/>
+ <op id="fs_UC5_SYS-notify-interval-0s" interval="0s" name="notify" timeout="60s"/>
+ <op id="fs_UC5_SYS-start-interval-0s" interval="0s" name="start" timeout="60s"/>
+ <op id="fs_UC5_SYS-stop-interval-0s" interval="0s" name="stop" timeout="60s"/>
+ </operations>
+ </primitive>
+ <meta_attributes id="fs_UC5_SYS-clone-meta_attributes">
+ <nvpair id="fs_UC5_SYS-clone-meta_attributes-interleave" name="interleave" value="true"/>
+ </meta_attributes>
+ </clone>
+ <group id="grp_UC5_ascs">
+ <primitive class="ocf" id="rsc_vip_int_ascs" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="rsc_vip_int_ascs-instance_attributes">
+ <nvpair id="rsc_vip_int_ascs-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="rsc_vip_int_ascs-instance_attributes-ip" name="ip" value="10.4.130.38"/>
+ <nvpair id="rsc_vip_int_ascs-instance_attributes-nic" name="nic" value="eth0"/>
+ </instance_attributes>
+ <operations>
+ <op id="rsc_vip_int_ascs-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="rsc_vip_int_ascs-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="rsc_vip_int_ascs-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="rsc_vip_gcp_ascs" provider="heartbeat" type="gcp-vpc-move-vip">
+ <instance_attributes id="rsc_vip_gcp_ascs-instance_attributes">
+ <nvpair id="rsc_vip_gcp_ascs-instance_attributes-alias_ip" name="alias_ip" value="10.4.130.38/32"/>
+ <nvpair id="rsc_vip_gcp_ascs-instance_attributes-hostlist" name="hostlist" value="gcdoubwap01 gcdoubwap02"/>
+ </instance_attributes>
+ <operations>
+ <op id="rsc_vip_gcp_ascs-monitor-interval-60s" interval="60s" name="monitor" on-fail="ignore"/>
+ <op id="rsc_vip_gcp_ascs-start-interval-0s" interval="0s" name="start" timeout="300s"/>
+ <op id="rsc_vip_gcp_ascs-stop-interval-0s" interval="0s" name="stop" timeout="15s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="fs_UC5_ascs" provider="heartbeat" type="Filesystem">
+ <instance_attributes id="fs_UC5_ascs-instance_attributes">
+ <nvpair id="fs_UC5_ascs-instance_attributes-device" name="device" value="uatdoelfs.igmfinancial.net:UC5_ASCS/root"/>
+ <nvpair id="fs_UC5_ascs-instance_attributes-directory" name="directory" value="/usr/sap/UC5/ASCS11"/>
+ <nvpair id="fs_UC5_ascs-instance_attributes-force_unmount" name="force_unmount" value="safe"/>
+ <nvpair id="fs_UC5_ascs-instance_attributes-fstype" name="fstype" value="nfs"/>
+ </instance_attributes>
+ <operations>
+ <op id="fs_UC5_ascs-monitor-interval-200" interval="200" name="monitor" timeout="40"/>
+ <op id="fs_UC5_ascs-notify-interval-0s" interval="0s" name="notify" timeout="60s"/>
+ <op id="fs_UC5_ascs-start-interval-0" interval="0" name="start" timeout="60"/>
+ <op id="fs_UC5_ascs-stop-interval-0" interval="0" name="stop" timeout="120"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="rsc_sap_UC5_ASCS11" provider="heartbeat" type="SAPInstance">
+ <instance_attributes id="rsc_sap_UC5_ASCS11-instance_attributes">
+ <nvpair id="rsc_sap_UC5_ASCS11-instance_attributes-AUTOMATIC_RECOVER" name="AUTOMATIC_RECOVER" value="false"/>
+ <nvpair id="rsc_sap_UC5_ASCS11-instance_attributes-InstanceName" name="InstanceName" value="UC5_ASCS11_uatdobwscs"/>
+ <nvpair id="rsc_sap_UC5_ASCS11-instance_attributes-START_PROFILE" name="START_PROFILE" value="/sapmnt/UC5/profile/UC5_ASCS11_uatdobwscs"/>
+ </instance_attributes>
+ <meta_attributes id="rsc_sap_UC5_ASCS11-meta_attributes">
+ <nvpair id="rsc_sap_UC5_ASCS11-meta_attributes-failure-timeout" name="failure-timeout" value="60"/>
+ <nvpair id="rsc_sap_UC5_ASCS11-meta_attributes-migration-threshold" name="migration-threshold" value="1"/>
+ <nvpair id="rsc_sap_UC5_ASCS11-meta_attributes-resource-stickiness" name="resource-stickiness" value="5000"/>
+ </meta_attributes>
+ <operations>
+ <op id="rsc_sap_UC5_ASCS11-demote-interval-0s" interval="0s" name="demote" timeout="320s"/>
+ <op id="rsc_sap_UC5_ASCS11-methods-interval-0s" interval="0s" name="methods" timeout="5s"/>
+ <op id="rsc_sap_UC5_ASCS11-monitor-interval-20" interval="20" name="monitor" on-fail="restart" timeout="60"/>
+ <op id="rsc_sap_UC5_ASCS11-promote-interval-0s" interval="0s" name="promote" timeout="320s"/>
+ <op id="rsc_sap_UC5_ASCS11-start-interval-0" interval="0" name="start" timeout="600"/>
+ <op id="rsc_sap_UC5_ASCS11-stop-interval-0" interval="0" name="stop" timeout="600"/>
+ </operations>
+ </primitive>
+ </group>
+ <group id="grp_UC5_ers">
+ <primitive class="ocf" id="rsc_vip_init_ers" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="rsc_vip_init_ers-instance_attributes">
+ <nvpair id="rsc_vip_init_ers-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="rsc_vip_init_ers-instance_attributes-ip" name="ip" value="10.4.130.39"/>
+ <nvpair id="rsc_vip_init_ers-instance_attributes-nic" name="nic" value="eth0"/>
+ </instance_attributes>
+ <operations>
+ <op id="rsc_vip_init_ers-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="rsc_vip_init_ers-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="rsc_vip_init_ers-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="rsc_vip_gcp_ers" provider="heartbeat" type="gcp-vpc-move-vip">
+ <instance_attributes id="rsc_vip_gcp_ers-instance_attributes">
+ <nvpair id="rsc_vip_gcp_ers-instance_attributes-alias_ip" name="alias_ip" value="10.4.130.39/32"/>
+ <nvpair id="rsc_vip_gcp_ers-instance_attributes-hostlist" name="hostlist" value="gcdoubwap01 gcdoubwap02"/>
+ </instance_attributes>
+ <operations>
+ <op id="rsc_vip_gcp_ers-monitor-interval-60s" interval="60s" name="monitor" on-fail="ignore"/>
+ <op id="rsc_vip_gcp_ers-start-interval-0s" interval="0s" name="start" timeout="300s"/>
+ <op id="rsc_vip_gcp_ers-stop-interval-0s" interval="0s" name="stop" timeout="180s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="fs_UC5_ers" provider="heartbeat" type="Filesystem">
+ <instance_attributes id="fs_UC5_ers-instance_attributes">
+ <nvpair id="fs_UC5_ers-instance_attributes-device" name="device" value="uatdoelfs.igmfinancial.net:UC5_ERS/root"/>
+ <nvpair id="fs_UC5_ers-instance_attributes-directory" name="directory" value="/usr/sap/UC5/ERS12"/>
+ <nvpair id="fs_UC5_ers-instance_attributes-force_unmount" name="force_unmount" value="safe"/>
+ <nvpair id="fs_UC5_ers-instance_attributes-fstype" name="fstype" value="nfs"/>
+ </instance_attributes>
+ <operations>
+ <op id="fs_UC5_ers-monitor-interval-200" interval="200" name="monitor" timeout="40"/>
+ <op id="fs_UC5_ers-notify-interval-0s" interval="0s" name="notify" timeout="60s"/>
+ <op id="fs_UC5_ers-start-interval-0" interval="0" name="start" timeout="60"/>
+ <op id="fs_UC5_ers-stop-interval-0" interval="0" name="stop" timeout="120"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="rsc_sap_UC5_ERS12" provider="heartbeat" type="SAPInstance">
+ <instance_attributes id="rsc_sap_UC5_ERS12-instance_attributes">
+ <nvpair id="rsc_sap_UC5_ERS12-instance_attributes-AUTOMATIC_RECOVER" name="AUTOMATIC_RECOVER" value="false"/>
+ <nvpair id="rsc_sap_UC5_ERS12-instance_attributes-IS_ERS" name="IS_ERS" value="true"/>
+ <nvpair id="rsc_sap_UC5_ERS12-instance_attributes-InstanceName" name="InstanceName" value="UC5_ERS12_uatdobwers"/>
+ <nvpair id="rsc_sap_UC5_ERS12-instance_attributes-START_PROFILE" name="START_PROFILE" value="/sapmnt/UC5/profile/UC5_ERS12_uatdobwers"/>
+ </instance_attributes>
+ <operations>
+ <op id="rsc_sap_UC5_ERS12-demote-interval-0s" interval="0s" name="demote" timeout="320s"/>
+ <op id="rsc_sap_UC5_ERS12-methods-interval-0s" interval="0s" name="methods" timeout="5s"/>
+ <op id="rsc_sap_UC5_ERS12-monitor-interval-20" interval="20" name="monitor" on-fail="restart" timeout="60"/>
+ <op id="rsc_sap_UC5_ERS12-promote-interval-0s" interval="0s" name="promote" timeout="320s"/>
+ <op id="rsc_sap_UC5_ERS12-start-interval-0" interval="0" name="start" timeout="600"/>
+ <op id="rsc_sap_UC5_ERS12-stop-interval-0" interval="0" name="stop" timeout="600"/>
+ </operations>
+ </primitive>
+ </group>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-grp_UC5_ers-grp_UC5_ascs--5000" rsc="grp_UC5_ers" score="-5000" with-rsc="grp_UC5_ascs"/>
+ <rsc_order first="grp_UC5_ascs" first-action="start" id="order-grp_UC5_ascs-grp_UC5_ers-mandatory" symmetrical="false" then="grp_UC5_ers" then-action="stop"/>
+ <rsc_location id="location-rsc_sap_UC5_ASCS11" rsc="rsc_sap_UC5_ASCS11">
+ <rule id="location-rsc_sap_UC5_ASCS11-rule" score="2000">
+ <expression attribute="runs_ERS_UC5" id="location-rsc_sap_UC5_ASCS11-rule-expr" operation="eq" value="1"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="location-stonith_gcdoubwap01-gcdoubwap02" node="gcdoubwap01" rsc="stonith_gcdoubwap01" score="-INFINITY"/>
+ <rsc_location id="location-stonith_gcdoubwap02-gcdoubwap01" node="gcdoubwap02" rsc="stonith_gcdoubwap02" score="-INFINITY"/>
+ <rsc_order first="fs_UC5_SAPMNT-clone" first-action="start" id="order-fs_UC5_SAPMNT-clone-grp_UC5_ascs-mandatory" then="grp_UC5_ascs" then-action="start"/>
+ <rsc_order first="fs_UC5_SAPMNT-clone" first-action="start" id="order-fs_UC5_SAPMNT-clone-grp_UC5_ers-mandatory" then="grp_UC5_ers" then-action="start"/>
+ </constraints>
+ </configuration>
+ <status>
+ <node_state id="1" uname="gcdoubwap01" in_ccm="true" crmd="online" crm-debug-origin="post_cache_update" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="stonith_gcdoubwap01" type="fence_gce" class="stonith">
+ <lrm_rsc_op id="stonith_gcdoubwap01_last_0" operation_key="stonith_gcdoubwap01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="1:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;1:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1623419700" last-rc-change="1623419700" exec-time="27" queue-time="0" op-digest="e6935031dfde569ad30fb442953d3d91"/>
+ </lrm_resource>
+ <lrm_resource id="stonith_gcdoubwap02" type="fence_gce" class="stonith">
+ <lrm_rsc_op id="stonith_gcdoubwap02_last_0" operation_key="stonith_gcdoubwap02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="2:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;2:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="9" rc-code="7" op-status="0" interval="0" last-run="1623419700" last-rc-change="1623419700" exec-time="0" queue-time="0" op-digest="064645c51d6d3a802eb6928f6116222c"/>
+ </lrm_resource>
+ <lrm_resource id="fs_UC5_SAPMNT" type="Filesystem" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="fs_UC5_SAPMNT_last_0" operation_key="fs_UC5_SAPMNT_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="3:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;3:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="14" rc-code="7" op-status="0" interval="0" last-run="1623419700" last-rc-change="1623419700" exec-time="126" queue-time="1" op-digest="02c74f325691f1af3c3dd9c2ab702b01"/>
+ </lrm_resource>
+ <lrm_resource id="fs_UC5_SYS" type="Filesystem" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="fs_UC5_SYS_last_0" operation_key="fs_UC5_SYS_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="4:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;4:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="19" rc-code="7" op-status="0" interval="0" last-run="1623419700" last-rc-change="1623419700" exec-time="130" queue-time="0" op-digest="f1f67b01fc16ed22d8fa1fe030d9c06b"/>
+ </lrm_resource>
+ <lrm_resource id="rsc_vip_int_ascs" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rsc_vip_int_ascs_last_0" operation_key="rsc_vip_int_ascs_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="5:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;5:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="23" rc-code="7" op-status="0" interval="0" last-run="1623419700" last-rc-change="1623419700" exec-time="105" queue-time="0" op-digest="da0b35400002727d7281b8f7051fe400"/>
+ </lrm_resource>
+ <lrm_resource id="rsc_vip_gcp_ascs" type="gcp-vpc-move-vip" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rsc_vip_gcp_ascs_last_0" operation_key="rsc_vip_gcp_ascs_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="6:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:0;6:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="27" rc-code="0" op-status="0" interval="0" last-run="1623419705" last-rc-change="1623419705" exec-time="1842" queue-time="0" op-digest="face88a40d76658d0caa541eefc02ca8"/>
+ <lrm_rsc_op id="rsc_vip_gcp_ascs_last_failure_0" operation_key="rsc_vip_gcp_ascs_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="6:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:0;6:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="27" rc-code="0" op-status="0" interval="0" last-run="1623419705" last-rc-change="1623419705" exec-time="1842" queue-time="0" op-digest="face88a40d76658d0caa541eefc02ca8"/>
+ </lrm_resource>
+ <lrm_resource id="fs_UC5_ascs" type="Filesystem" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="fs_UC5_ascs_last_0" operation_key="fs_UC5_ascs_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="7:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;7:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="31" rc-code="7" op-status="0" interval="0" last-run="1623419705" last-rc-change="1623419705" exec-time="241" queue-time="0" op-digest="acac63abd6c034d7dad4aae73e2ca95d"/>
+ </lrm_resource>
+ <lrm_resource id="rsc_sap_UC5_ASCS11" type="SAPInstance" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rsc_sap_UC5_ASCS11_last_0" operation_key="rsc_sap_UC5_ASCS11_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="8:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;8:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="35" rc-code="7" op-status="0" interval="0" last-run="1623419705" last-rc-change="1623419705" exec-time="969" queue-time="0" op-digest="08c114a33aa3c16b3204ff09cb983107" op-force-restart=" ERS_START_PROFILE ERS_InstanceName START_PROFILE InstanceName " op-restart-digest="315a463141e0ef59afedf7a62a8d6362"/>
+ </lrm_resource>
+ <lrm_resource id="rsc_vip_init_ers" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rsc_vip_init_ers_last_0" operation_key="rsc_vip_init_ers_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="9:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;9:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="39" rc-code="7" op-status="0" interval="0" last-run="1623419705" last-rc-change="1623419705" exec-time="1033" queue-time="0" op-digest="7b29d7af6a7baa6015d1eeac471a9b42"/>
+ </lrm_resource>
+ <lrm_resource id="rsc_vip_gcp_ers" type="gcp-vpc-move-vip" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rsc_vip_gcp_ers_last_0" operation_key="rsc_vip_gcp_ers_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="10:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;10:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="43" rc-code="7" op-status="0" interval="0" last-run="1623419705" last-rc-change="1623419705" exec-time="1702" queue-time="0" op-digest="10365a97fe5a5864a3679c314bf65bfd"/>
+ </lrm_resource>
+ <lrm_resource id="fs_UC5_ers" type="Filesystem" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="fs_UC5_ers_last_0" operation_key="fs_UC5_ers_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="11:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;11:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="47" rc-code="7" op-status="0" interval="0" last-run="1623419706" last-rc-change="1623419706" exec-time="709" queue-time="0" op-digest="61e45529b2da32c1e53055238a00ca99"/>
+ </lrm_resource>
+ <lrm_resource id="rsc_sap_UC5_ERS12" type="SAPInstance" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rsc_sap_UC5_ERS12_last_0" operation_key="rsc_sap_UC5_ERS12_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="12:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;12:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="51" rc-code="7" op-status="0" interval="0" last-run="1623419706" last-rc-change="1623419706" exec-time="914" queue-time="1" op-digest="b550e70bd4203af88473e4d914b11f87" op-force-restart=" ERS_START_PROFILE ERS_InstanceName START_PROFILE InstanceName " op-restart-digest="2fb6ec6eb77e25302c8dc0dad84dc46f"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="gcdoubwap02" crmd="offline" crm-debug-origin="post_cache_update" in_ccm="true"/>
+ </status>
+</cib>
--
1.8.3.1