From eae355ca4c869f7ccf1ad3d1f5ce488375a6f353 Mon Sep 17 00:00:00 2001
From: Reid Wahl
Date: Tue, 20 Apr 2021 12:55:45 -0700
Subject: [PATCH 01/19] Refactor: libpe_status: Add pe__rsc_node flag enum

This commit adds a new pe__rsc_node flag enum containing values for
assigned, current, and pending. These flags indicate the criterion used
to look up a resource's location. After a compatibility break,
native_location() could use these flags instead of an int.

Signed-off-by: Reid Wahl
---
 include/crm/pengine/internal.h | 14 ++++++++++++++
 lib/pengine/native.c           |  1 +
 2 files changed, 15 insertions(+)

diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index 1b5f6f1d8d9..8fcb9c991f3 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -235,6 +235,19 @@ bool pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node);
 
 void add_hash_param(GHashTable * hash, const char *name, const char *value);
 
+/*!
+ * \internal
+ * \enum pe__rsc_node
+ * \brief Type of resource location lookup to perform
+ */
+enum pe__rsc_node {
+    pe__rsc_node_assigned = (1 << 0), //!< Where resource is assigned
+    pe__rsc_node_current  = (1 << 1), //!< Where resource is running
+
+    // @COMPAT: Use in native_location() at a compatibility break
+    pe__rsc_node_pending  = (1 << 2), //!< Where resource is pending
+};
+
 char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create,
                        const char *name, pe_working_set_t * data_set);
 pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current);
@@ -576,6 +589,7 @@ bool pe__bundle_needs_remote_name(pe_resource_t *rsc);
 const char *pe__add_bundle_remote_name(pe_resource_t *rsc,
                                        pe_working_set_t *data_set,
                                        xmlNode *xml, const char *field);
+
 const char *pe_node_attribute_calculated(const pe_node_t *node,
                                          const char *name,
                                          const pe_resource_t *rsc);

diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 5e92ddcefdf..44d4805ac56 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -1092,6 +1092,7 @@ native_resource_state(const pe_resource_t * rsc, gboolean current)
 pe_node_t *
 native_location(const pe_resource_t *rsc, GList **list, int current)
 {
+    // @COMPAT: Accept a pe__rsc_node argument instead of int current
    pe_node_t *one = NULL;
    GList *result = NULL;

From 809b9c2ea13e5f32bfa6eecf3482eb257802b92d Mon Sep 17 00:00:00 2001
From: Reid Wahl
Date: Sun, 11 Sep 2022 19:36:07 -0700
Subject: [PATCH 02/19] Refactor: libpe_status: pe_node_attribute_calculated()
 accepts node type

Use enum pe__rsc_node in pe_node_attribute_calculated() to determine
which container host (assigned or current) to get the attribute value
from. For now, there's no use case for pending.

Pass pe__rsc_node_current for existing calls, since that maintains the
existing behavior.
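As a rough illustration of the choice the new argument enables, consider
the stand-alone sketch below. It uses simplified stand-in types and
hypothetical names (struct container, lookup_host), not the real
pe_node_t/pe_resource_t API:

    enum rsc_node_type { RSC_NODE_ASSIGNED, RSC_NODE_CURRENT };

    struct host { const char *name; };

    /* Reduced model of a guest node's container resource */
    struct container {
        struct host *allocated_to; /* host the container is assigned to */
        struct host *running_on;   /* host the container is active on */
    };

    /* Pick which host's attribute table to consult; a null result means
     * the lookup cannot be satisfied (not assigned, or inactive) */
    static struct host *
    lookup_host(const struct container *c, enum rsc_node_type type)
    {
        return (type == RSC_NODE_ASSIGNED)? c->allocated_to : c->running_on;
    }

    int main(void)
    {
        struct host n1 = { "node1" };
        struct host n2 = { "node2" };
        /* Assigned to node1 while still running on node2 */
        struct container c = { &n1, &n2 };

        return (lookup_host(&c, RSC_NODE_ASSIGNED) == &n1)? 0 : 1;
    }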
Signed-off-by: Reid Wahl --- include/crm/pengine/internal.h | 3 +- lib/pacemaker/pcmk_sched_location.c | 5 ++- lib/pacemaker/pcmk_sched_promotable.c | 3 +- lib/pengine/common.c | 60 ++++++++++++++++++++++----- 4 files changed, 57 insertions(+), 14 deletions(-) diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h index 8fcb9c991f3..ef8c382f62b 100644 --- a/include/crm/pengine/internal.h +++ b/include/crm/pengine/internal.h @@ -592,7 +592,8 @@ const char *pe__add_bundle_remote_name(pe_resource_t *rsc, const char *pe_node_attribute_calculated(const pe_node_t *node, const char *name, - const pe_resource_t *rsc); + const pe_resource_t *rsc, + enum pe__rsc_node node_type); const char *pe_node_attribute_raw(const pe_node_t *node, const char *name); bool pe__is_universal_clone(const pe_resource_t *rsc, const pe_working_set_t *data_set); diff --git a/lib/pacemaker/pcmk_sched_location.c b/lib/pacemaker/pcmk_sched_location.c index b4ce4ff07dc..5f42ec0fc8c 100644 --- a/lib/pacemaker/pcmk_sched_location.c +++ b/lib/pacemaker/pcmk_sched_location.c @@ -31,7 +31,10 @@ get_node_score(const char *rule, const char *score, bool raw, score_f = char2score(score); } else { - const char *attr_score = pe_node_attribute_calculated(node, score, rsc); + const char *attr_score = NULL; + + attr_score = pe_node_attribute_calculated(node, score, rsc, + pe__rsc_node_current); if (attr_score == NULL) { crm_debug("Rule %s: %s did not have a value for %s", diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c index d12d017bab2..2bad1d0c487 100644 --- a/lib/pacemaker/pcmk_sched_promotable.c +++ b/lib/pacemaker/pcmk_sched_promotable.c @@ -649,7 +649,8 @@ promotion_attr_value(const pe_resource_t *rsc, const pe_node_t *node, CRM_CHECK((rsc != NULL) && (node != NULL) && (name != NULL), return NULL); attr_name = pcmk_promotion_score_name(name); - attr_value = pe_node_attribute_calculated(node, attr_name, rsc); + attr_value = pe_node_attribute_calculated(node, attr_name, rsc, + pe__rsc_node_current); free(attr_name); return attr_value; } diff --git a/lib/pengine/common.c b/lib/pengine/common.c index 6c69bfcb41a..af41c1f6e89 100644 --- a/lib/pengine/common.c +++ b/lib/pengine/common.c @@ -516,9 +516,15 @@ add_hash_param(GHashTable * hash, const char *name, const char *value) const char * pe_node_attribute_calculated(const pe_node_t *node, const char *name, - const pe_resource_t *rsc) + const pe_resource_t *rsc, + enum pe__rsc_node node_type) { - const char *source; + const char *source = NULL; + const char *node_type_s = NULL; + const char *reason = NULL; + + const pe_resource_t *container = NULL; + const pe_node_t *host = NULL; if(node == NULL) { return NULL; @@ -539,18 +545,50 @@ pe_node_attribute_calculated(const pe_node_t *node, const char *name, * storage */ - CRM_ASSERT(node->details->remote_rsc); - CRM_ASSERT(node->details->remote_rsc->container); + CRM_ASSERT(node->details->remote_rsc != NULL); + + container = node->details->remote_rsc->container; + CRM_ASSERT(container != NULL); + + switch (node_type) { + case pe__rsc_node_assigned: + node_type_s = "assigned"; + host = container->allocated_to; + if (host == NULL) { + reason = "not assigned"; + } + break; + + case pe__rsc_node_current: + node_type_s = "current"; - if(node->details->remote_rsc->container->running_on) { - pe_node_t *host = node->details->remote_rsc->container->running_on->data; - pe_rsc_trace(rsc, "%s: Looking for %s on the container host %s", - rsc->id, name, pe__node_name(host)); - return 
g_hash_table_lookup(host->details->attrs, name); + if (container->running_on != NULL) { + host = container->running_on->data; + } + if (host == NULL) { + reason = "inactive"; + } + break; + + default: + // Add support for other enum pe__rsc_node values if needed + CRM_ASSERT(false); + break; } - pe_rsc_trace(rsc, "%s: Not looking for %s on the container host: %s is inactive", - rsc->id, name, node->details->remote_rsc->container->id); + if (host != NULL) { + const char *value = g_hash_table_lookup(host->details->attrs, name); + + pe_rsc_trace(rsc, + "%s: Value lookup for %s on %s container host %s %s%s", + rsc->id, name, node_type_s, pe__node_name(host), + ((value != NULL)? "succeeded: " : "failed"), + pcmk__s(value, "")); + return value; + } + pe_rsc_trace(rsc, + "%s: Not looking for %s on %s container host: %s is %s", + rsc->id, name, node_type_s, container->id, reason); return NULL; } From d5a56afd2ecd861e0cf0d1049157e82a034f3f7a Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Thu, 15 Jun 2023 00:34:39 -0700 Subject: [PATCH 03/19] Fix: libpacemaker: Get container attr from assigned node, if any promotion_attr_value() should get a container's promotion score from the host to which it's assigned (if it's been assigned), rather than the host on which it's running. Ref T489 Signed-off-by: Reid Wahl --- lib/pacemaker/pcmk_sched_promotable.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c index 2bad1d0c487..8612c25a51d 100644 --- a/lib/pacemaker/pcmk_sched_promotable.c +++ b/lib/pacemaker/pcmk_sched_promotable.c @@ -645,12 +645,14 @@ promotion_attr_value(const pe_resource_t *rsc, const pe_node_t *node, { char *attr_name = NULL; const char *attr_value = NULL; + enum pe__rsc_node node_type = pe__rsc_node_assigned; - CRM_CHECK((rsc != NULL) && (node != NULL) && (name != NULL), return NULL); - + if (pcmk_is_set(rsc->flags, pe_rsc_provisional)) { + // Not assigned yet + node_type = pe__rsc_node_current; + } attr_name = pcmk_promotion_score_name(name); - attr_value = pe_node_attribute_calculated(node, attr_name, rsc, - pe__rsc_node_current); + attr_value = pe_node_attribute_calculated(node, attr_name, rsc, node_type); free(attr_name); return attr_value; } From cfc2cd20e15c0f1c6b6ed8517c310acd756c1533 Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Mon, 10 Jul 2023 02:26:26 -0700 Subject: [PATCH 04/19] Test: scheduler: Update outputs for promotion_attr_value() fix Update outputs after previous commit (get container's promotion score from assigned host). There are a few changes to scores, as well as dot and exp files. The behavior in the bundle-interleave-start test appears to be an improvement. 
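The improvement follows from the previous commit's lookup rule: a bundle
replica that is only now being started already has an assigned container
host, so its promotion score can be read there instead of defaulting to
-1 when no current host exists. In simplified stand-alone form (with
hypothetical names, not the real API), that decision amounts to:

    #include <stdbool.h>

    enum lookup_type { LOOKUP_ASSIGNED, LOOKUP_CURRENT };

    /* A resource still flagged provisional has not been assigned yet, so
     * its promotion score can come only from where it is running now */
    static enum lookup_type
    promotion_lookup_type(bool provisional)
    {
        return provisional? LOOKUP_CURRENT : LOOKUP_ASSIGNED;
    }

    int main(void)
    {
        return (promotion_lookup_type(false) == LOOKUP_ASSIGNED)? 0 : 1;
    }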
Ref T489 Signed-off-by: Reid Wahl --- cts/scheduler/dot/bundle-interleave-start.dot | 44 +- cts/scheduler/exp/bundle-interleave-start.exp | 556 +++++++++++------- .../exp/no-promote-on-unrunnable-guest.exp | 14 +- .../scores/bundle-interleave-start.scores | 12 +- .../scores/cancel-behind-moving-remote.scores | 2 +- .../scores/guest-host-not-fenceable.scores | 2 +- .../no-promote-on-unrunnable-guest.scores | 2 +- .../summary/bundle-interleave-start.summary | 54 +- cts/scheduler/xml/bundle-interleave-start.xml | 3 +- 9 files changed, 445 insertions(+), 244 deletions(-) diff --git a/cts/scheduler/dot/bundle-interleave-start.dot b/cts/scheduler/dot/bundle-interleave-start.dot index bf6ed7f9edb..a513ac5806b 100644 --- a/cts/scheduler/dot/bundle-interleave-start.dot +++ b/cts/scheduler/dot/bundle-interleave-start.dot @@ -41,9 +41,15 @@ "app-bundle-2_monitor_0 node5" [ style=bold color="green" fontcolor="black"] "app-bundle-2_monitor_30000 node4" [ style=bold color="green" fontcolor="black"] "app-bundle-2_start_0 node4" -> "app-bundle-2_monitor_30000 node4" [ style = bold] -"app-bundle-2_start_0 node4" -> "app:2_monitor_16000 app-bundle-2" [ style = bold] +"app-bundle-2_start_0 node4" -> "app:2_monitor_15000 app-bundle-2" [ style = bold] +"app-bundle-2_start_0 node4" -> "app:2_promote_0 app-bundle-2" [ style = bold] "app-bundle-2_start_0 node4" -> "app:2_start_0 app-bundle-2" [ style = bold] "app-bundle-2_start_0 node4" [ style=bold color="green" fontcolor="black"] +"app-bundle-clone_promote_0" -> "app:2_promote_0 app-bundle-2" [ style = bold] +"app-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"app-bundle-clone_promoted_0" -> "app-bundle_promoted_0" [ style = bold] +"app-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"app-bundle-clone_running_0" -> "app-bundle-clone_promote_0" [ style = bold] "app-bundle-clone_running_0" -> "app-bundle_running_0" [ style = bold] "app-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] "app-bundle-clone_start_0" -> "app-bundle-clone_running_0" [ style = bold] @@ -133,8 +139,13 @@ "app-bundle-podman-2_start_0 node4" -> "app-bundle-2_start_0 node4" [ style = bold] "app-bundle-podman-2_start_0 node4" -> "app-bundle-podman-2_monitor_60000 node4" [ style = bold] "app-bundle-podman-2_start_0 node4" -> "app-bundle_running_0" [ style = bold] +"app-bundle-podman-2_start_0 node4" -> "app:2_promote_0 app-bundle-2" [ style = bold] "app-bundle-podman-2_start_0 node4" -> "app:2_start_0 app-bundle-2" [ style = bold] "app-bundle-podman-2_start_0 node4" [ style=bold color="green" fontcolor="black"] +"app-bundle_promote_0" -> "app-bundle-clone_promote_0" [ style = bold] +"app-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] +"app-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] +"app-bundle_running_0" -> "app-bundle_promote_0" [ style = bold] "app-bundle_running_0" [ style=bold color="green" fontcolor="orange"] "app-bundle_start_0" -> "app-bundle-clone_start_0" [ style = bold] "app-bundle_start_0" -> "app-bundle-podman-0_start_0 node2" [ style = bold] @@ -151,9 +162,13 @@ "app:1_start_0 app-bundle-1" -> "app:1_monitor_16000 app-bundle-1" [ style = bold] "app:1_start_0 app-bundle-1" -> "app:2_start_0 app-bundle-2" [ style = bold] "app:1_start_0 app-bundle-1" [ style=bold color="green" fontcolor="black"] -"app:2_monitor_16000 app-bundle-2" [ style=bold color="green" fontcolor="black"] +"app:2_monitor_15000 app-bundle-2" [ style=bold color="green" fontcolor="black"] 
+"app:2_promote_0 app-bundle-2" -> "app-bundle-clone_promoted_0" [ style = bold] +"app:2_promote_0 app-bundle-2" -> "app:2_monitor_15000 app-bundle-2" [ style = bold] +"app:2_promote_0 app-bundle-2" [ style=bold color="green" fontcolor="black"] "app:2_start_0 app-bundle-2" -> "app-bundle-clone_running_0" [ style = bold] -"app:2_start_0 app-bundle-2" -> "app:2_monitor_16000 app-bundle-2" [ style = bold] +"app:2_start_0 app-bundle-2" -> "app:2_monitor_15000 app-bundle-2" [ style = bold] +"app:2_start_0 app-bundle-2" -> "app:2_promote_0 app-bundle-2" [ style = bold] "app:2_start_0 app-bundle-2" [ style=bold color="green" fontcolor="black"] "base-bundle-0_monitor_0 node1" -> "base-bundle-0_start_0 node2" [ style = bold] "base-bundle-0_monitor_0 node1" [ style=bold color="green" fontcolor="black"] @@ -197,9 +212,15 @@ "base-bundle-2_monitor_0 node5" [ style=bold color="green" fontcolor="black"] "base-bundle-2_monitor_30000 node4" [ style=bold color="green" fontcolor="black"] "base-bundle-2_start_0 node4" -> "base-bundle-2_monitor_30000 node4" [ style = bold] -"base-bundle-2_start_0 node4" -> "base:2_monitor_16000 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node4" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node4" -> "base:2_promote_0 base-bundle-2" [ style = bold] "base-bundle-2_start_0 node4" -> "base:2_start_0 base-bundle-2" [ style = bold] "base-bundle-2_start_0 node4" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_promote_0" -> "base:2_promote_0 base-bundle-2" [ style = bold] +"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold] +"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_running_0" -> "base-bundle-clone_promote_0" [ style = bold] "base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] "base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] "base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] @@ -289,9 +310,15 @@ "base-bundle-podman-2_start_0 node4" -> "base-bundle-2_start_0 node4" [ style = bold] "base-bundle-podman-2_start_0 node4" -> "base-bundle-podman-2_monitor_60000 node4" [ style = bold] "base-bundle-podman-2_start_0 node4" -> "base-bundle_running_0" [ style = bold] +"base-bundle-podman-2_start_0 node4" -> "base:2_promote_0 base-bundle-2" [ style = bold] "base-bundle-podman-2_start_0 node4" -> "base:2_start_0 base-bundle-2" [ style = bold] "base-bundle-podman-2_start_0 node4" [ style=bold color="green" fontcolor="black"] +"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promoted_0" -> "app-bundle_promote_0" [ style = bold] +"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] "base-bundle_running_0" -> "app-bundle_start_0" [ style = bold] +"base-bundle_running_0" -> "base-bundle_promote_0" [ style = bold] "base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] "base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] "base-bundle_start_0" -> "base-bundle-podman-0_start_0 node2" [ style = bold] @@ -310,9 +337,14 @@ "base:1_start_0 base-bundle-1" -> "base:1_monitor_16000 base-bundle-1" [ style = bold] "base:1_start_0 base-bundle-1" -> "base:2_start_0 base-bundle-2" [ style = bold] "base:1_start_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] 
-"base:2_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base:2_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base:2_promote_0 base-bundle-2" -> "app:2_promote_0 app-bundle-2" [ style = bold]
+"base:2_promote_0 base-bundle-2" -> "base-bundle-clone_promoted_0" [ style = bold]
+"base:2_promote_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold]
+"base:2_promote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"]
 "base:2_start_0 base-bundle-2" -> "app-bundle-podman-2_start_0 node4" [ style = bold]
 "base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold]
-"base:2_start_0 base-bundle-2" -> "base:2_monitor_16000 base-bundle-2" [ style = bold]
+"base:2_start_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold]
+"base:2_start_0 base-bundle-2" -> "base:2_promote_0 base-bundle-2" [ style = bold]
 "base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"]
 }
diff --git a/cts/scheduler/exp/bundle-interleave-start.exp b/cts/scheduler/exp/bundle-interleave-start.exp
index e676b1bfba9..57e551c487e 100644
--- a/cts/scheduler/exp/bundle-interleave-start.exp
+++ b/cts/scheduler/exp/bundle-interleave-start.exp
[hunks omitted: the XML content of this transition-graph diff was lost to markup stripping; only bare +/- markers and hunk headers survived]
diff --git a/cts/scheduler/exp/no-promote-on-unrunnable-guest.exp b/cts/scheduler/exp/no-promote-on-unrunnable-guest.exp
index 351aec11df0..350495f4a6f 100644
--- a/cts/scheduler/exp/no-promote-on-unrunnable-guest.exp
+++ b/cts/scheduler/exp/no-promote-on-unrunnable-guest.exp
[hunks omitted: the XML content of this transition-graph diff was lost to markup stripping; only bare +/- markers and hunk headers survived]
diff --git a/cts/scheduler/scores/bundle-interleave-start.scores b/cts/scheduler/scores/bundle-interleave-start.scores
index 7f4a370474d..b3aa9b571e8 100644
--- a/cts/scheduler/scores/bundle-interleave-start.scores
+++ b/cts/scheduler/scores/bundle-interleave-start.scores
@@ -1,10 +1,10 @@
-app:0 promotion score on app-bundle-0: -1
-app:1 promotion score on app-bundle-1: -1
-app:2 promotion score on app-bundle-2: -1
-base:0 promotion score on base-bundle-0: -1
-base:1 promotion score on base-bundle-1: -1
-base:2 promotion score on base-bundle-2: -1
+app:0 promotion score on app-bundle-0: 12
+app:1 promotion score on app-bundle-1: 13
+app:2 promotion score on app-bundle-2: 14
+base:0 promotion score on base-bundle-0: 12
+base:1 promotion score on base-bundle-1: 13
+base:2 promotion score on base-bundle-2: 14
 pcmk__bundle_allocate: app-bundle allocation score on node1: 0
 pcmk__bundle_allocate: app-bundle allocation score on node2: 0
 pcmk__bundle_allocate: app-bundle allocation score on node3: 0
diff --git a/cts/scheduler/scores/cancel-behind-moving-remote.scores b/cts/scheduler/scores/cancel-behind-moving-remote.scores
index 0dfd78caa92..0e11b225aea 100644
--- a/cts/scheduler/scores/cancel-behind-moving-remote.scores
+++ b/cts/scheduler/scores/cancel-behind-moving-remote.scores
@@ -2,7 +2,7 @@
 galera:0 promotion score on galera-bundle-0: 100
 galera:1 promotion score on galera-bundle-1: 100
 galera:2 promotion score on galera-bundle-2: 100
-ovndb_servers:0 promotion score on ovn-dbs-bundle-0: -1
+ovndb_servers:0 promotion score on ovn-dbs-bundle-0: 5
 ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 5
 ovndb_servers:2 promotion score on ovn-dbs-bundle-2: 5
 pcmk__bundle_allocate: galera-bundle allocation score on compute-0: -INFINITY
diff --git a/cts/scheduler/scores/guest-host-not-fenceable.scores b/cts/scheduler/scores/guest-host-not-fenceable.scores
index e4c7fc2033d..5f43bcb0812 100644
--- a/cts/scheduler/scores/guest-host-not-fenceable.scores
+++ b/cts/scheduler/scores/guest-host-not-fenceable.scores
@@ -1,6 +1,6 @@
 galera:0 promotion score on galera-bundle-0: 100
-galera:1 promotion score on galera-bundle-1: 100
+galera:1 promotion score on galera-bundle-1: -1
 galera:2 promotion score on galera-bundle-2: -1
 pcmk__bundle_allocate: galera-bundle allocation score on node1: 0
 pcmk__bundle_allocate: galera-bundle allocation score on node2: 0
diff --git
a/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores b/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores index 7923cdc2320..9362dc0e1f2 100644 --- a/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores +++ b/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores @@ -2,7 +2,7 @@ galera:0 promotion score on galera-bundle-0: 100 galera:1 promotion score on galera-bundle-1: 100 galera:2 promotion score on galera-bundle-2: 100 -ovndb_servers:0 promotion score on ovn-dbs-bundle-0: 5 +ovndb_servers:0 promotion score on ovn-dbs-bundle-0: -1 ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 5 ovndb_servers:2 promotion score on ovn-dbs-bundle-2: 5 pcmk__bundle_allocate: galera-bundle allocation score on controller-0: 0 diff --git a/cts/scheduler/summary/bundle-interleave-start.summary b/cts/scheduler/summary/bundle-interleave-start.summary index 1648e929bf7..07ff7561968 100644 --- a/cts/scheduler/summary/bundle-interleave-start.summary +++ b/cts/scheduler/summary/bundle-interleave-start.summary @@ -14,24 +14,24 @@ Current cluster status: * app-bundle-2 (ocf:pacemaker:Stateful): Stopped Transition Summary: - * Start base-bundle-podman-0 ( node2 ) - * Start base-bundle-0 ( node2 ) - * Start base:0 ( base-bundle-0 ) - * Start base-bundle-podman-1 ( node3 ) - * Start base-bundle-1 ( node3 ) - * Start base:1 ( base-bundle-1 ) - * Start base-bundle-podman-2 ( node4 ) - * Start base-bundle-2 ( node4 ) - * Start base:2 ( base-bundle-2 ) - * Start app-bundle-podman-0 ( node2 ) - * Start app-bundle-0 ( node2 ) - * Start app:0 ( app-bundle-0 ) - * Start app-bundle-podman-1 ( node3 ) - * Start app-bundle-1 ( node3 ) - * Start app:1 ( app-bundle-1 ) - * Start app-bundle-podman-2 ( node4 ) - * Start app-bundle-2 ( node4 ) - * Start app:2 ( app-bundle-2 ) + * Start base-bundle-podman-0 ( node2 ) + * Start base-bundle-0 ( node2 ) + * Start base:0 ( base-bundle-0 ) + * Start base-bundle-podman-1 ( node3 ) + * Start base-bundle-1 ( node3 ) + * Start base:1 ( base-bundle-1 ) + * Start base-bundle-podman-2 ( node4 ) + * Start base-bundle-2 ( node4 ) + * Promote base:2 ( Stopped -> Promoted base-bundle-2 ) + * Start app-bundle-podman-0 ( node2 ) + * Start app-bundle-0 ( node2 ) + * Start app:0 ( app-bundle-0 ) + * Start app-bundle-podman-1 ( node3 ) + * Start app-bundle-1 ( node3 ) + * Start app:1 ( app-bundle-1 ) + * Start app-bundle-podman-2 ( node4 ) + * Start app-bundle-2 ( node4 ) + * Promote app:2 ( Stopped -> Promoted app-bundle-2 ) Executing Cluster Transition: * Resource action: base-bundle-podman-0 monitor on node5 @@ -100,8 +100,9 @@ Executing Cluster Transition: * Pseudo action: base-bundle_running_0 * Resource action: base:0 monitor=16000 on base-bundle-0 * Resource action: base:1 monitor=16000 on base-bundle-1 - * Resource action: base:2 monitor=16000 on base-bundle-2 * Pseudo action: app-bundle_start_0 + * Pseudo action: base-bundle_promote_0 + * Pseudo action: base-bundle-clone_promote_0 * Pseudo action: app-bundle-clone_start_0 * Resource action: app-bundle-podman-0 start on node2 * Resource action: app-bundle-0 monitor on node5 @@ -121,12 +122,16 @@ Executing Cluster Transition: * Resource action: app-bundle-2 monitor on node3 * Resource action: app-bundle-2 monitor on node2 * Resource action: app-bundle-2 monitor on node1 + * Resource action: base:2 promote on base-bundle-2 + * Pseudo action: base-bundle-clone_promoted_0 * Resource action: app-bundle-podman-0 monitor=60000 on node2 * Resource action: app-bundle-0 start on node2 * Resource action: app-bundle-podman-1 
monitor=60000 on node3 * Resource action: app-bundle-1 start on node3 * Resource action: app-bundle-podman-2 monitor=60000 on node4 * Resource action: app-bundle-2 start on node4 + * Pseudo action: base-bundle_promoted_0 + * Resource action: base:2 monitor=15000 on base-bundle-2 * Resource action: app:0 start on app-bundle-0 * Resource action: app:1 start on app-bundle-1 * Resource action: app:2 start on app-bundle-2 @@ -137,7 +142,12 @@ Executing Cluster Transition: * Pseudo action: app-bundle_running_0 * Resource action: app:0 monitor=16000 on app-bundle-0 * Resource action: app:1 monitor=16000 on app-bundle-1 - * Resource action: app:2 monitor=16000 on app-bundle-2 + * Pseudo action: app-bundle_promote_0 + * Pseudo action: app-bundle-clone_promote_0 + * Resource action: app:2 promote on app-bundle-2 + * Pseudo action: app-bundle-clone_promoted_0 + * Pseudo action: app-bundle_promoted_0 + * Resource action: app:2 monitor=15000 on app-bundle-2 Revised Cluster Status: * Node List: @@ -149,8 +159,8 @@ Revised Cluster Status: * Container bundle set: base-bundle [localhost/pcmktest:base]: * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node2 * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node3 - * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node4 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node4 * Container bundle set: app-bundle [localhost/pcmktest:app]: * app-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node2 * app-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node3 - * app-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node4 + * app-bundle-2 (ocf:pacemaker:Stateful): Promoted node4 diff --git a/cts/scheduler/xml/bundle-interleave-start.xml b/cts/scheduler/xml/bundle-interleave-start.xml index e8630cdf054..facb181b2a4 100644 --- a/cts/scheduler/xml/bundle-interleave-start.xml +++ b/cts/scheduler/xml/bundle-interleave-start.xml @@ -6,7 +6,8 @@ and its promoted role is colocated with base's. App's starts and promotes are ordered after base's. - In this test, all are stopped and must be started. + In this test, all are stopped and must be started. One replica of each + bundle must be promoted. --> From 6e5bc0d119c1609a3228763a5116a68829870948 Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Mon, 26 Jun 2023 12:42:10 -0700 Subject: [PATCH 05/19] Refactor: libpacemaker: De-functionize pcmk__finalize_assignment() Move it into pcmk__assign_resource(). Also correct the "was assignment changed" logic, and allocate rc_stopped only once. 
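The corrected logic compares the previous assignment against the new node,
rather than reporting a change whenever any previous assignment existed.
As a stand-alone sketch (stand-in node type and hypothetical names; the
patch itself compares pe_node_t objects with pe__same_node()):

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    struct node { const char *name; };

    static bool
    same_node(const struct node *a, const struct node *b)
    {
        return (a != NULL) && (b != NULL) && (strcmp(a->name, b->name) == 0);
    }

    /* true only if the assignment actually changed */
    static bool
    assignment_changed(const struct node *old_node, const struct node *new_node)
    {
        if (old_node != NULL) {
            return !same_node(old_node, new_node);
        }
        return (new_node != NULL);
    }

    int main(void)
    {
        struct node n1 = { "node1" };
        struct node n1_copy = { "node1" };

        /* Same node reached via a different pointer: not a change */
        return assignment_changed(&n1, &n1_copy)? 1 : 0;
    }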
Signed-off-by: Reid Wahl --- lib/pacemaker/libpacemaker_private.h | 4 - lib/pacemaker/pcmk_sched_primitive.c | 9 +- lib/pacemaker/pcmk_sched_resource.c | 156 ++++++++++++--------------- 3 files changed, 74 insertions(+), 95 deletions(-) diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h index 192d5a703ff..614d695f83f 100644 --- a/lib/pacemaker/libpacemaker_private.h +++ b/lib/pacemaker/libpacemaker_private.h @@ -908,10 +908,6 @@ void pcmk__noop_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml); G_GNUC_INTERNAL void pcmk__output_resource_actions(pe_resource_t *rsc); -G_GNUC_INTERNAL -bool pcmk__finalize_assignment(pe_resource_t *rsc, pe_node_t *chosen, - bool force); - G_GNUC_INTERNAL bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force); diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c index aefbf9aa140..2470b08ed69 100644 --- a/lib/pacemaker/pcmk_sched_primitive.c +++ b/lib/pacemaker/pcmk_sched_primitive.c @@ -152,7 +152,6 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer) GList *nodes = NULL; pe_node_t *chosen = NULL; pe_node_t *best = NULL; - bool result = false; const pe_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc); if (prefer == NULL) { @@ -260,9 +259,9 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer) pe__node_name(chosen), rsc->id, g_list_length(nodes)); } - result = pcmk__finalize_assignment(rsc, chosen, false); + pcmk__assign_resource(rsc, chosen, false); g_list_free(nodes); - return result; + return rsc->allocated_to != NULL; } /*! @@ -475,11 +474,11 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer) } pe_rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id, (assign_to? assign_to->details->uname : "no node"), reason); - pcmk__finalize_assignment(rsc, assign_to, true); + pcmk__assign_resource(rsc, assign_to, true); } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_everything)) { pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources", rsc->id); - pcmk__finalize_assignment(rsc, NULL, true); + pcmk__assign_resource(rsc, NULL, true); } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional) && assign_best_node(rsc, prefer)) { diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c index b8554998197..dd9939a42a6 100644 --- a/lib/pacemaker/pcmk_sched_resource.c +++ b/lib/pacemaker/pcmk_sched_resource.c @@ -331,140 +331,124 @@ pcmk__output_resource_actions(pe_resource_t *rsc) /*! * \internal - * \brief Assign a specified primitive resource to a node + * \brief Assign a specified resource (of any variant) to a node * - * Assign a specified primitive resource to a specified node, if the node can - * run the resource (or unconditionally, if \p force is true). Mark the resource - * as no longer provisional. If the primitive can't be assigned (or \p chosen is - * NULL), unassign any previous assignment for it, set its next role to stopped, - * and update any existing actions scheduled for it. This is not done - * recursively for children, so it should be called only for primitives. + * Assign a specified resource and its children (if any) to a specified node, if + * the node can run the resource (or unconditionally, if \p force is true). Mark + * the resources as no longer provisional. If a resource can't be assigned (or + * \p node is \c NULL), unassign any previous assignment, set next role to + * stopped, and update any existing actions scheduled for it. 
* - * \param[in,out] rsc Resource to assign - * \param[in,out] chosen Node to assign \p rsc to - * \param[in] force If true, assign to \p chosen even if unavailable + * \param[in,out] rsc Resource to assign + * \param[in,out] node Node to assign \p rsc to + * \param[in] force If true, assign to \p node even if unavailable * - * \return true if \p rsc could be assigned, otherwise false + * \return \c true if the assignment of \p rsc changed, or \c false otherwise * * \note Assigning a resource to the NULL node using this function is different * from calling pcmk__unassign_resource(), in that it will also update any * actions created for the resource. + * \note The \c resource_alloc_functions_t:assign() method is preferred, unless + * a resource should be assigned to the \c NULL node or every resource in + * a tree should be assigned to the same node. */ bool -pcmk__finalize_assignment(pe_resource_t *rsc, pe_node_t *chosen, bool force) +pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force) { - pcmk__output_t *out = rsc->cluster->priv; + bool changed = false; + + CRM_ASSERT(rsc != NULL); - CRM_ASSERT(rsc->variant == pe_native); - - if (!force && (chosen != NULL)) { - if ((chosen->weight < 0) - // Allow the graph to assume that guest node connections will come up - || (!pcmk__node_available(chosen, true, false) - && !pe__is_guest_node(chosen))) { - - crm_debug("All nodes for resource %s are unavailable, unclean or " - "shutting down (%s can%s run resources, with weight %d)", - rsc->id, pe__node_name(chosen), - (pcmk__node_available(chosen, true, false)? "" : "not"), - chosen->weight); - pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability"); - chosen = NULL; + if (rsc->children != NULL) { + for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { + pe_resource_t *child_rsc = iter->data; + + changed |= pcmk__assign_resource(child_rsc, node, force); } + return changed; } + // Assigning a primitive + + if (!force && (node != NULL) + && ((node->weight < 0) + // Allow graph to assume that guest node connections will come up + || (!pcmk__node_available(node, true, false) + && !pe__is_guest_node(node)))) { + + pe_rsc_debug(rsc, + "All nodes for resource %s are unavailable, unclean or " + "shutting down (%s can%s run resources, with score %s)", + rsc->id, pe__node_name(node), + (pcmk__node_available(node, true, false)? 
"" : "not"), + pcmk_readable_score(node->weight)); + pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability"); + node = NULL; + } + + if (rsc->allocated_to != NULL) { + changed = !pe__same_node(rsc->allocated_to, node); + } else { + changed = (node != NULL); + } pcmk__unassign_resource(rsc); pe__clear_resource_flags(rsc, pe_rsc_provisional); - if (chosen == NULL) { - crm_debug("Could not allocate a node for %s", rsc->id); - pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to allocate"); + if (node == NULL) { + char *rc_stopped = NULL; + + pe_rsc_debug(rsc, "Could not assign %s to a node", rsc->id); + pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to assign"); for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) { pe_action_t *op = (pe_action_t *) iter->data; - crm_debug("Updating %s for allocation failure", op->uuid); + pe_rsc_debug(rsc, "Updating %s for %s assignment failure", + op->uuid, rsc->id); if (pcmk__str_eq(op->task, RSC_STOP, pcmk__str_casei)) { pe__clear_action_flags(op, pe_action_optional); } else if (pcmk__str_eq(op->task, RSC_START, pcmk__str_casei)) { pe__clear_action_flags(op, pe_action_runnable); - //pe__set_resource_flags(rsc, pe_rsc_block); } else { // Cancel recurring actions, unless for stopped state const char *interval_ms_s = NULL; const char *target_rc_s = NULL; - char *rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING); interval_ms_s = g_hash_table_lookup(op->meta, XML_LRM_ATTR_INTERVAL_MS); target_rc_s = g_hash_table_lookup(op->meta, XML_ATTR_TE_TARGET_RC); - if ((interval_ms_s != NULL) - && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_none) + if (rc_stopped == NULL) { + rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING); + } + + if (!pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches) && !pcmk__str_eq(rc_stopped, target_rc_s, pcmk__str_none)) { + pe__clear_action_flags(op, pe_action_runnable); } - free(rc_stopped); } } - return false; + free(rc_stopped); + return changed; } - crm_debug("Assigning %s to %s", rsc->id, pe__node_name(chosen)); - rsc->allocated_to = pe__copy_node(chosen); + pe_rsc_debug(rsc, "Assigning %s to %s", rsc->id, pe__node_name(node)); + rsc->allocated_to = pe__copy_node(node); - chosen->details->allocated_rsc = g_list_prepend(chosen->details->allocated_rsc, - rsc); - chosen->details->num_resources++; - chosen->count++; - pcmk__consume_node_capacity(chosen->details->utilization, rsc); + node->details->allocated_rsc = g_list_prepend(node->details->allocated_rsc, + rsc); + node->details->num_resources++; + node->count++; + pcmk__consume_node_capacity(node->details->utilization, rsc); if (pcmk_is_set(rsc->cluster->flags, pe_flag_show_utilization)) { - out->message(out, "resource-util", rsc, chosen, __func__); - } - return true; -} - -/*! - * \internal - * \brief Assign a specified resource (of any variant) to a node - * - * Assign a specified resource and its children (if any) to a specified node, if - * the node can run the resource (or unconditionally, if \p force is true). Mark - * the resources as no longer provisional. If the resources can't be assigned - * (or \p chosen is NULL), unassign any previous assignments, set next role to - * stopped, and update any existing actions scheduled for them. 
- * - * \param[in,out] rsc Resource to assign - * \param[in,out] chosen Node to assign \p rsc to - * \param[in] force If true, assign to \p chosen even if unavailable - * - * \return true if \p rsc could be assigned, otherwise false - * - * \note Assigning a resource to the NULL node using this function is different - * from calling pcmk__unassign_resource(), in that it will also update any - * actions created for the resource. - */ -bool -pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force) -{ - bool changed = false; - - if (rsc->children == NULL) { - if (rsc->allocated_to != NULL) { - changed = true; - } - pcmk__finalize_assignment(rsc, node, force); - - } else { - for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { - pe_resource_t *child_rsc = (pe_resource_t *) iter->data; + pcmk__output_t *out = rsc->cluster->priv; - changed |= pcmk__assign_resource(child_rsc, node, force); - } + out->message(out, "resource-util", rsc, node, __func__); } return changed; } From b01ecf9444e856227cd61c53f1c0106936eccd74 Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Mon, 10 Jul 2023 02:28:54 -0700 Subject: [PATCH 06/19] Test: cts-cli: Update tests after defunctionization pcmk__finalize_assignment() -> pcmk__assign_resource() Signed-off-by: Reid Wahl --- cts/cli/regression.tools.exp | 42 ++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp index a8e2236063c..506e9ba01b6 100644 --- a/cts/cli/regression.tools.exp +++ b/cts/cli/regression.tools.exp @@ -5711,26 +5711,26 @@ Original: cluster02 capacity: Original: httpd-bundle-0 capacity: Original: httpd-bundle-1 capacity: Original: httpd-bundle-2 capacity: -pcmk__finalize_assignment: ping:0 utilization on cluster02: -pcmk__finalize_assignment: ping:1 utilization on cluster01: -pcmk__finalize_assignment: Fencing utilization on cluster01: -pcmk__finalize_assignment: dummy utilization on cluster02: -pcmk__finalize_assignment: httpd-bundle-docker-0 utilization on cluster01: -pcmk__finalize_assignment: httpd-bundle-docker-1 utilization on cluster02: -pcmk__finalize_assignment: httpd-bundle-ip-192.168.122.131 utilization on cluster01: -pcmk__finalize_assignment: httpd-bundle-0 utilization on cluster01: -pcmk__finalize_assignment: httpd:0 utilization on httpd-bundle-0: -pcmk__finalize_assignment: httpd-bundle-ip-192.168.122.132 utilization on cluster02: -pcmk__finalize_assignment: httpd-bundle-1 utilization on cluster02: -pcmk__finalize_assignment: httpd:1 utilization on httpd-bundle-1: -pcmk__finalize_assignment: httpd-bundle-2 utilization on cluster01: -pcmk__finalize_assignment: httpd:2 utilization on httpd-bundle-2: -pcmk__finalize_assignment: Public-IP utilization on cluster02: -pcmk__finalize_assignment: Email utilization on cluster02: -pcmk__finalize_assignment: mysql-proxy:0 utilization on cluster02: -pcmk__finalize_assignment: mysql-proxy:1 utilization on cluster01: -pcmk__finalize_assignment: promotable-rsc:0 utilization on cluster02: -pcmk__finalize_assignment: promotable-rsc:1 utilization on cluster01: +pcmk__assign_resource: ping:0 utilization on cluster02: +pcmk__assign_resource: ping:1 utilization on cluster01: +pcmk__assign_resource: Fencing utilization on cluster01: +pcmk__assign_resource: dummy utilization on cluster02: +pcmk__assign_resource: httpd-bundle-docker-0 utilization on cluster01: +pcmk__assign_resource: httpd-bundle-docker-1 utilization on cluster02: +pcmk__assign_resource: 
httpd-bundle-ip-192.168.122.131 utilization on cluster01: +pcmk__assign_resource: httpd-bundle-0 utilization on cluster01: +pcmk__assign_resource: httpd:0 utilization on httpd-bundle-0: +pcmk__assign_resource: httpd-bundle-ip-192.168.122.132 utilization on cluster02: +pcmk__assign_resource: httpd-bundle-1 utilization on cluster02: +pcmk__assign_resource: httpd:1 utilization on httpd-bundle-1: +pcmk__assign_resource: httpd-bundle-2 utilization on cluster01: +pcmk__assign_resource: httpd:2 utilization on httpd-bundle-2: +pcmk__assign_resource: Public-IP utilization on cluster02: +pcmk__assign_resource: Email utilization on cluster02: +pcmk__assign_resource: mysql-proxy:0 utilization on cluster02: +pcmk__assign_resource: mysql-proxy:1 utilization on cluster01: +pcmk__assign_resource: promotable-rsc:0 utilization on cluster02: +pcmk__assign_resource: promotable-rsc:1 utilization on cluster01: Remaining: cluster01 capacity: Remaining: cluster02 capacity: Remaining: httpd-bundle-0 capacity: @@ -5961,7 +5961,7 @@ Transition Summary: * Move Public-IP ( cluster02 -> cluster01 ) * Move Email ( cluster02 -> cluster01 ) * Stop mysql-proxy:0 ( cluster02 ) due to node availability - * Stop promotable-rsc:0 ( Promoted cluster02 ) due to node availability + * Stop promotable-rsc:0 ( Promoted cluster02 ) due to node availability Executing Cluster Transition: * Pseudo action: httpd-bundle-1_stop_0 From 0ad4a3c8404d57e2026e41a234a9b8a0a237b2bd Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Tue, 20 Jun 2023 23:22:54 -0700 Subject: [PATCH 07/19] Test: scheduler: Clone instances should not shuffle unnecessarily In some cases, clone instances may be shuffled when a new instance is scheduled to start or promote. This can cause instances to be stopped and started unnecessarily. Here we add tests for three types of clones: * "Bare" clones of primitives * Clones of groups * Clones of primitives within bundles (clone is bundle child resource) For each clone type, we add four tests. In each test, no clone instance is running on node 1, and a new instance should be started and possibly promoted there. 
* No constraints or stickiness
* Location constraint preferring node 1
* Promotable clone where node 1 has the highest promotion score
* Promotable clone where node 1 does not have the highest promotion score

The following tests are currently incorrect:
* clone-recover-no-shuffle-4 (shuffling)
* clone-recover-no-shuffle-5 (all instances of an anonymous clone move to one node)
* clone-recover-no-shuffle-6 (shuffling)
* clone-recover-no-shuffle-7 (shuffling)

Ref T489

Signed-off-by: Reid Wahl
---
 cts/cts-scheduler.in | 33 ++
 .../dot/clone-recover-no-shuffle-1.dot | 10 +
 .../dot/clone-recover-no-shuffle-10.dot | 10 +
 .../dot/clone-recover-no-shuffle-11.dot | 21 +
 .../dot/clone-recover-no-shuffle-12.dot | 35 ++
 .../dot/clone-recover-no-shuffle-2.dot | 21 +
 .../dot/clone-recover-no-shuffle-3.dot | 32 ++
 .../dot/clone-recover-no-shuffle-4.dot | 23 +
 .../dot/clone-recover-no-shuffle-5.dot | 80 +++
 .../dot/clone-recover-no-shuffle-6.dot | 97 ++++
 .../dot/clone-recover-no-shuffle-7.dot | 45 ++
 .../dot/clone-recover-no-shuffle-8.dot | 63 +++
 .../dot/clone-recover-no-shuffle-9.dot | 69 +++
 .../exp/clone-recover-no-shuffle-1.exp | 51 ++
 .../exp/clone-recover-no-shuffle-10.exp | 51 ++
 .../exp/clone-recover-no-shuffle-11.exp | 110 ++++
 .../exp/clone-recover-no-shuffle-12.exp | 187 +++++++
 .../exp/clone-recover-no-shuffle-2.exp | 110 ++++
 .../exp/clone-recover-no-shuffle-3.exp | 171 ++++++
 .../exp/clone-recover-no-shuffle-4.exp | 123 +++++
 .../exp/clone-recover-no-shuffle-5.exp | 452 ++++++++++++++++
 .../exp/clone-recover-no-shuffle-6.exp | 507 ++++++++++++++++++
 .../exp/clone-recover-no-shuffle-7.exp | 240 +++++++++
 .../exp/clone-recover-no-shuffle-8.exp | 338 ++++++++++++
 .../exp/clone-recover-no-shuffle-9.exp | 364 +++++++++++++
 .../scores/clone-recover-no-shuffle-1.scores | 25 +
 .../scores/clone-recover-no-shuffle-10.scores | 31 ++
 .../scores/clone-recover-no-shuffle-11.scores | 82 +++
 .../scores/clone-recover-no-shuffle-12.scores | 67 +++
 .../scores/clone-recover-no-shuffle-2.scores | 79 +++
 .../scores/clone-recover-no-shuffle-3.scores | 64 +++
 .../scores/clone-recover-no-shuffle-4.scores | 31 ++
 .../scores/clone-recover-no-shuffle-5.scores | 79 +++
 .../scores/clone-recover-no-shuffle-6.scores | 70 +++
 .../scores/clone-recover-no-shuffle-7.scores | 34 ++
 .../scores/clone-recover-no-shuffle-8.scores | 82 +++
 .../scores/clone-recover-no-shuffle-9.scores | 67 +++
 .../clone-recover-no-shuffle-1.summary | 29 +
 .../clone-recover-no-shuffle-10.summary | 29 +
 .../clone-recover-no-shuffle-11.summary | 34 ++
 .../clone-recover-no-shuffle-12.summary | 43 ++
 .../clone-recover-no-shuffle-2.summary | 32 ++
 .../clone-recover-no-shuffle-3.summary | 42 ++
 .../clone-recover-no-shuffle-4.summary | 35 ++
 .../clone-recover-no-shuffle-5.summary | 59 ++
 .../clone-recover-no-shuffle-6.summary | 68 +++
 .../clone-recover-no-shuffle-7.summary | 44 ++
 .../clone-recover-no-shuffle-8.summary | 52 ++
 .../clone-recover-no-shuffle-9.summary | 56 ++
 .../xml/clone-recover-no-shuffle-1.xml | 113 ++++
 .../xml/clone-recover-no-shuffle-10.xml | 120 +++++
 .../xml/clone-recover-no-shuffle-11.xml | 153 ++++++
 .../xml/clone-recover-no-shuffle-12.xml | 186 +++++++
 .../xml/clone-recover-no-shuffle-2.xml | 141 +++++
 .../xml/clone-recover-no-shuffle-3.xml | 180 +++++++
 .../xml/clone-recover-no-shuffle-4.xml | 120 +++++
 .../xml/clone-recover-no-shuffle-5.xml | 148 +++++
 .../xml/clone-recover-no-shuffle-6.xml | 187 +++++++
 .../xml/clone-recover-no-shuffle-7.xml | 125 +++++
 .../xml/clone-recover-no-shuffle-8.xml | 153 ++++++
.../xml/clone-recover-no-shuffle-9.xml | 186 +++++++ 61 files changed, 6289 insertions(+) create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-1.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-10.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-11.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-12.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-2.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-3.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-4.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-5.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-6.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-7.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-8.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-9.dot create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-1.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-10.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-11.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-12.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-2.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-3.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-4.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-5.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-6.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-7.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-8.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-9.exp create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-1.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-10.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-11.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-12.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-2.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-3.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-4.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-5.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-6.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-7.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-8.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-9.scores create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-1.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-10.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-11.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-12.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-2.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-3.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-4.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-5.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-6.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-7.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-8.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-9.summary create mode 100644 
cts/scheduler/xml/clone-recover-no-shuffle-1.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-10.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-11.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-12.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-2.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-3.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-4.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-5.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-6.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-7.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-8.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-9.xml diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in index ee0cb7b4722..4ff035c23a3 100644 --- a/cts/cts-scheduler.in +++ b/cts/cts-scheduler.in @@ -441,6 +441,39 @@ TESTS = [ [ "cloned-group", "Make sure only the correct number of cloned groups are started" ], [ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ], [ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ], + [ "clone-recover-no-shuffle-1", + "Don't shuffle instances when starting a new primitive instance" ], + [ "clone-recover-no-shuffle-2", + "Don't shuffle instances when starting a new group instance" ], + [ "clone-recover-no-shuffle-3", + "Don't shuffle instances when starting a new bundle instance" ], + [ "clone-recover-no-shuffle-4", + "Don't shuffle instances when starting a new primitive instance with " + "location preference "], + [ "clone-recover-no-shuffle-5", + "Don't shuffle instances when starting a new group instance with " + "location preference" ], + [ "clone-recover-no-shuffle-6", + "Don't shuffle instances when starting a new bundle instance with " + "location preference" ], + [ "clone-recover-no-shuffle-7", + "Don't shuffle instances when starting a new primitive instance that " + "will be promoted" ], + [ "clone-recover-no-shuffle-8", + "Don't shuffle instances when starting a new group instance that " + "will be promoted " ], + [ "clone-recover-no-shuffle-9", + "Don't shuffle instances when starting a new bundle instance that " + "will be promoted " ], + [ "clone-recover-no-shuffle-10", + "Don't shuffle instances when starting a new primitive instance that " + "won't be promoted" ], + [ "clone-recover-no-shuffle-11", + "Don't shuffle instances when starting a new group instance that " + "won't be promoted " ], + [ "clone-recover-no-shuffle-12", + "Don't shuffle instances when starting a new bundle instance that " + "won't be promoted " ], [ "clone-max-zero", "Orphan processing with clone-max=0" ], [ "clone-anon-dup", "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ], diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-1.dot b/cts/scheduler/dot/clone-recover-no-shuffle-1.dot new file mode 100644 index 00000000000..287d82d3806 --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-1.dot @@ -0,0 +1,10 @@ + digraph "g" { +"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] +"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold] +"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"dummy:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"dummy:2_start_0 node1" -> 
"dummy-clone_running_0" [ style = bold] +"dummy:2_start_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold] +"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-10.dot b/cts/scheduler/dot/clone-recover-no-shuffle-10.dot new file mode 100644 index 00000000000..1e1840966fa --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-10.dot @@ -0,0 +1,10 @@ + digraph "g" { +"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] +"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold] +"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"dummy:2_monitor_11000 node1" [ style=bold color="green" fontcolor="black"] +"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold] +"dummy:2_start_0 node1" -> "dummy:2_monitor_11000 node1" [ style = bold] +"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-11.dot b/cts/scheduler/dot/clone-recover-no-shuffle-11.dot new file mode 100644 index 00000000000..2b08a594561 --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-11.dot @@ -0,0 +1,21 @@ + digraph "g" { +"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] +"grp-clone_start_0" -> "grp:2_start_0" [ style = bold] +"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_running_0" -> "grp-clone_running_0" [ style = bold] +"grp:2_running_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_start_0" -> "grp:2_running_0" [ style = bold] +"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] +"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] +"grp:2_start_0" [ style=bold color="green" fontcolor="orange"] +"rsc1:2_monitor_11000 node1" [ style=bold color="green" fontcolor="black"] +"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc1:2_monitor_11000 node1" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] +"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_monitor_11000 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc2:2_start_0 node1" -> "rsc2:2_monitor_11000 node1" [ style = bold] +"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-12.dot b/cts/scheduler/dot/clone-recover-no-shuffle-12.dot new file mode 100644 index 00000000000..ebc1dc6a815 --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-12.dot @@ -0,0 +1,35 @@ + digraph "g" { +"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold] +"base-bundle-2_start_0 node1" -> "base:2_monitor_16000 base-bundle-2" [ style = bold] 
+"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] +"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] +"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] +"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold] +"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"] +"base:2_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] +"base:2_start_0 base-bundle-2" -> "base:2_monitor_16000 base-bundle-2" [ style = bold] +"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-2.dot b/cts/scheduler/dot/clone-recover-no-shuffle-2.dot new file mode 100644 index 00000000000..d3bdf04baa9 --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-2.dot @@ -0,0 +1,21 @@ + digraph "g" { +"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] +"grp-clone_start_0" -> "grp:2_start_0" [ style = bold] +"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_running_0" -> "grp-clone_running_0" [ style = bold] +"grp:2_running_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_start_0" -> "grp:2_running_0" [ style = bold] +"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] +"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] +"grp:2_start_0" [ style=bold color="green" fontcolor="orange"] +"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] +"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] +"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-3.dot 
b/cts/scheduler/dot/clone-recover-no-shuffle-3.dot new file mode 100644 index 00000000000..f60fd2cc04e --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-3.dot @@ -0,0 +1,32 @@ + digraph "g" { +"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold] +"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] +"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] +"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] +"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold] +"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"] +"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] +"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-4.dot b/cts/scheduler/dot/clone-recover-no-shuffle-4.dot new file mode 100644 index 00000000000..fd002f28fcf --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-4.dot @@ -0,0 +1,23 @@ + digraph "g" { +"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] +"dummy-clone_start_0" -> "dummy:2_start_0 node2" [ style = bold] +"dummy-clone_start_0" -> "dummy_start_0 node1" [ style = bold] +"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_stop_0" -> "dummy-clone_stopped_0" [ style = bold] +"dummy-clone_stop_0" -> "dummy_stop_0 node2" [ style = bold] +"dummy-clone_stop_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_stopped_0" -> "dummy-clone_start_0" [ style = bold] +"dummy-clone_stopped_0" [ style=bold 
color="green" fontcolor="orange"] +"dummy:2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"dummy:2_start_0 node2" -> "dummy-clone_running_0" [ style = bold] +"dummy:2_start_0 node2" -> "dummy:2_monitor_10000 node2" [ style = bold] +"dummy:2_start_0 node2" [ style=bold color="green" fontcolor="black"] +"dummy_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"dummy_start_0 node1" -> "dummy-clone_running_0" [ style = bold] +"dummy_start_0 node1" -> "dummy_monitor_10000 node1" [ style = bold] +"dummy_start_0 node1" [ style=bold color="green" fontcolor="black"] +"dummy_stop_0 node2" -> "dummy-clone_stopped_0" [ style = bold] +"dummy_stop_0 node2" -> "dummy_start_0 node1" [ style = bold] +"dummy_stop_0 node2" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-5.dot b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot new file mode 100644 index 00000000000..7219ee5a6d3 --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot @@ -0,0 +1,80 @@ + digraph "g" { +"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] +"grp-clone_start_0" -> "grp:0_start_0" [ style = bold] +"grp-clone_start_0" -> "grp:1_start_0" [ style = bold] +"grp-clone_start_0" -> "grp:2_start_0" [ style = bold] +"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_stop_0" -> "grp-clone_stopped_0" [ style = bold] +"grp-clone_stop_0" -> "grp:0_stop_0" [ style = bold] +"grp-clone_stop_0" -> "grp:1_stop_0" [ style = bold] +"grp-clone_stop_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_stopped_0" -> "grp-clone_start_0" [ style = bold] +"grp-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] +"grp:0_running_0" -> "grp-clone_running_0" [ style = bold] +"grp:0_running_0" [ style=bold color="green" fontcolor="orange"] +"grp:0_start_0" -> "grp:0_running_0" [ style = bold] +"grp:0_start_0" -> "rsc1_start_0 node1" [ style = bold] +"grp:0_start_0" -> "rsc2_start_0 node1" [ style = bold] +"grp:0_start_0" [ style=bold color="green" fontcolor="orange"] +"grp:0_stop_0" -> "grp:0_stopped_0" [ style = bold] +"grp:0_stop_0" -> "rsc1_stop_0 node2" [ style = bold] +"grp:0_stop_0" -> "rsc2_stop_0 node2" [ style = bold] +"grp:0_stop_0" [ style=bold color="green" fontcolor="orange"] +"grp:0_stopped_0" -> "grp-clone_stopped_0" [ style = bold] +"grp:0_stopped_0" -> "grp:0_start_0" [ style = bold] +"grp:0_stopped_0" [ style=bold color="green" fontcolor="orange"] +"grp:1_running_0" -> "grp-clone_running_0" [ style = bold] +"grp:1_running_0" [ style=bold color="green" fontcolor="orange"] +"grp:1_start_0" -> "grp:1_running_0" [ style = bold] +"grp:1_start_0" -> "rsc1_start_0 node1" [ style = bold] +"grp:1_start_0" -> "rsc2_start_0 node1" [ style = bold] +"grp:1_start_0" [ style=bold color="green" fontcolor="orange"] +"grp:1_stop_0" -> "grp:1_stopped_0" [ style = bold] +"grp:1_stop_0" -> "rsc1_stop_0 node3" [ style = bold] +"grp:1_stop_0" -> "rsc2_stop_0 node3" [ style = bold] +"grp:1_stop_0" [ style=bold color="green" fontcolor="orange"] +"grp:1_stopped_0" -> "grp-clone_stopped_0" [ style = bold] +"grp:1_stopped_0" -> "grp:1_start_0" [ style = bold] +"grp:1_stopped_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_running_0" -> "grp-clone_running_0" [ style = bold] +"grp:2_running_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_start_0" -> "grp:2_running_0" [ style = bold] +"grp:2_start_0" -> "rsc1:2_start_0 
node1" [ style = bold] +"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] +"grp:2_start_0" [ style=bold color="green" fontcolor="orange"] +"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] +"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc1_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc1_start_0 node1" -> "grp:0_running_0" [ style = bold] +"rsc1_start_0 node1" -> "grp:1_running_0" [ style = bold] +"rsc1_start_0 node1" -> "rsc1_monitor_10000 node1" [ style = bold] +"rsc1_start_0 node1" -> "rsc2_start_0 node1" [ style = bold] +"rsc1_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc1_stop_0 node2" -> "grp:0_stopped_0" [ style = bold] +"rsc1_stop_0 node2" -> "rsc1_start_0 node1" [ style = bold] +"rsc1_stop_0 node2" [ style=bold color="green" fontcolor="black"] +"rsc1_stop_0 node3" -> "grp:1_stopped_0" [ style = bold] +"rsc1_stop_0 node3" -> "rsc1_start_0 node1" [ style = bold] +"rsc1_stop_0 node3" [ style=bold color="green" fontcolor="black"] +"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] +"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc2_start_0 node1" -> "grp:0_running_0" [ style = bold] +"rsc2_start_0 node1" -> "grp:1_running_0" [ style = bold] +"rsc2_start_0 node1" -> "rsc2_monitor_10000 node1" [ style = bold] +"rsc2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc2_stop_0 node2" -> "grp:0_stopped_0" [ style = bold] +"rsc2_stop_0 node2" -> "rsc1_stop_0 node2" [ style = bold] +"rsc2_stop_0 node2" -> "rsc2_start_0 node1" [ style = bold] +"rsc2_stop_0 node2" [ style=bold color="green" fontcolor="black"] +"rsc2_stop_0 node3" -> "grp:1_stopped_0" [ style = bold] +"rsc2_stop_0 node3" -> "rsc1_stop_0 node3" [ style = bold] +"rsc2_stop_0 node3" -> "rsc2_start_0 node1" [ style = bold] +"rsc2_stop_0 node3" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-6.dot b/cts/scheduler/dot/clone-recover-no-shuffle-6.dot new file mode 100644 index 00000000000..f8cfe9252d2 --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-6.dot @@ -0,0 +1,97 @@ + digraph "g" { +"base-bundle-0_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-0_start_0 node1" -> "base-bundle-0_monitor_30000 node1" [ style = bold] +"base-bundle-0_start_0 node1" -> "base_start_0 base-bundle-0" [ style = bold] +"base-bundle-0_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-0_stop_0 node3" -> "base-bundle-0_start_0 node1" [ style = bold] +"base-bundle-0_stop_0 node3" -> "base-bundle-podman-0_stop_0 node3" [ style = bold] +"base-bundle-0_stop_0 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-1_monitor_30000 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-1_start_0 node3" -> "base-bundle-1_monitor_30000 node3" [ style = bold] +"base-bundle-1_start_0 node3" -> "base_start_0 base-bundle-1" [ style = bold] +"base-bundle-1_start_0 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-1_stop_0 node2" -> 
"base-bundle-1_start_0 node3" [ style = bold] +"base-bundle-1_stop_0 node2" -> "base-bundle-podman-1_stop_0 node2" [ style = bold] +"base-bundle-1_stop_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node2" [ style = bold] +"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node2" [ style = bold] +"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node2" [ style = bold] +"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_30000 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_start_0 node2" -> "base-bundle-2_monitor_30000 node2" [ style = bold] +"base-bundle-2_start_0 node2" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] +"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] +"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-clone_start_0" -> "base_start_0 base-bundle-0" [ style = bold] +"base-bundle-clone_start_0" -> "base_start_0 base-bundle-1" [ style = bold] +"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_stop_0" -> "base-bundle-clone_stopped_0" [ style = bold] +"base-bundle-clone_stop_0" -> "base_stop_0 base-bundle-0" [ style = bold] +"base-bundle-clone_stop_0" -> "base_stop_0 base-bundle-1" [ style = bold] +"base-bundle-clone_stop_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_stopped_0" -> "base-bundle-clone_start_0" [ style = bold] +"base-bundle-clone_stopped_0" -> "base-bundle_stopped_0" [ style = bold] +"base-bundle-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-podman-0_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-0_start_0 node1" -> "base-bundle-0_start_0 node1" [ style = bold] +"base-bundle-podman-0_start_0 node1" -> "base-bundle-podman-0_monitor_60000 node1" [ style = bold] +"base-bundle-podman-0_start_0 node1" -> "base-bundle_running_0" [ style = bold] +"base-bundle-podman-0_start_0 node1" -> "base_start_0 base-bundle-0" [ style = bold] +"base-bundle-podman-0_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-0_stop_0 node3" -> "base-bundle-podman-0_start_0 node1" [ style = bold] +"base-bundle-podman-0_stop_0 node3" -> "base-bundle_stopped_0" [ style = bold] +"base-bundle-podman-0_stop_0 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-1_monitor_60000 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-1_start_0 node3" -> "base-bundle-1_start_0 node3" [ style = bold] +"base-bundle-podman-1_start_0 node3" -> "base-bundle-podman-1_monitor_60000 node3" [ style = bold] +"base-bundle-podman-1_start_0 node3" -> "base-bundle_running_0" [ style = bold] +"base-bundle-podman-1_start_0 node3" -> "base_start_0 base-bundle-1" [ style = bold] +"base-bundle-podman-1_start_0 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-1_stop_0 node2" -> "base-bundle-podman-1_start_0 node3" [ style = bold] +"base-bundle-podman-1_stop_0 node2" -> 
"base-bundle_stopped_0" [ style = bold] +"base-bundle-podman-1_stop_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-2_monitor_60000 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_monitor_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_monitor_0 node2" [ style = bold] +"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_monitor_0 node3" [ style = bold] +"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_start_0 node2" [ style = bold] +"base-bundle-podman-2_start_0 node2" -> "base-bundle-podman-2_monitor_60000 node2" [ style = bold] +"base-bundle-podman-2_start_0 node2" -> "base-bundle_running_0" [ style = bold] +"base-bundle-podman-2_start_0 node2" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-podman-2_start_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] +"base-bundle_start_0" -> "base-bundle-podman-0_start_0 node1" [ style = bold] +"base-bundle_start_0" -> "base-bundle-podman-1_start_0 node3" [ style = bold] +"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node2" [ style = bold] +"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_stop_0" -> "base-bundle-clone_stop_0" [ style = bold] +"base-bundle_stop_0" -> "base-bundle-podman-0_stop_0 node3" [ style = bold] +"base-bundle_stop_0" -> "base-bundle-podman-1_stop_0 node2" [ style = bold] +"base-bundle_stop_0" -> "base_stop_0 base-bundle-0" [ style = bold] +"base-bundle_stop_0" -> "base_stop_0 base-bundle-1" [ style = bold] +"base-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] +"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] +"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base_start_0 base-bundle-0" -> "base-bundle-clone_running_0" [ style = bold] +"base_start_0 base-bundle-0" -> "base_start_0 base-bundle-1" [ style = bold] +"base_start_0 base-bundle-0" [ style=bold color="green" fontcolor="black"] +"base_start_0 base-bundle-1" -> "base-bundle-clone_running_0" [ style = bold] +"base_start_0 base-bundle-1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base_start_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] +"base_stop_0 base-bundle-0" -> "base-bundle-0_stop_0 node3" [ style = bold] +"base_stop_0 base-bundle-0" -> "base-bundle-clone_stopped_0" [ style = bold] +"base_stop_0 base-bundle-0" -> "base_start_0 base-bundle-0" [ style = bold] +"base_stop_0 base-bundle-0" [ style=bold color="green" fontcolor="black"] +"base_stop_0 base-bundle-1" -> "base-bundle-1_stop_0 node2" [ style = bold] +"base_stop_0 base-bundle-1" -> "base-bundle-clone_stopped_0" [ style = bold] +"base_stop_0 base-bundle-1" -> "base_start_0 base-bundle-1" [ style = bold] +"base_stop_0 base-bundle-1" -> "base_stop_0 base-bundle-0" [ style = bold] +"base_stop_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-7.dot b/cts/scheduler/dot/clone-recover-no-shuffle-7.dot new file mode 100644 index 00000000000..8bff7da01db --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-7.dot @@ -0,0 +1,45 @@ + digraph "g" { +"Cancel dummy_monitor_10000 node2" -> "dummy_demote_0 node2" [ style = bold] +"Cancel 
dummy_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"dummy-clone_demote_0" -> "dummy-clone_demoted_0" [ style = bold] +"dummy-clone_demote_0" -> "dummy_demote_0 node2" [ style = bold] +"dummy-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_demoted_0" -> "dummy-clone_promote_0" [ style = bold] +"dummy-clone_demoted_0" -> "dummy-clone_start_0" [ style = bold] +"dummy-clone_demoted_0" -> "dummy-clone_stop_0" [ style = bold] +"dummy-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_promote_0" -> "dummy_promote_0 node1" [ style = bold] +"dummy-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_running_0" -> "dummy-clone_promote_0" [ style = bold] +"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] +"dummy-clone_start_0" -> "dummy:2_start_0 node3" [ style = bold] +"dummy-clone_start_0" -> "dummy_start_0 node1" [ style = bold] +"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_stop_0" -> "dummy-clone_stopped_0" [ style = bold] +"dummy-clone_stop_0" -> "dummy_stop_0 node3" [ style = bold] +"dummy-clone_stop_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_stopped_0" -> "dummy-clone_promote_0" [ style = bold] +"dummy-clone_stopped_0" -> "dummy-clone_start_0" [ style = bold] +"dummy-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] +"dummy:2_monitor_11000 node3" [ style=bold color="green" fontcolor="black"] +"dummy:2_start_0 node3" -> "dummy-clone_running_0" [ style = bold] +"dummy:2_start_0 node3" -> "dummy:2_monitor_11000 node3" [ style = bold] +"dummy:2_start_0 node3" [ style=bold color="green" fontcolor="black"] +"dummy_demote_0 node2" -> "dummy-clone_demoted_0" [ style = bold] +"dummy_demote_0 node2" -> "dummy_monitor_11000 node2" [ style = bold] +"dummy_demote_0 node2" [ style=bold color="green" fontcolor="black"] +"dummy_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"dummy_monitor_11000 node2" [ style=bold color="green" fontcolor="black"] +"dummy_promote_0 node1" -> "dummy-clone_promoted_0" [ style = bold] +"dummy_promote_0 node1" -> "dummy_monitor_10000 node1" [ style = bold] +"dummy_promote_0 node1" [ style=bold color="green" fontcolor="black"] +"dummy_start_0 node1" -> "dummy-clone_running_0" [ style = bold] +"dummy_start_0 node1" -> "dummy_monitor_10000 node1" [ style = bold] +"dummy_start_0 node1" -> "dummy_promote_0 node1" [ style = bold] +"dummy_start_0 node1" [ style=bold color="green" fontcolor="black"] +"dummy_stop_0 node3" -> "dummy-clone_stopped_0" [ style = bold] +"dummy_stop_0 node3" -> "dummy_start_0 node1" [ style = bold] +"dummy_stop_0 node3" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-8.dot b/cts/scheduler/dot/clone-recover-no-shuffle-8.dot new file mode 100644 index 00000000000..d9c311a67cb --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-8.dot @@ -0,0 +1,63 @@ + digraph "g" { +"Cancel rsc1_monitor_10000 node2" -> "rsc1_demote_0 node2" [ style = bold] +"Cancel rsc1_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"Cancel rsc2_monitor_10000 node2" -> "rsc2_demote_0 node2" [ style = bold] +"Cancel rsc2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"grp-clone_demote_0" -> "grp-clone_demoted_0" [ style = bold] 
+"grp-clone_demote_0" -> "grp:1_demote_0" [ style = bold] +"grp-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_demoted_0" -> "grp-clone_promote_0" [ style = bold] +"grp-clone_demoted_0" -> "grp-clone_start_0" [ style = bold] +"grp-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_promote_0" -> "grp:2_promote_0" [ style = bold] +"grp-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_running_0" -> "grp-clone_promote_0" [ style = bold] +"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] +"grp-clone_start_0" -> "grp:2_start_0" [ style = bold] +"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"grp:1_demote_0" -> "rsc1_demote_0 node2" [ style = bold] +"grp:1_demote_0" -> "rsc2_demote_0 node2" [ style = bold] +"grp:1_demote_0" [ style=bold color="green" fontcolor="orange"] +"grp:1_demoted_0" -> "grp-clone_demoted_0" [ style = bold] +"grp:1_demoted_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_promote_0" -> "rsc1:2_promote_0 node1" [ style = bold] +"grp:2_promote_0" -> "rsc2:2_promote_0 node1" [ style = bold] +"grp:2_promote_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_promoted_0" -> "grp-clone_promoted_0" [ style = bold] +"grp:2_promoted_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_running_0" -> "grp-clone_running_0" [ style = bold] +"grp:2_running_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_start_0" -> "grp:2_running_0" [ style = bold] +"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] +"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] +"grp:2_start_0" [ style=bold color="green" fontcolor="orange"] +"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc1:2_promote_0 node1" -> "grp:2_promoted_0" [ style = bold] +"rsc1:2_promote_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] +"rsc1:2_promote_0 node1" -> "rsc2:2_promote_0 node1" [ style = bold] +"rsc1:2_promote_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc1:2_promote_0 node1" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] +"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc1_demote_0 node2" -> "grp:1_demoted_0" [ style = bold] +"rsc1_demote_0 node2" -> "rsc1_monitor_11000 node2" [ style = bold] +"rsc1_demote_0 node2" [ style=bold color="green" fontcolor="black"] +"rsc1_monitor_11000 node2" [ style=bold color="green" fontcolor="black"] +"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_promote_0 node1" -> "grp:2_promoted_0" [ style = bold] +"rsc2:2_promote_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] +"rsc2:2_promote_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] +"rsc2:2_start_0 node1" -> "rsc2:2_promote_0 node1" [ style = bold] +"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc2_demote_0 node2" -> "grp:1_demoted_0" [ style = bold] +"rsc2_demote_0 node2" -> "rsc1_demote_0 node2" [ style = bold] +"rsc2_demote_0 node2" -> "rsc2_monitor_11000 node2" [ style = bold] 
+"rsc2_demote_0 node2" [ style=bold color="green" fontcolor="black"] +"rsc2_monitor_11000 node2" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-9.dot b/cts/scheduler/dot/clone-recover-no-shuffle-9.dot new file mode 100644 index 00000000000..45dbac47e2b --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-9.dot @@ -0,0 +1,69 @@ + digraph "g" { +"Cancel base_monitor_15000 base-bundle-1" -> "base_demote_0 base-bundle-1" [ style = bold] +"Cancel base_monitor_15000 base-bundle-1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold] +"base-bundle-2_start_0 node1" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node1" -> "base:2_promote_0 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_demote_0" -> "base-bundle-clone_demoted_0" [ style = bold] +"base-bundle-clone_demote_0" -> "base_demote_0 base-bundle-1" [ style = bold] +"base-bundle-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_demoted_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle-clone_demoted_0" -> "base-bundle-clone_start_0" [ style = bold] +"base-bundle-clone_demoted_0" -> "base-bundle_demoted_0" [ style = bold] +"base-bundle-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promote_0" -> "base:2_promote_0 base-bundle-2" [ style = bold] +"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold] +"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_running_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] +"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] +"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold] 
+"base-bundle-podman-2_start_0 node1" -> "base:2_promote_0 base-bundle-2" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle_demote_0" -> "base-bundle-clone_demote_0" [ style = bold] +"base-bundle_demote_0" -> "base-bundle_demoted_0" [ style = bold] +"base-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_demoted_0" -> "base-bundle_promote_0" [ style = bold] +"base-bundle_demoted_0" -> "base-bundle_start_0" [ style = bold] +"base-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_running_0" -> "base-bundle_promote_0" [ style = bold] +"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] +"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold] +"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"] +"base:2_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base:2_promote_0 base-bundle-2" -> "base-bundle-clone_promoted_0" [ style = bold] +"base:2_promote_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] +"base:2_promote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] +"base:2_start_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] +"base:2_start_0 base-bundle-2" -> "base:2_promote_0 base-bundle-2" [ style = bold] +"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base_demote_0 base-bundle-1" -> "base-bundle-clone_demoted_0" [ style = bold] +"base_demote_0 base-bundle-1" -> "base_monitor_16000 base-bundle-1" [ style = bold] +"base_demote_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] +"base_monitor_16000 base-bundle-1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-1.exp b/cts/scheduler/exp/clone-recover-no-shuffle-1.exp new file mode 100644 index 00000000000..670a823dac9 --- /dev/null +++ b/cts/scheduler/exp/clone-recover-no-shuffle-1.exp @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-10.exp b/cts/scheduler/exp/clone-recover-no-shuffle-10.exp new file mode 100644 index 00000000000..27b8b7037c3 --- /dev/null +++ b/cts/scheduler/exp/clone-recover-no-shuffle-10.exp @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-11.exp b/cts/scheduler/exp/clone-recover-no-shuffle-11.exp new file mode 100644 index 00000000000..40cf1f69c11 --- /dev/null +++ b/cts/scheduler/exp/clone-recover-no-shuffle-11.exp @@ -0,0 +1,110 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-12.exp b/cts/scheduler/exp/clone-recover-no-shuffle-12.exp new 
file mode 100644 index 00000000000..919e6b291c0 --- /dev/null +++ b/cts/scheduler/exp/clone-recover-no-shuffle-12.exp @@ -0,0 +1,187 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-2.exp b/cts/scheduler/exp/clone-recover-no-shuffle-2.exp new file mode 100644 index 00000000000..84b1e1bc98c --- /dev/null +++ b/cts/scheduler/exp/clone-recover-no-shuffle-2.exp @@ -0,0 +1,110 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-3.exp b/cts/scheduler/exp/clone-recover-no-shuffle-3.exp new file mode 100644 index 00000000000..6b6ed075f57 --- /dev/null +++ b/cts/scheduler/exp/clone-recover-no-shuffle-3.exp @@ -0,0 +1,171 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-4.exp b/cts/scheduler/exp/clone-recover-no-shuffle-4.exp new file mode 100644 index 00000000000..4596c685d0a --- /dev/null +++ b/cts/scheduler/exp/clone-recover-no-shuffle-4.exp @@ -0,0 +1,123 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-5.exp b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp new file mode 100644 index 00000000000..8a8e799793e --- /dev/null +++ b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp @@ -0,0 +1,452 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-6.exp b/cts/scheduler/exp/clone-recover-no-shuffle-6.exp new file mode 100644 index 00000000000..e6704c9e254 --- /dev/null +++ b/cts/scheduler/exp/clone-recover-no-shuffle-6.exp @@ -0,0 +1,507 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-7.exp b/cts/scheduler/exp/clone-recover-no-shuffle-7.exp new file mode 100644 index 00000000000..950de9e0312 --- /dev/null +++ b/cts/scheduler/exp/clone-recover-no-shuffle-7.exp @@ -0,0 +1,240 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-8.exp b/cts/scheduler/exp/clone-recover-no-shuffle-8.exp new file mode 100644 index 00000000000..763a2f02fb0 --- /dev/null +++ b/cts/scheduler/exp/clone-recover-no-shuffle-8.exp @@ -0,0 +1,338 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-9.exp b/cts/scheduler/exp/clone-recover-no-shuffle-9.exp new file mode 100644 index 00000000000..7bfe3c47281 --- /dev/null +++ b/cts/scheduler/exp/clone-recover-no-shuffle-9.exp @@ -0,0 +1,364 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-1.scores b/cts/scheduler/scores/clone-recover-no-shuffle-1.scores new file mode 100644 index 00000000000..c1d60b2f39a --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-1.scores @@ -0,0 +1,25 @@ + +pcmk__clone_assign: dummy-clone allocation score on node1: 0 +pcmk__clone_assign: dummy-clone allocation score on node2: 0 +pcmk__clone_assign: dummy-clone allocation score on node3: 0 +pcmk__clone_assign: dummy:0 allocation score on node1: 0 +pcmk__clone_assign: dummy:0 allocation score on node2: 1 +pcmk__clone_assign: dummy:0 allocation score on node3: 0 +pcmk__clone_assign: dummy:1 allocation score on node1: 0 +pcmk__clone_assign: dummy:1 allocation score on node2: 0 +pcmk__clone_assign: dummy:1 allocation score on node3: 1 +pcmk__clone_assign: dummy:2 allocation score on node1: 0 +pcmk__clone_assign: dummy:2 allocation score on node2: 0 +pcmk__clone_assign: dummy:2 allocation score on node3: 0 +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: dummy:0 allocation score on node1: 0 +pcmk__primitive_assign: dummy:0 allocation score on node2: 1 +pcmk__primitive_assign: dummy:0 allocation score on node3: 0 +pcmk__primitive_assign: dummy:1 allocation score on node1: 0 +pcmk__primitive_assign: dummy:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:1 allocation score on node3: 1 +pcmk__primitive_assign: dummy:2 allocation score on node1: 0 +pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-10.scores b/cts/scheduler/scores/clone-recover-no-shuffle-10.scores new file mode 100644 index 00000000000..4ac63e37058 --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-10.scores @@ -0,0 +1,31 @@ + +dummy:0 promotion score on node3: 5 +dummy:1 promotion score on node2: 15 +dummy:2 promotion score on node1: 10 +pcmk__clone_assign: dummy-clone allocation score on node1: 0 +pcmk__clone_assign: dummy-clone allocation score on node2: 0 +pcmk__clone_assign: dummy-clone allocation score on node3: 0 +pcmk__clone_assign: dummy:0 allocation score on node1: 10 +pcmk__clone_assign: dummy:0 allocation score on node2: 0 +pcmk__clone_assign: dummy:0 allocation score on node3: 6 +pcmk__clone_assign: dummy:1 allocation score on node1: 10 +pcmk__clone_assign: dummy:1 allocation score on node2: 16 +pcmk__clone_assign: dummy:1 allocation score on node3: 0 +pcmk__clone_assign: dummy:2 allocation score on node1: 10 +pcmk__clone_assign: dummy:2 allocation score on node2: 15 +pcmk__clone_assign: dummy:2 allocation score on node3: 5 +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: dummy:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: dummy:0 allocation score on node1: 10 +pcmk__primitive_assign: dummy:0 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:0 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:0 allocation score on node3: 6 +pcmk__primitive_assign: dummy:0 allocation score on node3: 6 +pcmk__primitive_assign: dummy:1 allocation score on node1: 10 +pcmk__primitive_assign: dummy:1 
allocation score on node2: 16 +pcmk__primitive_assign: dummy:1 allocation score on node3: 0 +pcmk__primitive_assign: dummy:2 allocation score on node1: 10 +pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:2 allocation score on node3: 5 diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-11.scores b/cts/scheduler/scores/clone-recover-no-shuffle-11.scores new file mode 100644 index 00000000000..1216dba711a --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-11.scores @@ -0,0 +1,82 @@ + +grp:0 promotion score on node3: 10 +grp:1 promotion score on node2: 30 +grp:2 promotion score on node1: 20 +pcmk__clone_assign: grp-clone allocation score on node1: 0 +pcmk__clone_assign: grp-clone allocation score on node2: 0 +pcmk__clone_assign: grp-clone allocation score on node3: 0 +pcmk__clone_assign: grp:0 allocation score on node1: 20 +pcmk__clone_assign: grp:0 allocation score on node2: 0 +pcmk__clone_assign: grp:0 allocation score on node3: 10 +pcmk__clone_assign: grp:1 allocation score on node1: 20 +pcmk__clone_assign: grp:1 allocation score on node2: 30 +pcmk__clone_assign: grp:1 allocation score on node3: 0 +pcmk__clone_assign: grp:2 allocation score on node1: 20 +pcmk__clone_assign: grp:2 allocation score on node2: 30 +pcmk__clone_assign: grp:2 allocation score on node3: 10 +pcmk__clone_assign: rsc1:0 allocation score on node1: 0 +pcmk__clone_assign: rsc1:0 allocation score on node2: 0 +pcmk__clone_assign: rsc1:0 allocation score on node3: 1 +pcmk__clone_assign: rsc1:1 allocation score on node1: 0 +pcmk__clone_assign: rsc1:1 allocation score on node2: 1 +pcmk__clone_assign: rsc1:1 allocation score on node3: 0 +pcmk__clone_assign: rsc1:2 allocation score on node1: 0 +pcmk__clone_assign: rsc1:2 allocation score on node2: 0 +pcmk__clone_assign: rsc1:2 allocation score on node3: 0 +pcmk__clone_assign: rsc2:0 allocation score on node1: 0 +pcmk__clone_assign: rsc2:0 allocation score on node2: 0 +pcmk__clone_assign: rsc2:0 allocation score on node3: 1 +pcmk__clone_assign: rsc2:1 allocation score on node1: 0 +pcmk__clone_assign: rsc2:1 allocation score on node2: 1 +pcmk__clone_assign: rsc2:1 allocation score on node3: 0 +pcmk__clone_assign: rsc2:2 allocation score on node1: 0 +pcmk__clone_assign: rsc2:2 allocation score on node2: 0 +pcmk__clone_assign: rsc2:2 allocation score on node3: 0 +pcmk__group_assign: grp:0 allocation score on node1: 20 +pcmk__group_assign: grp:0 allocation score on node2: -INFINITY +pcmk__group_assign: grp:0 allocation score on node3: 10 +pcmk__group_assign: grp:1 allocation score on node1: 20 +pcmk__group_assign: grp:1 allocation score on node2: 30 +pcmk__group_assign: grp:1 allocation score on node3: 0 +pcmk__group_assign: grp:2 allocation score on node1: 20 +pcmk__group_assign: grp:2 allocation score on node2: -INFINITY +pcmk__group_assign: grp:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc1:0 allocation score on node1: 0 +pcmk__group_assign: rsc1:0 allocation score on node2: -INFINITY +pcmk__group_assign: rsc1:0 allocation score on node3: 1 +pcmk__group_assign: rsc1:1 allocation score on node1: 0 +pcmk__group_assign: rsc1:1 allocation score on node2: 1 +pcmk__group_assign: rsc1:1 allocation score on node3: 0 +pcmk__group_assign: rsc1:2 allocation score on node1: 0 +pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc2:0 allocation score on node1: 0 +pcmk__group_assign: rsc2:0 allocation 
score on node2: -INFINITY +pcmk__group_assign: rsc2:0 allocation score on node3: 1 +pcmk__group_assign: rsc2:1 allocation score on node1: 0 +pcmk__group_assign: rsc2:1 allocation score on node2: 1 +pcmk__group_assign: rsc2:1 allocation score on node3: 0 +pcmk__group_assign: rsc2:2 allocation score on node1: 0 +pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:0 allocation score on node3: 2 +pcmk__primitive_assign: rsc1:1 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node2: 2 +pcmk__primitive_assign: rsc1:1 allocation score on node3: 0 +pcmk__primitive_assign: rsc1:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node3: 1 +pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node2: 1 +pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-12.scores b/cts/scheduler/scores/clone-recover-no-shuffle-12.scores new file mode 100644 index 00000000000..24cf3148c4c --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-12.scores @@ -0,0 +1,67 @@ + +base:0 promotion score on base-bundle-0: 5 +base:1 promotion score on base-bundle-1: 15 +base:2 promotion score on base-bundle-2: 10 +pcmk__bundle_allocate: base-bundle allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_allocate: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node1: 0 
+pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_allocate: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_allocate: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_allocate: base:2 allocation score on base-bundle-2: 500 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-2.scores b/cts/scheduler/scores/clone-recover-no-shuffle-2.scores new file mode 100644 index 00000000000..cfbd5bf5337 --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-2.scores @@ -0,0 +1,79 @@ + +pcmk__clone_assign: grp-clone allocation score on node1: 0 
+pcmk__clone_assign: grp-clone allocation score on node2: 0 +pcmk__clone_assign: grp-clone allocation score on node3: 0 +pcmk__clone_assign: grp:0 allocation score on node1: 0 +pcmk__clone_assign: grp:0 allocation score on node2: 0 +pcmk__clone_assign: grp:0 allocation score on node3: 0 +pcmk__clone_assign: grp:1 allocation score on node1: 0 +pcmk__clone_assign: grp:1 allocation score on node2: 0 +pcmk__clone_assign: grp:1 allocation score on node3: 0 +pcmk__clone_assign: grp:2 allocation score on node1: 0 +pcmk__clone_assign: grp:2 allocation score on node2: 0 +pcmk__clone_assign: grp:2 allocation score on node3: 0 +pcmk__clone_assign: rsc1:0 allocation score on node1: 0 +pcmk__clone_assign: rsc1:0 allocation score on node2: 1 +pcmk__clone_assign: rsc1:0 allocation score on node3: 0 +pcmk__clone_assign: rsc1:1 allocation score on node1: 0 +pcmk__clone_assign: rsc1:1 allocation score on node2: 0 +pcmk__clone_assign: rsc1:1 allocation score on node3: 1 +pcmk__clone_assign: rsc1:2 allocation score on node1: 0 +pcmk__clone_assign: rsc1:2 allocation score on node2: 0 +pcmk__clone_assign: rsc1:2 allocation score on node3: 0 +pcmk__clone_assign: rsc2:0 allocation score on node1: 0 +pcmk__clone_assign: rsc2:0 allocation score on node2: 1 +pcmk__clone_assign: rsc2:0 allocation score on node3: 0 +pcmk__clone_assign: rsc2:1 allocation score on node1: 0 +pcmk__clone_assign: rsc2:1 allocation score on node2: 0 +pcmk__clone_assign: rsc2:1 allocation score on node3: 1 +pcmk__clone_assign: rsc2:2 allocation score on node1: 0 +pcmk__clone_assign: rsc2:2 allocation score on node2: 0 +pcmk__clone_assign: rsc2:2 allocation score on node3: 0 +pcmk__group_assign: grp:0 allocation score on node1: 0 +pcmk__group_assign: grp:0 allocation score on node2: 0 +pcmk__group_assign: grp:0 allocation score on node3: 0 +pcmk__group_assign: grp:1 allocation score on node1: 0 +pcmk__group_assign: grp:1 allocation score on node2: -INFINITY +pcmk__group_assign: grp:1 allocation score on node3: 0 +pcmk__group_assign: grp:2 allocation score on node1: 0 +pcmk__group_assign: grp:2 allocation score on node2: -INFINITY +pcmk__group_assign: grp:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc1:0 allocation score on node1: 0 +pcmk__group_assign: rsc1:0 allocation score on node2: 1 +pcmk__group_assign: rsc1:0 allocation score on node3: 0 +pcmk__group_assign: rsc1:1 allocation score on node1: 0 +pcmk__group_assign: rsc1:1 allocation score on node2: -INFINITY +pcmk__group_assign: rsc1:1 allocation score on node3: 1 +pcmk__group_assign: rsc1:2 allocation score on node1: 0 +pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc2:0 allocation score on node1: 0 +pcmk__group_assign: rsc2:0 allocation score on node2: 1 +pcmk__group_assign: rsc2:0 allocation score on node3: 0 +pcmk__group_assign: rsc2:1 allocation score on node1: 0 +pcmk__group_assign: rsc2:1 allocation score on node2: -INFINITY +pcmk__group_assign: rsc2:1 allocation score on node3: 1 +pcmk__group_assign: rsc2:2 allocation score on node1: 0 +pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:0 allocation score on 
node2: 2 +pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 +pcmk__primitive_assign: rsc1:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node2: 1 +pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node3: 1 +pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-3.scores b/cts/scheduler/scores/clone-recover-no-shuffle-3.scores new file mode 100644 index 00000000000..461c11633b1 --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-3.scores @@ -0,0 +1,64 @@ + +pcmk__bundle_allocate: base-bundle allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_allocate: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_allocate: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_allocate: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_allocate: base:2 allocation score on base-bundle-2: 500 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-4.scores b/cts/scheduler/scores/clone-recover-no-shuffle-4.scores new file mode 100644 index 00000000000..492dad1baa4 --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-4.scores @@ -0,0 +1,31 @@ + +pcmk__clone_assign: dummy-clone allocation score on node1: 100 +pcmk__clone_assign: dummy-clone allocation score on node2: 0 +pcmk__clone_assign: dummy-clone allocation score on node3: 0 +pcmk__clone_assign: dummy:0 allocation score on node1: 100 +pcmk__clone_assign: dummy:0 allocation score on node2: 1 +pcmk__clone_assign: dummy:0 allocation score on node3: 0 +pcmk__clone_assign: dummy:1 allocation score on node1: 100 +pcmk__clone_assign: dummy:1 allocation score on node2: 0 +pcmk__clone_assign: dummy:1 allocation score on node3: 1 +pcmk__clone_assign: dummy:2 allocation score on node1: 100 +pcmk__clone_assign: dummy:2 allocation score on node2: 0 +pcmk__clone_assign: dummy:2 allocation score on node3: 0 +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: dummy:0 allocation 
score on node1: 100 +pcmk__primitive_assign: dummy:0 allocation score on node1: 100 +pcmk__primitive_assign: dummy:0 allocation score on node2: 1 +pcmk__primitive_assign: dummy:0 allocation score on node2: 1 +pcmk__primitive_assign: dummy:0 allocation score on node3: 0 +pcmk__primitive_assign: dummy:0 allocation score on node3: 0 +pcmk__primitive_assign: dummy:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: dummy:1 allocation score on node1: 100 +pcmk__primitive_assign: dummy:1 allocation score on node2: 0 +pcmk__primitive_assign: dummy:1 allocation score on node2: 0 +pcmk__primitive_assign: dummy:1 allocation score on node3: 1 +pcmk__primitive_assign: dummy:1 allocation score on node3: 1 +pcmk__primitive_assign: dummy:2 allocation score on node1: -INFINITY +pcmk__primitive_assign: dummy:2 allocation score on node2: 0 +pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-5.scores b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores new file mode 100644 index 00000000000..eecba43fae0 --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores @@ -0,0 +1,79 @@ + +pcmk__clone_assign: grp-clone allocation score on node1: 100 +pcmk__clone_assign: grp-clone allocation score on node2: 0 +pcmk__clone_assign: grp-clone allocation score on node3: 0 +pcmk__clone_assign: grp:0 allocation score on node1: 100 +pcmk__clone_assign: grp:0 allocation score on node2: 0 +pcmk__clone_assign: grp:0 allocation score on node3: 0 +pcmk__clone_assign: grp:1 allocation score on node1: 100 +pcmk__clone_assign: grp:1 allocation score on node2: 0 +pcmk__clone_assign: grp:1 allocation score on node3: 0 +pcmk__clone_assign: grp:2 allocation score on node1: 100 +pcmk__clone_assign: grp:2 allocation score on node2: 0 +pcmk__clone_assign: grp:2 allocation score on node3: 0 +pcmk__clone_assign: rsc1:0 allocation score on node1: 100 +pcmk__clone_assign: rsc1:0 allocation score on node2: 1 +pcmk__clone_assign: rsc1:0 allocation score on node3: 0 +pcmk__clone_assign: rsc1:1 allocation score on node1: 100 +pcmk__clone_assign: rsc1:1 allocation score on node2: 0 +pcmk__clone_assign: rsc1:1 allocation score on node3: 1 +pcmk__clone_assign: rsc1:2 allocation score on node1: 100 +pcmk__clone_assign: rsc1:2 allocation score on node2: 0 +pcmk__clone_assign: rsc1:2 allocation score on node3: 0 +pcmk__clone_assign: rsc2:0 allocation score on node1: 0 +pcmk__clone_assign: rsc2:0 allocation score on node2: 1 +pcmk__clone_assign: rsc2:0 allocation score on node3: 0 +pcmk__clone_assign: rsc2:1 allocation score on node1: 0 +pcmk__clone_assign: rsc2:1 allocation score on node2: 0 +pcmk__clone_assign: rsc2:1 allocation score on node3: 1 +pcmk__clone_assign: rsc2:2 allocation score on node1: 0 +pcmk__clone_assign: rsc2:2 allocation score on node2: 0 +pcmk__clone_assign: rsc2:2 allocation score on node3: 0 +pcmk__group_assign: grp:0 allocation score on node1: 100 +pcmk__group_assign: grp:0 allocation score on node2: 0 +pcmk__group_assign: grp:0 allocation score on node3: 0 +pcmk__group_assign: grp:1 allocation score on node1: 100 +pcmk__group_assign: grp:1 allocation score on node2: 0 +pcmk__group_assign: grp:1 allocation score on node3: 0 +pcmk__group_assign: grp:2 allocation score on node1: 100 +pcmk__group_assign: grp:2 allocation score on node2: 0 +pcmk__group_assign: grp:2 allocation score on node3: 0 +pcmk__group_assign: rsc1:0 allocation score on node1: 100 +pcmk__group_assign: rsc1:0 allocation score on node2: 1 
+pcmk__group_assign: rsc1:0 allocation score on node3: 0 +pcmk__group_assign: rsc1:1 allocation score on node1: 100 +pcmk__group_assign: rsc1:1 allocation score on node2: 0 +pcmk__group_assign: rsc1:1 allocation score on node3: 1 +pcmk__group_assign: rsc1:2 allocation score on node1: 100 +pcmk__group_assign: rsc1:2 allocation score on node2: 0 +pcmk__group_assign: rsc1:2 allocation score on node3: 0 +pcmk__group_assign: rsc2:0 allocation score on node1: 0 +pcmk__group_assign: rsc2:0 allocation score on node2: 1 +pcmk__group_assign: rsc2:0 allocation score on node3: 0 +pcmk__group_assign: rsc2:1 allocation score on node1: 0 +pcmk__group_assign: rsc2:1 allocation score on node2: 0 +pcmk__group_assign: rsc2:1 allocation score on node3: 1 +pcmk__group_assign: rsc2:2 allocation score on node1: 0 +pcmk__group_assign: rsc2:2 allocation score on node2: 0 +pcmk__group_assign: rsc2:2 allocation score on node3: 0 +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node1: 100 +pcmk__primitive_assign: rsc1:0 allocation score on node2: 2 +pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node1: 100 +pcmk__primitive_assign: rsc1:1 allocation score on node2: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 +pcmk__primitive_assign: rsc1:2 allocation score on node1: 100 +pcmk__primitive_assign: rsc1:2 allocation score on node2: 0 +pcmk__primitive_assign: rsc1:2 allocation score on node3: 0 +pcmk__primitive_assign: rsc2:0 allocation score on node1: 0 +pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node1: 0 +pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-6.scores b/cts/scheduler/scores/clone-recover-no-shuffle-6.scores new file mode 100644 index 00000000000..643e30f9d18 --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-6.scores @@ -0,0 +1,70 @@ + +pcmk__bundle_allocate: base-bundle allocation score on node1: 100 +pcmk__bundle_allocate: base-bundle allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_allocate: base-bundle-clone allocation 
score on base-bundle-2: -INFINITY +pcmk__bundle_allocate: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node1: 100 +pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node1: 100 +pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node1: 100 +pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_allocate: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_allocate: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_allocate: base:2 allocation score on base-bundle-2: 500 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 100 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 100 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 100 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: 
base-bundle-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-7.scores b/cts/scheduler/scores/clone-recover-no-shuffle-7.scores new file mode 100644 index 00000000000..fc45bf740fd --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-7.scores @@ -0,0 +1,34 @@ + +dummy:0 promotion score on node1: 15 +dummy:1 promotion score on node2: 10 +dummy:2 promotion score on node3: 5 +pcmk__clone_assign: dummy-clone allocation score on node1: 0 +pcmk__clone_assign: dummy-clone allocation score on node2: 0 +pcmk__clone_assign: dummy-clone allocation score on node3: 0 +pcmk__clone_assign: dummy:0 allocation score on node1: 15 +pcmk__clone_assign: dummy:0 allocation score on node2: 0 +pcmk__clone_assign: dummy:0 allocation score on node3: 6 +pcmk__clone_assign: dummy:1 allocation score on node1: 15 +pcmk__clone_assign: dummy:1 allocation score on node2: 11 +pcmk__clone_assign: dummy:1 allocation score on node3: 0 +pcmk__clone_assign: dummy:2 allocation score on node1: 15 +pcmk__clone_assign: dummy:2 allocation score on node2: 10 +pcmk__clone_assign: dummy:2 allocation score on node3: 5 +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: dummy:0 allocation score on node1: 15 +pcmk__primitive_assign: dummy:0 allocation score on node1: 15 +pcmk__primitive_assign: dummy:0 allocation score on node2: 0 +pcmk__primitive_assign: dummy:0 allocation score on node2: 0 +pcmk__primitive_assign: dummy:0 allocation score on node3: 6 +pcmk__primitive_assign: dummy:0 allocation score on node3: 6 +pcmk__primitive_assign: dummy:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: dummy:1 allocation score on node1: 15 +pcmk__primitive_assign: dummy:1 allocation score on node2: 11 +pcmk__primitive_assign: dummy:1 allocation score on node2: 11 +pcmk__primitive_assign: dummy:1 allocation score on node3: 0 +pcmk__primitive_assign: dummy:1 allocation score on node3: 0 +pcmk__primitive_assign: dummy:2 allocation score on node1: -INFINITY +pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:2 allocation score on node3: 5 diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-8.scores b/cts/scheduler/scores/clone-recover-no-shuffle-8.scores new file mode 100644 index 00000000000..56d4cc8395a --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-8.scores @@ -0,0 +1,82 @@ + +grp:0 promotion score on node3: 10 +grp:1 promotion score on node2: 20 +grp:2 promotion score on node1: 30 +pcmk__clone_assign: grp-clone allocation score on node1: 0 +pcmk__clone_assign: grp-clone allocation score on node2: 0 +pcmk__clone_assign: grp-clone allocation score on node3: 0 +pcmk__clone_assign: grp:0 allocation score on node1: 30 +pcmk__clone_assign: grp:0 allocation score on node2: 0 +pcmk__clone_assign: grp:0 allocation score on node3: 10 +pcmk__clone_assign: grp:1 allocation score on node1: 30 +pcmk__clone_assign: grp:1 allocation score on node2: 20 +pcmk__clone_assign: 
grp:1 allocation score on node3: 0 +pcmk__clone_assign: grp:2 allocation score on node1: 30 +pcmk__clone_assign: grp:2 allocation score on node2: 20 +pcmk__clone_assign: grp:2 allocation score on node3: 10 +pcmk__clone_assign: rsc1:0 allocation score on node1: 0 +pcmk__clone_assign: rsc1:0 allocation score on node2: 0 +pcmk__clone_assign: rsc1:0 allocation score on node3: 1 +pcmk__clone_assign: rsc1:1 allocation score on node1: 0 +pcmk__clone_assign: rsc1:1 allocation score on node2: 1 +pcmk__clone_assign: rsc1:1 allocation score on node3: 0 +pcmk__clone_assign: rsc1:2 allocation score on node1: 0 +pcmk__clone_assign: rsc1:2 allocation score on node2: 0 +pcmk__clone_assign: rsc1:2 allocation score on node3: 0 +pcmk__clone_assign: rsc2:0 allocation score on node1: 0 +pcmk__clone_assign: rsc2:0 allocation score on node2: 0 +pcmk__clone_assign: rsc2:0 allocation score on node3: 1 +pcmk__clone_assign: rsc2:1 allocation score on node1: 0 +pcmk__clone_assign: rsc2:1 allocation score on node2: 1 +pcmk__clone_assign: rsc2:1 allocation score on node3: 0 +pcmk__clone_assign: rsc2:2 allocation score on node1: 0 +pcmk__clone_assign: rsc2:2 allocation score on node2: 0 +pcmk__clone_assign: rsc2:2 allocation score on node3: 0 +pcmk__group_assign: grp:0 allocation score on node1: 30 +pcmk__group_assign: grp:0 allocation score on node2: 0 +pcmk__group_assign: grp:0 allocation score on node3: 10 +pcmk__group_assign: grp:1 allocation score on node1: 30 +pcmk__group_assign: grp:1 allocation score on node2: 20 +pcmk__group_assign: grp:1 allocation score on node3: -INFINITY +pcmk__group_assign: grp:2 allocation score on node1: 30 +pcmk__group_assign: grp:2 allocation score on node2: -INFINITY +pcmk__group_assign: grp:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc1:0 allocation score on node1: 0 +pcmk__group_assign: rsc1:0 allocation score on node2: 0 +pcmk__group_assign: rsc1:0 allocation score on node3: 1 +pcmk__group_assign: rsc1:1 allocation score on node1: 0 +pcmk__group_assign: rsc1:1 allocation score on node2: 1 +pcmk__group_assign: rsc1:1 allocation score on node3: -INFINITY +pcmk__group_assign: rsc1:2 allocation score on node1: 0 +pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc2:0 allocation score on node1: 0 +pcmk__group_assign: rsc2:0 allocation score on node2: 0 +pcmk__group_assign: rsc2:0 allocation score on node3: 1 +pcmk__group_assign: rsc2:1 allocation score on node1: 0 +pcmk__group_assign: rsc2:1 allocation score on node2: 1 +pcmk__group_assign: rsc2:1 allocation score on node3: -INFINITY +pcmk__group_assign: rsc2:2 allocation score on node1: 0 +pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node2: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node3: 2 +pcmk__primitive_assign: rsc1:1 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node2: 2 +pcmk__primitive_assign: rsc1:1 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc1:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: 
rsc1:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node3: 1 +pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node2: 1 +pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-9.scores b/cts/scheduler/scores/clone-recover-no-shuffle-9.scores new file mode 100644 index 00000000000..947c86b262c --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-9.scores @@ -0,0 +1,67 @@ + +base:0 promotion score on base-bundle-0: 5 +base:1 promotion score on base-bundle-1: 10 +base:2 promotion score on base-bundle-2: 15 +pcmk__bundle_allocate: base-bundle allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_allocate: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_allocate: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_allocate: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_allocate: base:2 allocation score on base-bundle-2: 500 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score 
on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-1.summary b/cts/scheduler/summary/clone-recover-no-shuffle-1.summary new file mode 100644 index 00000000000..0b6866ec16c --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-1.summary @@ -0,0 +1,29 @@ +Using the original execution date of: 2023-06-21 00:59:59Z +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy]: + * Started: [ node2 node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Start dummy:2 ( node1 ) + +Executing Cluster Transition: + * Pseudo action: dummy-clone_start_0 + * Resource action: dummy start on node1 + * Pseudo action: dummy-clone_running_0 + * Resource action: dummy monitor=10000 on node1 +Using the original execution date of: 2023-06-21 00:59:59Z + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy]: + * Started: [ node1 node2 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-10.summary b/cts/scheduler/summary/clone-recover-no-shuffle-10.summary new file mode 100644 index 00000000000..5b0f9b6d685 --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-10.summary @@ -0,0 +1,29 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * 
Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy] (promotable): + * Promoted: [ node2 ] + * Unpromoted: [ node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Start dummy:2 ( node1 ) + +Executing Cluster Transition: + * Pseudo action: dummy-clone_start_0 + * Resource action: dummy start on node1 + * Pseudo action: dummy-clone_running_0 + * Resource action: dummy monitor=11000 on node1 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy] (promotable): + * Promoted: [ node2 ] + * Unpromoted: [ node1 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-11.summary b/cts/scheduler/summary/clone-recover-no-shuffle-11.summary new file mode 100644 index 00000000000..e0bdb61d605 --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-11.summary @@ -0,0 +1,34 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp] (promotable): + * Promoted: [ node2 ] + * Unpromoted: [ node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Start rsc1:2 ( node1 ) + * Start rsc2:2 ( node1 ) + +Executing Cluster Transition: + * Pseudo action: grp-clone_start_0 + * Pseudo action: grp:2_start_0 + * Resource action: rsc1 start on node1 + * Resource action: rsc2 start on node1 + * Pseudo action: grp:2_running_0 + * Resource action: rsc1 monitor=11000 on node1 + * Resource action: rsc2 monitor=11000 on node1 + * Pseudo action: grp-clone_running_0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp] (promotable): + * Promoted: [ node2 ] + * Unpromoted: [ node1 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-12.summary b/cts/scheduler/summary/clone-recover-no-shuffle-12.summary new file mode 100644 index 00000000000..6e55a0b7f2f --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-12.summary @@ -0,0 +1,43 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Stopped + +Transition Summary: + * Start base-bundle-podman-2 ( node1 ) + * Start base-bundle-2 ( node1 ) + * Start base:2 ( base-bundle-2 ) + +Executing Cluster Transition: + * Pseudo action: base-bundle_start_0 + * Pseudo action: base-bundle-clone_start_0 + * Resource action: base-bundle-podman-2 start on node1 + * Resource action: base-bundle-2 monitor on node3 + * Resource action: base-bundle-2 monitor on node2 + * Resource action: base-bundle-2 monitor on node1 + * Resource action: base-bundle-podman-2 monitor=60000 on node1 + * Resource action: base-bundle-2 start on node1 + * Resource action: base start on base-bundle-2 + * Pseudo action: base-bundle-clone_running_0 + * Resource action: base-bundle-2 monitor=30000 on node1 + * Pseudo action: base-bundle_running_0 + * Resource action: base monitor=16000 on base-bundle-2 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ 
base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-2.summary b/cts/scheduler/summary/clone-recover-no-shuffle-2.summary new file mode 100644 index 00000000000..8b18120ad8d --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-2.summary @@ -0,0 +1,32 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp]: + * Started: [ node2 node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Start rsc1:2 ( node1 ) + * Start rsc2:2 ( node1 ) + +Executing Cluster Transition: + * Pseudo action: grp-clone_start_0 + * Pseudo action: grp:2_start_0 + * Resource action: rsc1 start on node1 + * Resource action: rsc2 start on node1 + * Pseudo action: grp:2_running_0 + * Resource action: rsc1 monitor=10000 on node1 + * Resource action: rsc2 monitor=10000 on node1 + * Pseudo action: grp-clone_running_0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp]: + * Started: [ node1 node2 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-3.summary b/cts/scheduler/summary/clone-recover-no-shuffle-3.summary new file mode 100644 index 00000000000..5702177e33d --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-3.summary @@ -0,0 +1,42 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Started node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Started node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Stopped + +Transition Summary: + * Start base-bundle-podman-2 ( node1 ) + * Start base-bundle-2 ( node1 ) + * Start base:2 ( base-bundle-2 ) + +Executing Cluster Transition: + * Pseudo action: base-bundle_start_0 + * Pseudo action: base-bundle-clone_start_0 + * Resource action: base-bundle-podman-2 start on node1 + * Resource action: base-bundle-2 monitor on node3 + * Resource action: base-bundle-2 monitor on node2 + * Resource action: base-bundle-2 monitor on node1 + * Resource action: base-bundle-podman-2 monitor=60000 on node1 + * Resource action: base-bundle-2 start on node1 + * Resource action: base start on base-bundle-2 + * Pseudo action: base-bundle-clone_running_0 + * Resource action: base-bundle-2 monitor=30000 on node1 + * Pseudo action: base-bundle_running_0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Started node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Started node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Started node1 diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-4.summary 
b/cts/scheduler/summary/clone-recover-no-shuffle-4.summary new file mode 100644 index 00000000000..944bcb834b3 --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-4.summary @@ -0,0 +1,35 @@ +Using the original execution date of: 2023-06-21 00:59:59Z +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy]: + * Started: [ node2 node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Move dummy:0 ( node2 -> node1 ) + * Start dummy:2 ( node2 ) + +Executing Cluster Transition: + * Pseudo action: dummy-clone_stop_0 + * Resource action: dummy stop on node2 + * Pseudo action: dummy-clone_stopped_0 + * Pseudo action: dummy-clone_start_0 + * Resource action: dummy start on node1 + * Resource action: dummy start on node2 + * Pseudo action: dummy-clone_running_0 + * Resource action: dummy monitor=10000 on node1 + * Resource action: dummy monitor=10000 on node2 +Using the original execution date of: 2023-06-21 00:59:59Z + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy]: + * Started: [ node1 node2 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary new file mode 100644 index 00000000000..e84d0a574de --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary @@ -0,0 +1,59 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp]: + * Started: [ node2 node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Move rsc1:0 ( node2 -> node1 ) + * Move rsc2:0 ( node2 -> node1 ) + * Move rsc1:1 ( node3 -> node1 ) + * Move rsc2:1 ( node3 -> node1 ) + * Start rsc1:2 ( node1 ) + * Start rsc2:2 ( node1 ) + +Executing Cluster Transition: + * Pseudo action: grp-clone_stop_0 + * Pseudo action: grp:0_stop_0 + * Resource action: rsc2 stop on node2 + * Pseudo action: grp:1_stop_0 + * Resource action: rsc2 stop on node3 + * Resource action: rsc1 stop on node2 + * Resource action: rsc1 stop on node3 + * Pseudo action: grp:0_stopped_0 + * Pseudo action: grp:1_stopped_0 + * Pseudo action: grp-clone_stopped_0 + * Pseudo action: grp-clone_start_0 + * Pseudo action: grp:0_start_0 + * Resource action: rsc1 start on node1 + * Resource action: rsc2 start on node1 + * Pseudo action: grp:1_start_0 + * Resource action: rsc1 start on node1 + * Resource action: rsc2 start on node1 + * Pseudo action: grp:2_start_0 + * Resource action: rsc1 start on node1 + * Resource action: rsc2 start on node1 + * Pseudo action: grp:0_running_0 + * Resource action: rsc1 monitor=10000 on node1 + * Resource action: rsc2 monitor=10000 on node1 + * Pseudo action: grp:1_running_0 + * Resource action: rsc1 monitor=10000 on node1 + * Resource action: rsc2 monitor=10000 on node1 + * Pseudo action: grp:2_running_0 + * Resource action: rsc1 monitor=10000 on node1 + * Resource action: rsc2 monitor=10000 on node1 + * Pseudo action: grp-clone_running_0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp]: + * Started: [ node1 ] + * Stopped: [ node2 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-6.summary 
b/cts/scheduler/summary/clone-recover-no-shuffle-6.summary new file mode 100644 index 00000000000..19a957e15fb --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-6.summary @@ -0,0 +1,68 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Started node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Started node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Stopped + +Transition Summary: + * Move base-bundle-podman-0 ( node3 -> node1 ) + * Move base-bundle-0 ( node3 -> node1 ) + * Restart base:0 ( base-bundle-0 ) due to required base-bundle-podman-0 start + * Move base-bundle-podman-1 ( node2 -> node3 ) + * Move base-bundle-1 ( node2 -> node3 ) + * Restart base:1 ( base-bundle-1 ) due to required base-bundle-podman-1 start + * Start base-bundle-podman-2 ( node2 ) + * Start base-bundle-2 ( node2 ) + * Start base:2 ( base-bundle-2 ) + +Executing Cluster Transition: + * Pseudo action: base-bundle_stop_0 + * Pseudo action: base-bundle_start_0 + * Pseudo action: base-bundle-clone_stop_0 + * Resource action: base-bundle-podman-2 start on node2 + * Resource action: base-bundle-2 monitor on node3 + * Resource action: base-bundle-2 monitor on node2 + * Resource action: base-bundle-2 monitor on node1 + * Resource action: base stop on base-bundle-1 + * Resource action: base-bundle-1 stop on node2 + * Resource action: base-bundle-podman-2 monitor=60000 on node2 + * Resource action: base-bundle-2 start on node2 + * Resource action: base stop on base-bundle-0 + * Pseudo action: base-bundle-clone_stopped_0 + * Pseudo action: base-bundle-clone_start_0 + * Resource action: base-bundle-0 stop on node3 + * Resource action: base-bundle-podman-1 stop on node2 + * Resource action: base-bundle-2 monitor=30000 on node2 + * Resource action: base-bundle-podman-0 stop on node3 + * Resource action: base-bundle-podman-1 start on node3 + * Resource action: base-bundle-1 start on node3 + * Pseudo action: base-bundle_stopped_0 + * Resource action: base-bundle-podman-0 start on node1 + * Resource action: base-bundle-0 start on node1 + * Resource action: base-bundle-podman-1 monitor=60000 on node3 + * Resource action: base-bundle-1 monitor=30000 on node3 + * Resource action: base start on base-bundle-0 + * Resource action: base start on base-bundle-1 + * Resource action: base start on base-bundle-2 + * Pseudo action: base-bundle-clone_running_0 + * Resource action: base-bundle-podman-0 monitor=60000 on node1 + * Resource action: base-bundle-0 monitor=30000 on node1 + * Pseudo action: base-bundle_running_0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Started node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Started node3 + * base-bundle-2 (ocf:pacemaker:Stateful): Started node2 diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-7.summary b/cts/scheduler/summary/clone-recover-no-shuffle-7.summary new file mode 100644 index 00000000000..e6c9baed0db --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-7.summary @@ -0,0 +1,44 @@ +Current cluster status: + * Node List: + * 
Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy] (promotable): + * Promoted: [ node2 ] + * Unpromoted: [ node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Move dummy:0 ( Unpromoted node3 -> Promoted node1 ) + * Demote dummy:1 ( Promoted -> Unpromoted node2 ) + * Start dummy:2 ( node3 ) + +Executing Cluster Transition: + * Resource action: dummy cancel=10000 on node2 + * Pseudo action: dummy-clone_demote_0 + * Resource action: dummy demote on node2 + * Pseudo action: dummy-clone_demoted_0 + * Pseudo action: dummy-clone_stop_0 + * Resource action: dummy stop on node3 + * Resource action: dummy monitor=11000 on node2 + * Pseudo action: dummy-clone_stopped_0 + * Pseudo action: dummy-clone_start_0 + * Resource action: dummy start on node1 + * Resource action: dummy start on node3 + * Pseudo action: dummy-clone_running_0 + * Resource action: dummy monitor=11000 on node3 + * Pseudo action: dummy-clone_promote_0 + * Resource action: dummy promote on node1 + * Pseudo action: dummy-clone_promoted_0 + * Resource action: dummy monitor=10000 on node1 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy] (promotable): + * Promoted: [ node1 ] + * Unpromoted: [ node2 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-8.summary b/cts/scheduler/summary/clone-recover-no-shuffle-8.summary new file mode 100644 index 00000000000..878f24801dd --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-8.summary @@ -0,0 +1,52 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp] (promotable): + * Promoted: [ node2 ] + * Unpromoted: [ node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Demote rsc1:1 ( Promoted -> Unpromoted node2 ) + * Demote rsc2:1 ( Promoted -> Unpromoted node2 ) + * Promote rsc1:2 ( Stopped -> Promoted node1 ) + * Promote rsc2:2 ( Stopped -> Promoted node1 ) + +Executing Cluster Transition: + * Resource action: rsc1 cancel=10000 on node2 + * Resource action: rsc2 cancel=10000 on node2 + * Pseudo action: grp-clone_demote_0 + * Pseudo action: grp:1_demote_0 + * Resource action: rsc2 demote on node2 + * Resource action: rsc1 demote on node2 + * Resource action: rsc2 monitor=11000 on node2 + * Pseudo action: grp:1_demoted_0 + * Resource action: rsc1 monitor=11000 on node2 + * Pseudo action: grp-clone_demoted_0 + * Pseudo action: grp-clone_start_0 + * Pseudo action: grp:2_start_0 + * Resource action: rsc1 start on node1 + * Resource action: rsc2 start on node1 + * Pseudo action: grp:2_running_0 + * Pseudo action: grp-clone_running_0 + * Pseudo action: grp-clone_promote_0 + * Pseudo action: grp:2_promote_0 + * Resource action: rsc1 promote on node1 + * Resource action: rsc2 promote on node1 + * Pseudo action: grp:2_promoted_0 + * Resource action: rsc1 monitor=10000 on node1 + * Resource action: rsc2 monitor=10000 on node1 + * Pseudo action: grp-clone_promoted_0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp] (promotable): + * Promoted: [ node1 ] + * Unpromoted: [ node2 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-9.summary 
b/cts/scheduler/summary/clone-recover-no-shuffle-9.summary
new file mode 100644
index 00000000000..7ede39a6e58
--- /dev/null
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-9.summary
@@ -0,0 +1,56 @@
+Current cluster status:
+  * Node List:
+    * Online: [ node1 node2 node3 ]
+    * GuestOnline: [ base-bundle-0 base-bundle-1 ]
+
+  * Full List of Resources:
+    * Fencing (stonith:fence_xvm): Started node2
+    * Container bundle set: base-bundle [localhost/pcmktest]:
+      * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3
+      * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2
+      * base-bundle-2 (ocf:pacemaker:Stateful): Stopped
+
+Transition Summary:
+  * Demote base:1 ( Promoted -> Unpromoted base-bundle-1 )
+  * Start base-bundle-podman-2 ( node1 )
+  * Start base-bundle-2 ( node1 )
+  * Promote base:2 ( Stopped -> Promoted base-bundle-2 )
+
+Executing Cluster Transition:
+  * Resource action: base cancel=15000 on base-bundle-1
+  * Pseudo action: base-bundle_demote_0
+  * Pseudo action: base-bundle-clone_demote_0
+  * Resource action: base demote on base-bundle-1
+  * Pseudo action: base-bundle-clone_demoted_0
+  * Pseudo action: base-bundle_demoted_0
+  * Pseudo action: base-bundle_start_0
+  * Resource action: base monitor=16000 on base-bundle-1
+  * Pseudo action: base-bundle-clone_start_0
+  * Resource action: base-bundle-podman-2 start on node1
+  * Resource action: base-bundle-2 monitor on node3
+  * Resource action: base-bundle-2 monitor on node2
+  * Resource action: base-bundle-2 monitor on node1
+  * Resource action: base-bundle-podman-2 monitor=60000 on node1
+  * Resource action: base-bundle-2 start on node1
+  * Resource action: base start on base-bundle-2
+  * Pseudo action: base-bundle-clone_running_0
+  * Resource action: base-bundle-2 monitor=30000 on node1
+  * Pseudo action: base-bundle_running_0
+  * Pseudo action: base-bundle_promote_0
+  * Pseudo action: base-bundle-clone_promote_0
+  * Resource action: base promote on base-bundle-2
+  * Pseudo action: base-bundle-clone_promoted_0
+  * Pseudo action: base-bundle_promoted_0
+  * Resource action: base monitor=15000 on base-bundle-2
+
+Revised Cluster Status:
+  * Node List:
+    * Online: [ node1 node2 node3 ]
+    * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+  * Full List of Resources:
+    * Fencing (stonith:fence_xvm): Started node2
+    * Container bundle set: base-bundle [localhost/pcmktest]:
+      * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3
+      * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+      * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node1
[The twelve new cts/scheduler/xml test inputs below lost their XML markup in this capture; only the diff headers are preserved.]
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-1.xml b/cts/scheduler/xml/clone-recover-no-shuffle-1.xml
new file mode 100644
index 00000000000..a634ff352cd
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-1.xml
@@ -0,0 +1,113 @@
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-10.xml b/cts/scheduler/xml/clone-recover-no-shuffle-10.xml
new file mode 100644
index 00000000000..faa202a0ae0
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-10.xml
@@ -0,0 +1,120 @@
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-11.xml b/cts/scheduler/xml/clone-recover-no-shuffle-11.xml
new file mode 100644
index 00000000000..43d6d749525
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-11.xml
@@ -0,0 +1,153 @@
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-12.xml b/cts/scheduler/xml/clone-recover-no-shuffle-12.xml
new file mode 100644
index 00000000000..e3026903533
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-12.xml
@@ -0,0 +1,186 @@
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-2.xml b/cts/scheduler/xml/clone-recover-no-shuffle-2.xml
new file mode 100644
index 00000000000..486666c1f26
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-2.xml
@@ -0,0 +1,141 @@
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-3.xml b/cts/scheduler/xml/clone-recover-no-shuffle-3.xml
new file mode 100644
index 00000000000..ddafb741dce
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-3.xml
@@ -0,0 +1,180 @@
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-4.xml b/cts/scheduler/xml/clone-recover-no-shuffle-4.xml
new file mode 100644
index 00000000000..40e6520c6d0
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-4.xml
@@ -0,0 +1,120 @@
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-5.xml b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml
new file mode 100644
index 00000000000..67176dc1a03
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml
@@ -0,0 +1,148 @@
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-6.xml b/cts/scheduler/xml/clone-recover-no-shuffle-6.xml
new file mode 100644
index 00000000000..3de42f581d4
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-6.xml
@@ -0,0 +1,187 @@
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-7.xml b/cts/scheduler/xml/clone-recover-no-shuffle-7.xml
new file mode 100644
index 00000000000..6e9dad50db4
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-7.xml
@@ -0,0 +1,125 @@
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-8.xml b/cts/scheduler/xml/clone-recover-no-shuffle-8.xml
new file mode 100644
index 00000000000..6f882b80785
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-8.xml
@@ -0,0 +1,153 @@
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-9.xml b/cts/scheduler/xml/clone-recover-no-shuffle-9.xml
new file mode 100644
index 00000000000..104331d6c00
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-9.xml
@@ -0,0 +1,186 @@

From 44dfe36a316bddc562c07f7e1adbbaa57b9adf77 Mon Sep 17 00:00:00 2001
From: Reid Wahl
Date: Wed, 28 Jun 2023 02:04:45 -0700
Subject: [PATCH 08/19] Refactor: libpacemaker: Recursively copy and restore
 allowed node tables

Given a resource, these two new functions create copies of the allowed
nodes tables of its entire tree of descendants, or restore from such a
backup copy.

Ref T678

Signed-off-by: Reid Wahl
---
 lib/pacemaker/libpacemaker_private.h |  6 +++
 lib/pacemaker/pcmk_sched_nodes.c     | 76 ++++++++++++++++++++++++++++
 2 files changed, 82 insertions(+)

diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h
index 614d695f83f..8cdd13f7304 100644
--- a/lib/pacemaker/libpacemaker_private.h
+++ b/lib/pacemaker/libpacemaker_private.h
@@ -874,6 +874,12 @@ bool pcmk__any_node_available(GHashTable *nodes);
 G_GNUC_INTERNAL
 GHashTable *pcmk__copy_node_table(GHashTable *nodes);
 
+G_GNUC_INTERNAL
+void pcmk__copy_node_tables(const pe_resource_t *rsc, GHashTable **copy);
+
+G_GNUC_INTERNAL
+void pcmk__restore_node_tables(pe_resource_t *rsc, GHashTable *backup);
+
 G_GNUC_INTERNAL
 GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node);
 
diff --git a/lib/pacemaker/pcmk_sched_nodes.c b/lib/pacemaker/pcmk_sched_nodes.c
index d7d5ba46169..eb0b2a41e39 100644
--- a/lib/pacemaker/pcmk_sched_nodes.c
+++ b/lib/pacemaker/pcmk_sched_nodes.c
@@ -82,6 +82,82 @@ pcmk__copy_node_table(GHashTable *nodes)
     return new_table;
 }
 
+/*!
+ * \internal
+ * \brief Free a table of node tables
+ *
+ * \param[in,out] data  Table to free
+ *
+ * \note This is a \c GDestroyNotify wrapper for \c g_hash_table_destroy().
+ */
+static void
+destroy_node_tables(gpointer data)
+{
+    g_hash_table_destroy((GHashTable *) data);
+}
+
+/*!
+ * \internal
+ * \brief Recursively copy the node tables of a resource
+ *
+ * Build a hash table containing copies of the allowed nodes tables of \p rsc
+ * and its entire tree of descendants. The key is the resource ID, and the
+ * value is a copy of the resource's node table.
+ *
+ * \param[in]     rsc   Resource whose node tables to copy
+ * \param[in,out] copy  Where to store the copied node tables
+ *
+ * \note \p *copy should be \c NULL for the top-level call.
+ * \note The caller is responsible for freeing \p *copy using
+ *       \c g_hash_table_destroy().
+ */
+void
+pcmk__copy_node_tables(const pe_resource_t *rsc, GHashTable **copy)
+{
+    CRM_ASSERT((rsc != NULL) && (copy != NULL));
+
+    if (*copy == NULL) {
+        *copy = pcmk__strkey_table(NULL, destroy_node_tables);
+    }
+
+    g_hash_table_insert(*copy, rsc->id,
+                        pcmk__copy_node_table(rsc->allowed_nodes));
+
+    for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+        pcmk__copy_node_tables((const pe_resource_t *) iter->data, copy);
+    }
+}
+
+/*!
+ * \internal
+ * \brief Recursively restore the node tables of a resource from backup
+ *
+ * Given a hash table containing backup copies of the allowed nodes tables of
+ * \p rsc and its entire tree of descendants, replace the resources' current
+ * node tables with the backed-up copies.
+ *
+ * \param[in,out] rsc     Resource whose node tables to restore
+ * \param[in]     backup  Table of backup node tables (created by
+ *                        \c pcmk__copy_node_tables())
+ *
+ * \note This function frees the resources' current node tables.
+ */
+void
+pcmk__restore_node_tables(pe_resource_t *rsc, GHashTable *backup)
+{
+    CRM_ASSERT((rsc != NULL) && (backup != NULL));
+
+    g_hash_table_destroy(rsc->allowed_nodes);
+
+    /* Copy the backup entry rather than stealing it, so that the backup
+     * remains intact in case we need to restore from it again
+     */
+    rsc->allowed_nodes = g_hash_table_lookup(backup, rsc->id);
+    rsc->allowed_nodes = pcmk__copy_node_table(rsc->allowed_nodes);
+
+    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+        pcmk__restore_node_tables((pe_resource_t *) iter->data, backup);
+    }
+}
+
 /*!
  * \internal
  * \brief Copy a list of node objects

From a3c120c4c0aeb48efd55bac6de68423be099831d Mon Sep 17 00:00:00 2001
From: Reid Wahl
Date: Wed, 28 Jun 2023 02:09:28 -0700
Subject: [PATCH 09/19] Refactor: libpacemaker: Restore node tables if cloned
 group assign fails

Currently, when assigning an instance of a cloned group (that is, one
of the groups), we make a copy only of the group's allowed nodes table.
We restore only that table if an early assignment attempt fails.

Here, we make a recursive copy containing the allowed nodes tables of
the group itself and of all the resources in the group. Then we restore
all of them from backup if the assignment fails.

This doesn't visibly fix anything yet, but it's a necessary part of the
fix for T678.
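As a sketch, the backup/restore pattern in assign_instance() becomes
(simplified from the diff below; error handling omitted):

    GHashTable *backup = NULL;

    // Snapshot the allowed node tables of the group and all its members
    pcmk__copy_node_tables(instance, &backup);
    chosen = instance->cmds->assign(instance, prefer);

    if ((chosen != NULL) && (chosen->details != prefer->details)) {
        // Preferred node lost: roll back the whole tree, then unassign
        pcmk__restore_node_tables(instance, backup);
        pcmk__unassign_resource(instance);
        chosen = NULL;
    }
    g_hash_table_destroy(backup);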
And it was obviously wrong before :)

Ref T678

Signed-off-by: Reid Wahl
---
 lib/pacemaker/pcmk_sched_instances.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c
index c880196f70f..783820bbf69 100644
--- a/lib/pacemaker/pcmk_sched_instances.c
+++ b/lib/pacemaker/pcmk_sched_instances.c
@@ -600,8 +600,9 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer,
         chosen = instance->cmds->assign(instance, NULL);
 
     } else { // Possible early assignment to preferred node
-        GHashTable *backup = pcmk__copy_node_table(instance->allowed_nodes);
+        GHashTable *backup = NULL;
 
+        pcmk__copy_node_tables(instance, &backup);
         chosen = instance->cmds->assign(instance, prefer);
 
         // Revert nodes if preferred node won't be assigned
@@ -609,13 +610,11 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer,
             crm_info("Not assigning %s to preferred node %s: %s is better",
                      instance->id, pe__node_name(prefer),
                      pe__node_name(chosen));
-            g_hash_table_destroy(instance->allowed_nodes);
-            instance->allowed_nodes = backup;
+            pcmk__restore_node_tables(instance, backup);
             pcmk__unassign_resource(instance);
             chosen = NULL;
-        } else if (backup != NULL) {
-            g_hash_table_destroy(backup);
         }
+        g_hash_table_destroy(backup);
     }
 
     // The parent tracks how many instances have been assigned to each node

From a5a5c76333365be87f5d3d62f354b45376894506 Mon Sep 17 00:00:00 2001
From: Reid Wahl
Date: Wed, 28 Jun 2023 02:08:44 -0700
Subject: [PATCH 10/19] Fix: libpacemaker: Respect clone-node-max for cloned
 groups

Currently, cloned groups may have more than clone-node-max instances
assigned to a given node. This can happen when a location constraint
exists for the clone.

For example, consider the case of the clone-recover-no-shuffle-5 test.
The cloned group prefers node1 with a score of 100. The location score
is applied only to a group's first member.

So in the early assignment attempt (within pcmk__assign_instances()), we
try to assign each instance (group) to its current node. However, the
first member prefers a different node (node1) and gets assigned there
instead. The second member has to follow the first due to the group's
internal colocation.

However, node1 was not the preferred node passed to assign_instance()
(that was the instance's current node), so assign_instance() tries to
revert the assignment by calling pcmk__unassign_resource() on the
instance (the group). But this leaves the group members assigned,
because pcmk__unassign_resource() doesn't act recursively.

With this commit, pcmk__unassign_resource() acts recursively, so a
resource and all of its children are now unassigned together.

Fixes T678

Signed-off-by: Reid Wahl
---
 lib/pacemaker/pcmk_sched_resource.c | 43 +++++++++++++++++++----------
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c
index dd9939a42a6..8f703789b20 100644
--- a/lib/pacemaker/pcmk_sched_resource.c
+++ b/lib/pacemaker/pcmk_sched_resource.c
@@ -455,13 +455,14 @@ pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force)
 
 /*!
  * \internal
- * \brief Remove any assignment of a specified resource to a node
+ * \brief Remove any node assignment from a specified resource and its children
  *
  * If a specified resource has been assigned to a node, remove that assignment
- * and mark the resource as provisional again. This is not done recursively for
- * children, so it should be called only for primitives.
+ * and mark the resource as provisional again.
* * \param[in,out] rsc Resource to unassign + * + * \note This function is called recursively on \p rsc and its children. */ void pcmk__unassign_resource(pe_resource_t *rsc) @@ -469,21 +470,33 @@ pcmk__unassign_resource(pe_resource_t *rsc) pe_node_t *old = rsc->allocated_to; if (old == NULL) { - return; + crm_info("Unassigning %s", rsc->id); + } else { + crm_info("Unassigning %s from %s", rsc->id, pe__node_name(old)); } - crm_info("Unassigning %s from %s", rsc->id, pe__node_name(old)); pe__set_resource_flags(rsc, pe_rsc_provisional); - rsc->allocated_to = NULL; - - /* We're going to free the pe_node_t, but its details member is shared and - * will remain, so update that appropriately first. - */ - old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc, - rsc); - old->details->num_resources--; - pcmk__release_node_capacity(old->details->utilization, rsc); - free(old); + + if (rsc->children == NULL) { + if (old == NULL) { + return; + } + rsc->allocated_to = NULL; + + /* We're going to free the pe_node_t, but its details member is shared + * and will remain, so update that appropriately first. + */ + old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc, + rsc); + old->details->num_resources--; + pcmk__release_node_capacity(old->details->utilization, rsc); + free(old); + return; + } + + for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { + pcmk__unassign_resource((pe_resource_t *) iter->data); + } } /*! From edd9b4ef2094e776530ff540047848aa6d2a1b42 Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Wed, 28 Jun 2023 02:39:39 -0700 Subject: [PATCH 11/19] Test: scheduler: Update tests for cloned group clone-node-max fix Ref T678 Signed-off-by: Reid Wahl --- .../dot/clone-recover-no-shuffle-5.dot | 46 +--- .../exp/clone-recover-no-shuffle-5.exp | 231 +++--------------- .../scores/clone-recover-no-shuffle-5.scores | 50 +++- .../clone-recover-no-shuffle-5.summary | 27 +- .../xml/clone-recover-no-shuffle-5.xml | 6 +- 5 files changed, 97 insertions(+), 263 deletions(-) diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-5.dot b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot index 7219ee5a6d3..a2356f2280b 100644 --- a/cts/scheduler/dot/clone-recover-no-shuffle-5.dot +++ b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot @@ -2,12 +2,10 @@ "grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] "grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] "grp-clone_start_0" -> "grp:0_start_0" [ style = bold] -"grp-clone_start_0" -> "grp:1_start_0" [ style = bold] "grp-clone_start_0" -> "grp:2_start_0" [ style = bold] "grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] "grp-clone_stop_0" -> "grp-clone_stopped_0" [ style = bold] "grp-clone_stop_0" -> "grp:0_stop_0" [ style = bold] -"grp-clone_stop_0" -> "grp:1_stop_0" [ style = bold] "grp-clone_stop_0" [ style=bold color="green" fontcolor="orange"] "grp-clone_stopped_0" -> "grp-clone_start_0" [ style = bold] "grp-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] @@ -24,57 +22,35 @@ "grp:0_stopped_0" -> "grp-clone_stopped_0" [ style = bold] "grp:0_stopped_0" -> "grp:0_start_0" [ style = bold] "grp:0_stopped_0" [ style=bold color="green" fontcolor="orange"] -"grp:1_running_0" -> "grp-clone_running_0" [ style = bold] -"grp:1_running_0" [ style=bold color="green" fontcolor="orange"] -"grp:1_start_0" -> "grp:1_running_0" [ style = bold] -"grp:1_start_0" -> "rsc1_start_0 node1" [ style = bold] -"grp:1_start_0" -> "rsc2_start_0 node1" [ style = bold] -"grp:1_start_0" 
[ style=bold color="green" fontcolor="orange"] -"grp:1_stop_0" -> "grp:1_stopped_0" [ style = bold] -"grp:1_stop_0" -> "rsc1_stop_0 node3" [ style = bold] -"grp:1_stop_0" -> "rsc2_stop_0 node3" [ style = bold] -"grp:1_stop_0" [ style=bold color="green" fontcolor="orange"] -"grp:1_stopped_0" -> "grp-clone_stopped_0" [ style = bold] -"grp:1_stopped_0" -> "grp:1_start_0" [ style = bold] -"grp:1_stopped_0" [ style=bold color="green" fontcolor="orange"] "grp:2_running_0" -> "grp-clone_running_0" [ style = bold] "grp:2_running_0" [ style=bold color="green" fontcolor="orange"] "grp:2_start_0" -> "grp:2_running_0" [ style = bold] -"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] -"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] +"grp:2_start_0" -> "rsc1:2_start_0 node2" [ style = bold] +"grp:2_start_0" -> "rsc2:2_start_0 node2" [ style = bold] "grp:2_start_0" [ style=bold color="green" fontcolor="orange"] -"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] -"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] -"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] -"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc1:2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"rsc1:2_start_0 node2" -> "grp:2_running_0" [ style = bold] +"rsc1:2_start_0 node2" -> "rsc1:2_monitor_10000 node2" [ style = bold] +"rsc1:2_start_0 node2" -> "rsc2:2_start_0 node2" [ style = bold] +"rsc1:2_start_0 node2" [ style=bold color="green" fontcolor="black"] "rsc1_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] "rsc1_start_0 node1" -> "grp:0_running_0" [ style = bold] -"rsc1_start_0 node1" -> "grp:1_running_0" [ style = bold] "rsc1_start_0 node1" -> "rsc1_monitor_10000 node1" [ style = bold] "rsc1_start_0 node1" -> "rsc2_start_0 node1" [ style = bold] "rsc1_start_0 node1" [ style=bold color="green" fontcolor="black"] "rsc1_stop_0 node2" -> "grp:0_stopped_0" [ style = bold] "rsc1_stop_0 node2" -> "rsc1_start_0 node1" [ style = bold] "rsc1_stop_0 node2" [ style=bold color="green" fontcolor="black"] -"rsc1_stop_0 node3" -> "grp:1_stopped_0" [ style = bold] -"rsc1_stop_0 node3" -> "rsc1_start_0 node1" [ style = bold] -"rsc1_stop_0 node3" [ style=bold color="green" fontcolor="black"] -"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] -"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] -"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"rsc2:2_start_0 node2" -> "grp:2_running_0" [ style = bold] +"rsc2:2_start_0 node2" -> "rsc2:2_monitor_10000 node2" [ style = bold] +"rsc2:2_start_0 node2" [ style=bold color="green" fontcolor="black"] "rsc2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] "rsc2_start_0 node1" -> "grp:0_running_0" [ style = bold] -"rsc2_start_0 node1" -> "grp:1_running_0" [ style = bold] "rsc2_start_0 node1" -> "rsc2_monitor_10000 node1" [ style = bold] "rsc2_start_0 node1" [ style=bold color="green" fontcolor="black"] "rsc2_stop_0 node2" -> "grp:0_stopped_0" [ style = bold] "rsc2_stop_0 node2" -> "rsc1_stop_0 node2" [ style = bold] "rsc2_stop_0 node2" -> "rsc2_start_0 node1" [ style = bold] "rsc2_stop_0 node2" [ style=bold color="green" fontcolor="black"] -"rsc2_stop_0 node3" -> "grp:1_stopped_0" [ style = bold] 
-"rsc2_stop_0 node3" -> "rsc1_stop_0 node3" [ style = bold] -"rsc2_stop_0 node3" -> "rsc2_start_0 node1" [ style = bold] -"rsc2_stop_0 node3" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-5.exp b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp index 8a8e799793e..c1cee43b12f 100644 --- a/cts/scheduler/exp/clone-recover-no-shuffle-5.exp +++ b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp @@ -25,7 +25,7 @@ - + @@ -58,7 +58,7 @@ - + @@ -154,245 +154,92 @@ - + - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -401,24 +248,21 @@ - - - - + - + - + - + - + @@ -427,25 +271,22 @@ - + - - - - + - + - + - + diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-5.scores b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores index eecba43fae0..0dd9728830c 100644 --- a/cts/scheduler/scores/clone-recover-no-shuffle-5.scores +++ b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores @@ -30,50 +30,80 @@ pcmk__clone_assign: rsc2:2 allocation score on node1: 0 pcmk__clone_assign: rsc2:2 allocation score on node2: 0 pcmk__clone_assign: rsc2:2 allocation score on node3: 0 pcmk__group_assign: grp:0 allocation score on node1: 100 +pcmk__group_assign: grp:0 allocation score on node1: 100 +pcmk__group_assign: grp:0 allocation score on node2: 0 pcmk__group_assign: grp:0 allocation score on node2: 0 pcmk__group_assign: grp:0 allocation score on node3: 0 +pcmk__group_assign: grp:0 allocation score on node3: 0 +pcmk__group_assign: grp:1 allocation score on node1: -INFINITY pcmk__group_assign: grp:1 allocation score on node1: 100 pcmk__group_assign: grp:1 allocation score on node2: 0 +pcmk__group_assign: grp:1 allocation score on node2: 0 +pcmk__group_assign: grp:1 allocation score on node3: 0 pcmk__group_assign: grp:1 allocation score on node3: 0 -pcmk__group_assign: grp:2 allocation score on node1: 100 +pcmk__group_assign: grp:2 allocation score on node1: -INFINITY pcmk__group_assign: grp:2 allocation score on node2: 0 -pcmk__group_assign: grp:2 allocation score on node3: 0 +pcmk__group_assign: grp:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc1:0 allocation score on node1: 100 pcmk__group_assign: rsc1:0 allocation score on node1: 100 pcmk__group_assign: rsc1:0 allocation score on node2: 1 +pcmk__group_assign: rsc1:0 allocation score on node2: 1 +pcmk__group_assign: rsc1:0 allocation score on node3: 0 pcmk__group_assign: rsc1:0 allocation score on node3: 0 +pcmk__group_assign: rsc1:1 allocation score on node1: -INFINITY pcmk__group_assign: rsc1:1 allocation score on node1: 100 pcmk__group_assign: rsc1:1 allocation score on node2: 0 +pcmk__group_assign: rsc1:1 allocation score on node2: 0 pcmk__group_assign: rsc1:1 allocation score on node3: 1 -pcmk__group_assign: rsc1:2 allocation score on node1: 100 +pcmk__group_assign: rsc1:1 allocation score on node3: 1 +pcmk__group_assign: rsc1:2 allocation score on node1: -INFINITY pcmk__group_assign: rsc1:2 allocation score on node2: 0 -pcmk__group_assign: rsc1:2 allocation score on node3: 0 +pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc2:0 allocation score on node1: 0 pcmk__group_assign: rsc2:0 allocation 
score on node1: 0 pcmk__group_assign: rsc2:0 allocation score on node2: 1 +pcmk__group_assign: rsc2:0 allocation score on node2: 1 pcmk__group_assign: rsc2:0 allocation score on node3: 0 +pcmk__group_assign: rsc2:0 allocation score on node3: 0 +pcmk__group_assign: rsc2:1 allocation score on node1: -INFINITY pcmk__group_assign: rsc2:1 allocation score on node1: 0 pcmk__group_assign: rsc2:1 allocation score on node2: 0 +pcmk__group_assign: rsc2:1 allocation score on node2: 0 +pcmk__group_assign: rsc2:1 allocation score on node3: 1 pcmk__group_assign: rsc2:1 allocation score on node3: 1 -pcmk__group_assign: rsc2:2 allocation score on node1: 0 +pcmk__group_assign: rsc2:2 allocation score on node1: -INFINITY pcmk__group_assign: rsc2:2 allocation score on node2: 0 -pcmk__group_assign: rsc2:2 allocation score on node3: 0 +pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY pcmk__primitive_assign: Fencing allocation score on node1: 0 pcmk__primitive_assign: Fencing allocation score on node2: 0 pcmk__primitive_assign: Fencing allocation score on node3: 0 pcmk__primitive_assign: rsc1:0 allocation score on node1: 100 +pcmk__primitive_assign: rsc1:0 allocation score on node1: 100 +pcmk__primitive_assign: rsc1:0 allocation score on node2: 2 pcmk__primitive_assign: rsc1:0 allocation score on node2: 2 pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY pcmk__primitive_assign: rsc1:1 allocation score on node1: 100 pcmk__primitive_assign: rsc1:1 allocation score on node2: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node2: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 -pcmk__primitive_assign: rsc1:2 allocation score on node1: 100 +pcmk__primitive_assign: rsc1:2 allocation score on node1: -INFINITY pcmk__primitive_assign: rsc1:2 allocation score on node2: 0 -pcmk__primitive_assign: rsc1:2 allocation score on node3: 0 +pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node1: 0 pcmk__primitive_assign: rsc2:0 allocation score on node1: 0 pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY pcmk__primitive_assign: rsc2:1 allocation score on node1: 0 pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY -pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 -pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node3: 1 +pcmk__primitive_assign: rsc2:2 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc2:2 allocation score on node2: 0 pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary index e84d0a574de..121214c42ab 100644 --- a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary +++ b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary @@ 
-11,41 +11,29 @@ Current cluster status: Transition Summary: * Move rsc1:0 ( node2 -> node1 ) * Move rsc2:0 ( node2 -> node1 ) - * Move rsc1:1 ( node3 -> node1 ) - * Move rsc2:1 ( node3 -> node1 ) - * Start rsc1:2 ( node1 ) - * Start rsc2:2 ( node1 ) + * Start rsc1:2 ( node2 ) + * Start rsc2:2 ( node2 ) Executing Cluster Transition: * Pseudo action: grp-clone_stop_0 * Pseudo action: grp:0_stop_0 * Resource action: rsc2 stop on node2 - * Pseudo action: grp:1_stop_0 - * Resource action: rsc2 stop on node3 * Resource action: rsc1 stop on node2 - * Resource action: rsc1 stop on node3 * Pseudo action: grp:0_stopped_0 - * Pseudo action: grp:1_stopped_0 * Pseudo action: grp-clone_stopped_0 * Pseudo action: grp-clone_start_0 * Pseudo action: grp:0_start_0 * Resource action: rsc1 start on node1 * Resource action: rsc2 start on node1 - * Pseudo action: grp:1_start_0 - * Resource action: rsc1 start on node1 - * Resource action: rsc2 start on node1 * Pseudo action: grp:2_start_0 - * Resource action: rsc1 start on node1 - * Resource action: rsc2 start on node1 + * Resource action: rsc1 start on node2 + * Resource action: rsc2 start on node2 * Pseudo action: grp:0_running_0 * Resource action: rsc1 monitor=10000 on node1 * Resource action: rsc2 monitor=10000 on node1 - * Pseudo action: grp:1_running_0 - * Resource action: rsc1 monitor=10000 on node1 - * Resource action: rsc2 monitor=10000 on node1 * Pseudo action: grp:2_running_0 - * Resource action: rsc1 monitor=10000 on node1 - * Resource action: rsc2 monitor=10000 on node1 + * Resource action: rsc1 monitor=10000 on node2 + * Resource action: rsc2 monitor=10000 on node2 * Pseudo action: grp-clone_running_0 Revised Cluster Status: @@ -55,5 +43,4 @@ Revised Cluster Status: * Full List of Resources: * Fencing (stonith:fence_xvm): Started node2 * Clone Set: grp-clone [grp]: - * Started: [ node1 ] - * Stopped: [ node2 node3 ] + * Started: [ node1 node2 node3 ] diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-5.xml b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml index 67176dc1a03..45f3b5a9f3a 100644 --- a/cts/scheduler/xml/clone-recover-no-shuffle-5.xml +++ b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml @@ -14,9 +14,9 @@ * Instance grp:2 should start on node1 This test output is incorrect: - * Instance grp:0 moves from node2 to node1 - * Instance grp:1 moves from node3 to node1 - * Instance grp:2 starts on node1 (correct) + * Instance grp:0 moves to node1 + * Instance grp:1 remains started on node3 (correct) + * Instance grp:2 starts on node2 --> From ff60c47e89c6434819dbe5e5e9a87d01122e165e Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Thu, 6 Jul 2023 13:52:59 -0700 Subject: [PATCH 12/19] Refactor: libpacemaker: Move instance provisional check to loop body Avoid calling preferred_node() this way. Since assign_instance() is static and has only two callers, we don't have to worry about a sanity provisional check inside the function. Signed-off-by: Reid Wahl --- lib/pacemaker/pcmk_sched_instances.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c index 783820bbf69..58fad741729 100644 --- a/lib/pacemaker/pcmk_sched_instances.c +++ b/lib/pacemaker/pcmk_sched_instances.c @@ -568,11 +568,6 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, pe_rsc_trace(instance, "Assigning %s (preferring %s)", instance->id, ((prefer == NULL)? 
"no node" : prefer->details->uname)); - if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) { - // Instance is already assigned - return instance->fns->location(instance, NULL, FALSE) != NULL; - } - if (pcmk_is_set(instance->flags, pe_rsc_allocating)) { pe_rsc_debug(instance, "Assignment loop detected involving %s colocations", @@ -745,6 +740,10 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances, iter = iter->next) { instance = (pe_resource_t *) iter->data; + if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) { + continue; // Already assigned + } + current = preferred_node(collective, instance, optimal_per_node); if ((current != NULL) && assign_instance(instance, current, max_per_node)) { From 0f9e84238a4778da71488ff67ea9f1772e797d80 Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Fri, 23 Jun 2023 15:16:57 -0700 Subject: [PATCH 13/19] Refactor: libpacemaker: Functionize updating parent allowed node count ...in pcmk_sched_instances.c:assign_instance(). We'll use this elsewhere in an upcoming commit. Ref T489 Signed-off-by: Reid Wahl --- lib/pacemaker/pcmk_sched_instances.c | 54 ++++++++++++++++++---------- 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c index 58fad741729..1b051cb2ed9 100644 --- a/lib/pacemaker/pcmk_sched_instances.c +++ b/lib/pacemaker/pcmk_sched_instances.c @@ -545,6 +545,39 @@ pcmk__cmp_instance(gconstpointer a, gconstpointer b) return rc; } +/*! + * \internal + * \brief Increment the parent's instance count after assigning an instance + * + * An instance's parent tracks how many instances have been assigned to each + * node via its pe_node_t:count member. After assigning an instance to a node, + * find the corresponding node in the parent's allowed table and increment it. + * + * \param[in,out] instance Instance whose parent to update + * \param[in] assigned_to Node to which the instance was assigned + */ +static void +increment_parent_count(pe_resource_t *instance, const pe_node_t *assigned_to) +{ + pe_node_t *allowed = NULL; + + if (assigned_to == NULL) { + return; + } + allowed = pcmk__top_allowed_node(instance, assigned_to); + + if (allowed == NULL) { + /* The instance is allowed on the node, but its parent isn't. This + * shouldn't be possible if the resource is managed, and we won't be + * able to limit the number of instances assigned to the node. + */ + CRM_LOG_ASSERT(!pcmk_is_set(instance->flags, pe_rsc_managed)); + + } else { + allowed->count++; + } +} + /*! * \internal * \brief Choose a node for an instance @@ -562,9 +595,7 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, int max_per_node) { pe_node_t *chosen = NULL; - pe_node_t *allowed = NULL; - CRM_ASSERT(instance != NULL); pe_rsc_trace(instance, "Assigning %s (preferring %s)", instance->id, ((prefer == NULL)? 
"no node" : prefer->details->uname)); @@ -578,8 +609,8 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, if (prefer != NULL) { // Possible early assignment to preferred node // Get preferred node with instance's scores - allowed = g_hash_table_lookup(instance->allowed_nodes, - prefer->details->id); + pe_node_t *allowed = g_hash_table_lookup(instance->allowed_nodes, + prefer->details->id); if ((allowed == NULL) || (allowed->weight < 0)) { pe_rsc_trace(instance, @@ -612,20 +643,7 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, g_hash_table_destroy(backup); } - // The parent tracks how many instances have been assigned to each node - if (chosen != NULL) { - allowed = pcmk__top_allowed_node(instance, chosen); - if (allowed == NULL) { - /* The instance is allowed on the node, but its parent isn't. This - * shouldn't be possible if the resource is managed, and we won't be - * able to limit the number of instances assigned to the node. - */ - CRM_LOG_ASSERT(!pcmk_is_set(instance->flags, pe_rsc_managed)); - - } else { - allowed->count++; - } - } + increment_parent_count(instance, chosen); return chosen != NULL; } From 6cddfe269531661112537eb3ef7c90975feb73ea Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Thu, 22 Jun 2023 13:49:42 -0700 Subject: [PATCH 14/19] Refactor: libpe_status: Copy count in pe__copy_node() pe__copy_node() is supposed to make a shallow copy of a pe_node_t object. That should include the count member. The caller is free to reset it to 0 if desired. Signed-off-by: Reid Wahl --- lib/pengine/utils.c | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c index ef0a092dc16..199ce87e61f 100644 --- a/lib/pengine/utils.c +++ b/lib/pengine/utils.c @@ -98,6 +98,7 @@ pe__copy_node(const pe_node_t *this_node) new_node->rsc_discover_mode = this_node->rsc_discover_mode; new_node->weight = this_node->weight; new_node->fixed = this_node->fixed; // @COMPAT deprecated and unused + new_node->count = this_node->count; new_node->details = this_node->details; return new_node; From 30385bedeb5177b703b3b68d9579d55356187f26 Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Fri, 23 Jun 2023 15:29:17 -0700 Subject: [PATCH 15/19] Refactor: libpacemaker: Return chosen node from assign_instance() The return type was changed to bool by commit 97f67da8. However, an upcoming commit will need the assigned-to node. Ref T489 Signed-off-by: Reid Wahl --- lib/pacemaker/pcmk_sched_instances.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c index 1b051cb2ed9..64c027b20b1 100644 --- a/lib/pacemaker/pcmk_sched_instances.c +++ b/lib/pacemaker/pcmk_sched_instances.c @@ -580,7 +580,7 @@ increment_parent_count(pe_resource_t *instance, const pe_node_t *assigned_to) /*! 
* \internal - * \brief Choose a node for an instance + * \brief Assign an instance to a node * * \param[in,out] instance Clone instance or bundle replica container * \param[in] prefer If not NULL, attempt early assignment to this @@ -588,9 +588,9 @@ increment_parent_count(pe_resource_t *instance, const pe_node_t *assigned_to) * perform final assignment * \param[in] max_per_node Assign at most this many instances to one node * - * \return true if \p instance could be assigned to a node, otherwise false + * \return Node to which \p instance is assigned */ -static bool +static const pe_node_t * assign_instance(pe_resource_t *instance, const pe_node_t *prefer, int max_per_node) { @@ -603,7 +603,7 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, pe_rsc_debug(instance, "Assignment loop detected involving %s colocations", instance->id); - return false; + return NULL; } if (prefer != NULL) { // Possible early assignment to preferred node @@ -616,7 +616,7 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, pe_rsc_trace(instance, "Not assigning %s to preferred node %s: unavailable", instance->id, pe__node_name(prefer)); - return false; + return NULL; } } @@ -644,7 +644,7 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, } increment_parent_count(instance, chosen); - return chosen != NULL; + return chosen; } /*! @@ -763,11 +763,15 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances, } current = preferred_node(collective, instance, optimal_per_node); - if ((current != NULL) - && assign_instance(instance, current, max_per_node)) { - pe_rsc_trace(collective, "Assigned %s to current node %s", - instance->id, pe__node_name(current)); - assigned++; + if (current != NULL) { + const pe_node_t *chosen = assign_instance(instance, current, + max_per_node); + + if (pe__same_node(chosen, current)) { + pe_rsc_trace(collective, "Assigned %s to current node %s", + instance->id, pe__node_name(current)); + assigned++; + } } } @@ -802,7 +806,7 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances, resource_location(instance, NULL, -INFINITY, "collective_limit_reached", collective->cluster); - } else if (assign_instance(instance, NULL, max_per_node)) { + } else if (assign_instance(instance, NULL, max_per_node) != NULL) { assigned++; } } From 010649ef135ee0d4aca916d2d61c79bcba446951 Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Fri, 23 Jun 2023 21:30:47 -0700 Subject: [PATCH 16/19] Refactor: libpacemaker: New stop_if_fail argument for assign() method ...of resource_alloc_functions_t. This will allow us to do a fully reversible assignment. Currently pcmk__unassign_resource() undoes everything assignment-related but can't undo changes to roles and actions. Now, if stop_if_fail is true, the assign() method and pcmk__assign_resource() behave as before. If stop_if_fail is false and assignment succeeds, we can safely either consider the assignment final or revert it via pcmk__unassign_resource(). If assignment fails, the effect is as if we had called pcmk__unassign_resource(); there are no side effects on next role or actions. 
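To illustrate, a sketch of a fully reversible assignment attempt (here
trial_acceptable() is a hypothetical stand-in for whatever acceptance
check the caller applies; it is not a real function):

    GHashTable *backup = NULL;
    pe_node_t *chosen = NULL;

    pcmk__copy_node_tables(rsc, &backup);           // save allowed node tables
    chosen = rsc->cmds->assign(rsc, prefer, false); // no stop-on-fail effects

    if (!trial_acceptable(chosen)) {                // hypothetical caller policy
        pcmk__restore_node_tables(rsc, backup);
        pcmk__unassign_resource(rsc);               // fully undoes the assignment
    }
    g_hash_table_destroy(backup);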
Ref T489 Signed-off-by: Reid Wahl --- include/pcmki/pcmki_sched_allocate.h | 3 +- lib/pacemaker/libpacemaker_private.h | 30 ++++++++++++---- lib/pacemaker/pcmk_sched_bundle.c | 30 +++++++++++----- lib/pacemaker/pcmk_sched_clone.c | 22 +++++++++--- lib/pacemaker/pcmk_sched_group.c | 18 +++++++--- lib/pacemaker/pcmk_sched_instances.c | 24 +++++++------ lib/pacemaker/pcmk_sched_primitive.c | 52 ++++++++++++++++++++-------- lib/pacemaker/pcmk_sched_resource.c | 41 ++++++++++++++++------ lib/pacemaker/pcmk_scheduler.c | 4 +-- 9 files changed, 163 insertions(+), 61 deletions(-) diff --git a/include/pcmki/pcmki_sched_allocate.h b/include/pcmki/pcmki_sched_allocate.h index 32044ea96d4..f027d1211f0 100644 --- a/include/pcmki/pcmki_sched_allocate.h +++ b/include/pcmki/pcmki_sched_allocate.h @@ -19,7 +19,8 @@ # include # include -pe_node_t *pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer); +pe_node_t *pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer, + bool stop_if_fail); void pcmk__bundle_create_actions(pe_resource_t *rsc); bool pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node); void pcmk__bundle_internal_constraints(pe_resource_t *rsc); diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h index 8cdd13f7304..642176aafcd 100644 --- a/lib/pacemaker/libpacemaker_private.h +++ b/lib/pacemaker/libpacemaker_private.h @@ -58,12 +58,24 @@ struct resource_alloc_functions_s { * \internal * \brief Assign a resource to a node * - * \param[in,out] rsc Resource to assign to a node - * \param[in] prefer Node to prefer, if all else is equal + * \param[in,out] rsc Resource to assign to a node + * \param[in] prefer Node to prefer, if all else is equal + * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a + * node, set next role to stopped and update + * existing actions (if \p rsc is not a + * primitive, this applies to its primitive + * descendants instead) * * \return Node that \p rsc is assigned to, if assigned entirely to one node + * + * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() + * can completely undo the assignment. A successful assignment can be + * either undone or left alone as final. A failed assignment has the + * same effect as calling pcmk__unassign_resource(); there are no side + * effects on roles or actions. */ - pe_node_t *(*assign)(pe_resource_t *rsc, const pe_node_t *prefer); + pe_node_t *(*assign)(pe_resource_t *rsc, const pe_node_t *prefer, + bool stop_if_fail); /*! 
* \internal @@ -649,7 +661,8 @@ void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pe_action_t *action); // Primitives (pcmk_sched_primitive.c) G_GNUC_INTERNAL -pe_node_t *pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer); +pe_node_t *pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer, + bool stop_if_fail); G_GNUC_INTERNAL void pcmk__primitive_create_actions(pe_resource_t *rsc); @@ -696,7 +709,8 @@ void pcmk__primitive_shutdown_lock(pe_resource_t *rsc); // Groups (pcmk_sched_group.c) G_GNUC_INTERNAL -pe_node_t *pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer); +pe_node_t *pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer, + bool stop_if_fail); G_GNUC_INTERNAL void pcmk__group_create_actions(pe_resource_t *rsc); @@ -756,7 +770,8 @@ void pcmk__group_shutdown_lock(pe_resource_t *rsc); // Clones (pcmk_sched_clone.c) G_GNUC_INTERNAL -pe_node_t *pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer); +pe_node_t *pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer, + bool stop_if_fail); G_GNUC_INTERNAL void pcmk__clone_apply_coloc_score(pe_resource_t *dependent, @@ -915,7 +930,8 @@ G_GNUC_INTERNAL void pcmk__output_resource_actions(pe_resource_t *rsc); G_GNUC_INTERNAL -bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force); +bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force, + bool stop_if_fail); G_GNUC_INTERNAL void pcmk__unassign_resource(pe_resource_t *rsc); diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c index 5682744395a..05a8626c889 100644 --- a/lib/pacemaker/pcmk_sched_bundle.c +++ b/lib/pacemaker/pcmk_sched_bundle.c @@ -36,13 +36,24 @@ is_bundle_node(pe__bundle_variant_data_t *data, pe_node_t *node) * \internal * \brief Assign a bundle resource to a node * - * \param[in,out] rsc Resource to assign to a node - * \param[in] prefer Node to prefer, if all else is equal + * \param[in,out] rsc Resource to assign to a node + * \param[in] prefer Node to prefer, if all else is equal + * \param[in] stop_if_fail If \c true and a primitive descendant of \p rsc + * can't be assigned to a node, set the + * descendant's next role to stopped and update + * existing actions * * \return Node that \p rsc is assigned to, if assigned entirely to one node + * + * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can + * completely undo the assignment. A successful assignment can be either + * undone or left alone as final. A failed assignment has the same effect + * as calling pcmk__unassign_resource(); there are no side effects on + * roles or actions. 
*/ pe_node_t * -pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer) +pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer, + bool stop_if_fail) { GList *containers = NULL; pe__bundle_variant_data_t *bundle_data = NULL; @@ -71,7 +82,7 @@ pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer) if (replica->ip) { pe_rsc_trace(rsc, "Allocating bundle %s IP %s", rsc->id, replica->ip->id); - replica->ip->cmds->assign(replica->ip, prefer); + replica->ip->cmds->assign(replica->ip, prefer, stop_if_fail); } container_host = replica->container->allocated_to; @@ -89,7 +100,8 @@ pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer) if (replica->remote) { pe_rsc_trace(rsc, "Allocating bundle %s connection %s", rsc->id, replica->remote->id); - replica->remote->cmds->assign(replica->remote, prefer); + replica->remote->cmds->assign(replica->remote, prefer, + stop_if_fail); } // Explicitly allocate replicas' children before bundle child @@ -110,7 +122,8 @@ pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer) pe__set_resource_flags(replica->child->parent, pe_rsc_allocating); pe_rsc_trace(rsc, "Allocating bundle %s replica child %s", rsc->id, replica->child->id); - replica->child->cmds->assign(replica->child, replica->node); + replica->child->cmds->assign(replica->child, replica->node, + stop_if_fail); pe__clear_resource_flags(replica->child->parent, pe_rsc_allocating); } @@ -129,7 +142,8 @@ pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer) } pe_rsc_trace(rsc, "Allocating bundle %s child %s", rsc->id, bundle_data->child->id); - bundle_data->child->cmds->assign(bundle_data->child, prefer); + bundle_data->child->cmds->assign(bundle_data->child, prefer, + stop_if_fail); } pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional); @@ -457,7 +471,7 @@ pcmk__bundle_apply_coloc_score(pe_resource_t *dependent, } else if (colocation->score >= INFINITY) { crm_notice("Cannot pair %s with instance of %s", dependent->id, primary->id); - pcmk__assign_resource(dependent, NULL, true); + pcmk__assign_resource(dependent, NULL, true, true); } else { pe_rsc_debug(primary, "Cannot pair %s with instance of %s", diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c index 934f512d549..229257fd2be 100644 --- a/lib/pacemaker/pcmk_sched_clone.c +++ b/lib/pacemaker/pcmk_sched_clone.c @@ -18,13 +18,24 @@ * \internal * \brief Assign a clone resource's instances to nodes * - * \param[in,out] rsc Clone resource to assign - * \param[in] prefer Node to prefer, if all else is equal + * \param[in,out] rsc Clone resource to assign + * \param[in] prefer Node to prefer, if all else is equal + * \param[in] stop_if_fail If \c true and a primitive descendant of \p rsc + * can't be assigned to a node, set the + * descendant's next role to stopped and update + * existing actions * * \return NULL (clones are not assigned to a single node) + * + * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can + * completely undo the assignment. A successful assignment can be either + * undone or left alone as final. A failed assignment has the same effect + * as calling pcmk__unassign_resource(); there are no side effects on + * roles or actions. 
*/ pe_node_t * -pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer) +pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer, + bool stop_if_fail) { CRM_ASSERT(pe_rsc_is_clone(rsc)); @@ -53,7 +64,8 @@ pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer) pe_rsc_trace(rsc, "%s: Assigning colocation %s primary %s first", rsc->id, constraint->id, constraint->primary->id); - constraint->primary->cmds->assign(constraint->primary, prefer); + constraint->primary->cmds->assign(constraint->primary, prefer, + stop_if_fail); } /* If any resources are colocated with this one, consider their preferences. @@ -305,7 +317,7 @@ pcmk__clone_apply_coloc_score(pe_resource_t *dependent, } else if (colocation->score >= INFINITY) { crm_notice("Cannot pair %s with instance of %s", dependent->id, primary->id); - pcmk__assign_resource(dependent, NULL, true); + pcmk__assign_resource(dependent, NULL, true, true); } else { pe_rsc_debug(primary, "Cannot pair %s with instance of %s", diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c index cb139f7ddf9..55d890a5c4f 100644 --- a/lib/pacemaker/pcmk_sched_group.c +++ b/lib/pacemaker/pcmk_sched_group.c @@ -20,13 +20,23 @@ * \internal * \brief Assign a group resource to a node * - * \param[in,out] rsc Group resource to assign to a node - * \param[in] prefer Node to prefer, if all else is equal + * \param[in,out] rsc Group resource to assign to a node + * \param[in] prefer Node to prefer, if all else is equal + * \param[in] stop_if_fail If \c true and a child of \p rsc can't be + * assigned to a node, set the child's next role to + * stopped and update existing actions * * \return Node that \p rsc is assigned to, if assigned entirely to one node + * + * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can + * completely undo the assignment. A successful assignment can be either + * undone or left alone as final. A failed assignment has the same effect + * as calling pcmk__unassign_resource(); there are no side effects on + * roles or actions. 
*/ pe_node_t * -pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer) +pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer, + bool stop_if_fail) { pe_node_t *first_assigned_node = NULL; pe_resource_t *first_member = NULL; @@ -61,7 +71,7 @@ pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer) pe_rsc_trace(rsc, "Assigning group %s member %s", rsc->id, member->id); - node = member->cmds->assign(member, prefer); + node = member->cmds->assign(member, prefer, stop_if_fail); if (first_assigned_node == NULL) { first_assigned_node = node; } diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c index 64c027b20b1..b551f3bee61 100644 --- a/lib/pacemaker/pcmk_sched_instances.c +++ b/lib/pacemaker/pcmk_sched_instances.c @@ -623,22 +623,26 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, ban_unavailable_allowed_nodes(instance, max_per_node); if (prefer == NULL) { // Final assignment - chosen = instance->cmds->assign(instance, NULL); + chosen = instance->cmds->assign(instance, NULL, true); } else { // Possible early assignment to preferred node GHashTable *backup = NULL; pcmk__copy_node_tables(instance, &backup); - chosen = instance->cmds->assign(instance, prefer); - - // Revert nodes if preferred node won't be assigned - if ((chosen != NULL) && (chosen->details != prefer->details)) { - crm_info("Not assigning %s to preferred node %s: %s is better", - instance->id, pe__node_name(prefer), - pe__node_name(chosen)); + chosen = instance->cmds->assign(instance, prefer, false); + + if (!pe__same_node(chosen, prefer)) { + // Revert nodes if preferred node won't be assigned + if (chosen != NULL) { + pe_rsc_info(instance, + "Not assigning %s to preferred node %s: " + "%s is better", + instance->id, pe__node_name(prefer), + pe__node_name(chosen)); + chosen = NULL; + } pcmk__restore_node_tables(instance, backup); pcmk__unassign_resource(instance); - chosen = NULL; } g_hash_table_destroy(backup); } @@ -1181,7 +1185,7 @@ unassign_if_mandatory(const pe_action_t *first, const pe_action_t *then, "Inhibiting %s from being active " "because there is no %s instance to interleave", then_instance->id, first->rsc->id); - return pcmk__assign_resource(then_instance, NULL, true); + return pcmk__assign_resource(then_instance, NULL, true, true); } return false; } diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c index 2470b08ed69..50f11138f23 100644 --- a/lib/pacemaker/pcmk_sched_primitive.c +++ b/lib/pacemaker/pcmk_sched_primitive.c @@ -141,13 +141,23 @@ sorted_allowed_nodes(const pe_resource_t *rsc) * \internal * \brief Assign a resource to its best allowed node, if possible * - * \param[in,out] rsc Resource to choose a node for - * \param[in] prefer If not NULL, prefer this node when all else equal + * \param[in,out] rsc Resource to choose a node for + * \param[in] prefer If not \c NULL, prefer this node when all else + * equal + * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a + * node, set next role to stopped and update + * existing actions * * \return true if \p rsc could be assigned to a node, otherwise false + * + * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can + * completely undo the assignment. A successful assignment can be either + * undone or left alone as final. A failed assignment has the same effect + * as calling pcmk__unassign_resource(); there are no side effects on + * roles or actions. 
*/ static bool -assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer) +assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer, bool stop_if_fail) { GList *nodes = NULL; pe_node_t *chosen = NULL; @@ -259,7 +269,7 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer) pe__node_name(chosen), rsc->id, g_list_length(nodes)); } - pcmk__assign_resource(rsc, chosen, false); + pcmk__assign_resource(rsc, chosen, false, stop_if_fail); g_list_free(nodes); return rsc->allocated_to != NULL; } @@ -292,7 +302,7 @@ apply_this_with(gpointer data, gpointer user_data) "(score=%d role=%s)", rsc->id, colocation->id, other->id, colocation->score, role2text(colocation->dependent_role)); - other->cmds->assign(other, NULL); + other->cmds->assign(other, NULL, true); } // Apply the colocation score to this resource's allowed node scores @@ -351,13 +361,23 @@ remote_connection_assigned(const pe_resource_t *connection) * \internal * \brief Assign a primitive resource to a node * - * \param[in,out] rsc Resource to assign to a node - * \param[in] prefer Node to prefer, if all else is equal + * \param[in,out] rsc Resource to assign to a node + * \param[in] prefer Node to prefer, if all else is equal + * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a + * node, set next role to stopped and update + * existing actions * * \return Node that \p rsc is assigned to, if assigned entirely to one node + * + * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can + * completely undo the assignment. A successful assignment can be either + * undone or left alone as final. A failed assignment has the same effect + * as calling pcmk__unassign_resource(); there are no side effects on + * roles or actions. */ pe_node_t * -pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer) +pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer, + bool stop_if_fail) { GList *this_with_colocations = NULL; GList *with_this_colocations = NULL; @@ -371,7 +391,7 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer) && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "%s: Assigning parent %s first", rsc->id, rsc->parent->id); - rsc->parent->cmds->assign(rsc->parent, prefer); + rsc->parent->cmds->assign(rsc->parent, prefer, stop_if_fail); } if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { @@ -474,20 +494,24 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer) } pe_rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id, (assign_to? 
assign_to->details->uname : "no node"), reason);
-        pcmk__assign_resource(rsc, assign_to, true);
+        pcmk__assign_resource(rsc, assign_to, true, stop_if_fail);
 
     } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_everything)) {
-        pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources", rsc->id);
-        pcmk__assign_resource(rsc, NULL, true);
+        // Must stop at some point, but be consistent with stop_if_fail
+        if (stop_if_fail) {
+            pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources",
+                         rsc->id);
+        }
+        pcmk__assign_resource(rsc, NULL, true, stop_if_fail);
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
-               && assign_best_node(rsc, prefer)) {
+               && assign_best_node(rsc, prefer, stop_if_fail)) {
         // Assignment successful
 
     } else if (rsc->allocated_to == NULL) {
         if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
             pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
 
-        } else if (rsc->running_on != NULL) {
+        } else if ((rsc->running_on != NULL) && stop_if_fail) {
             pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
         }
 
diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c
index 8f703789b20..36f49dc49b9 100644
--- a/lib/pacemaker/pcmk_sched_resource.c
+++ b/lib/pacemaker/pcmk_sched_resource.c
@@ -335,25 +335,38 @@ pcmk__output_resource_actions(pe_resource_t *rsc)
  *
  * Assign a specified resource and its children (if any) to a specified node, if
  * the node can run the resource (or unconditionally, if \p force is true). Mark
- * the resources as no longer provisional. If a resource can't be assigned (or
- * \p node is \c NULL), unassign any previous assignment, set next role to
- * stopped, and update any existing actions scheduled for it.
+ * the resources as no longer provisional.
  *
- * \param[in,out] rsc    Resource to assign
- * \param[in,out] node   Node to assign \p rsc to
- * \param[in]     force  If true, assign to \p node even if unavailable
+ * If a resource can't be assigned (or \p node is \c NULL), unassign any
+ * previous assignment. If \p stop_if_fail is \c true, set next role to stopped
+ * and update any existing actions scheduled for the resource.
+ *
+ * \param[in,out] rsc           Resource to assign
+ * \param[in,out] node          Node to assign \p rsc to
+ * \param[in]     force         If true, assign to \p node even if unavailable
+ * \param[in]     stop_if_fail  If \c true and either \p rsc can't be assigned
+ *                              or \p node is \c NULL, set next role to
+ *                              stopped and update existing actions (if \p rsc
+ *                              is not a primitive, this applies to its
+ *                              primitive descendants instead)
  *
  * \return \c true if the assignment of \p rsc changed, or \c false otherwise
  *
  * \note Assigning a resource to the NULL node using this function is different
- *       from calling pcmk__unassign_resource(), in that it will also update any
+ *       from calling pcmk__unassign_resource(), in that it may also update any
  *       actions created for the resource.
  * \note The \c resource_alloc_functions_t:assign() method is preferred, unless
  *       a resource should be assigned to the \c NULL node or every resource in
  *       a tree should be assigned to the same node.
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
+ *       completely undo the assignment. A successful assignment can be either
+ *       undone or left alone as final. A failed assignment has the same effect
+ *       as calling pcmk__unassign_resource(); there are no side effects on
*/ bool -pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force) +pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force, + bool stop_if_fail) { bool changed = false; @@ -363,7 +376,8 @@ pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force) for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *child_rsc = iter->data; - changed |= pcmk__assign_resource(child_rsc, node, force); + changed |= pcmk__assign_resource(child_rsc, node, force, + stop_if_fail); } return changed; } @@ -382,7 +396,10 @@ pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force) rsc->id, pe__node_name(node), (pcmk__node_available(node, true, false)? "" : "not"), pcmk_readable_score(node->weight)); - pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability"); + + if (stop_if_fail) { + pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability"); + } node = NULL; } @@ -398,6 +415,10 @@ pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force) char *rc_stopped = NULL; pe_rsc_debug(rsc, "Could not assign %s to a node", rsc->id); + + if (!stop_if_fail) { + return changed; + } pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to assign"); for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) { diff --git a/lib/pacemaker/pcmk_scheduler.c b/lib/pacemaker/pcmk_scheduler.c index b4e670d865c..508cd5721c4 100644 --- a/lib/pacemaker/pcmk_scheduler.c +++ b/lib/pacemaker/pcmk_scheduler.c @@ -318,7 +318,7 @@ allocate_resources(pe_working_set_t *data_set) if (rsc->is_remote_node) { pe_rsc_trace(rsc, "Allocating remote connection resource '%s'", rsc->id); - rsc->cmds->assign(rsc, rsc->partial_migration_target); + rsc->cmds->assign(rsc, rsc->partial_migration_target, true); } } } @@ -330,7 +330,7 @@ allocate_resources(pe_working_set_t *data_set) if (!rsc->is_remote_node) { pe_rsc_trace(rsc, "Allocating %s resource '%s'", crm_element_name(rsc->xml), rsc->id); - rsc->cmds->assign(rsc, NULL); + rsc->cmds->assign(rsc, NULL, true); } } From a698dd1e17f184977f87c4ef44c2eb5b9bd933f6 Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Mon, 10 Jul 2023 02:44:46 -0700 Subject: [PATCH 17/19] Test: scheduler: Update tests after new stop_if_fail argument Some scores are repeated since we're able to back out of a failed early assignment now. Only one test changes otherwise. bug-1822 has a score change from -INFINITY to 49. However, the partially active group is still not allowed to promote, which is the purpose of the test. 
Ref T489 Signed-off-by: Reid Wahl --- cts/scheduler/scores/594.scores | 3 +++ cts/scheduler/scores/bug-1822.scores | 2 +- .../bug-5014-CLONE-A-stop-B-started.scores | 1 + cts/scheduler/scores/bug-lf-2171.scores | 4 ++++ cts/scheduler/scores/bug-lf-2422.scores | 16 ++++++++++++++++ cts/scheduler/scores/bug-lf-2453.scores | 4 ++++ cts/scheduler/scores/bug-lf-2574.scores | 3 +++ .../scores/bundle-order-stop-clone.scores | 4 ++++ cts/scheduler/scores/clone-max-zero.scores | 8 ++++++++ cts/scheduler/scores/cloned-group-stop.scores | 4 ++++ cts/scheduler/scores/complex_enforce_colo.scores | 9 +++++++++ cts/scheduler/scores/enforce-colo1.scores | 9 +++++++++ .../scores/promoted-asymmetrical-order.scores | 4 ++++ .../scores/promoted-failed-demote-2.scores | 10 ++++++++++ .../scores/promoted-failed-demote.scores | 10 ++++++++++ 15 files changed, 90 insertions(+), 1 deletion(-) diff --git a/cts/scheduler/scores/594.scores b/cts/scheduler/scores/594.scores index 5e99750df21..96c8f441b98 100644 --- a/cts/scheduler/scores/594.scores +++ b/cts/scheduler/scores/594.scores @@ -21,8 +21,11 @@ pcmk__primitive_assign: child_DoFencing:1 allocation score on hadev1: 1 pcmk__primitive_assign: child_DoFencing:1 allocation score on hadev2: -INFINITY pcmk__primitive_assign: child_DoFencing:1 allocation score on hadev3: -INFINITY pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev1: -INFINITY +pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev1: -INFINITY +pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev2: -INFINITY pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev2: -INFINITY pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev3: -INFINITY +pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev3: -INFINITY pcmk__primitive_assign: rsc_hadev1 allocation score on hadev1: 100 pcmk__primitive_assign: rsc_hadev1 allocation score on hadev2: 0 pcmk__primitive_assign: rsc_hadev1 allocation score on hadev3: 0 diff --git a/cts/scheduler/scores/bug-1822.scores b/cts/scheduler/scores/bug-1822.scores index 82191d1e74b..0a9056bbf3e 100644 --- a/cts/scheduler/scores/bug-1822.scores +++ b/cts/scheduler/scores/bug-1822.scores @@ -1,5 +1,5 @@ -ms-sf_group:0 promotion score on process2b: -INFINITY +ms-sf_group:0 promotion score on process2b: 49 ms-sf_group:1 promotion score on none: 0 pcmk__clone_assign: ms-sf allocation score on process1a: 0 pcmk__clone_assign: ms-sf allocation score on process2b: 0 diff --git a/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores b/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores index e698b145274..d79208c7336 100644 --- a/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores +++ b/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores @@ -5,3 +5,4 @@ pcmk__clone_assign: clone1 allocation score on fc16-builder: 0 pcmk__clone_assign: clone2 allocation score on fc16-builder: 0 pcmk__primitive_assign: ClusterIP2:0 allocation score on fc16-builder: 1 pcmk__primitive_assign: ClusterIP:0 allocation score on fc16-builder: -INFINITY +pcmk__primitive_assign: ClusterIP:0 allocation score on fc16-builder: -INFINITY diff --git a/cts/scheduler/scores/bug-lf-2171.scores b/cts/scheduler/scores/bug-lf-2171.scores index 7d2bdd45307..14cc28a88c5 100644 --- a/cts/scheduler/scores/bug-lf-2171.scores +++ b/cts/scheduler/scores/bug-lf-2171.scores @@ -12,8 +12,12 @@ pcmk__group_assign: res_Dummy2 allocation score on xenserver2: 0 pcmk__group_assign: res_Dummy3 allocation score on xenserver1: 200 
pcmk__group_assign: res_Dummy3 allocation score on xenserver2: 0 pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver1: -INFINITY +pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver1: -INFINITY +pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver2: -INFINITY pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver2: -INFINITY pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver1: -INFINITY +pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver1: -INFINITY +pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver2: -INFINITY pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver2: -INFINITY pcmk__primitive_assign: res_Dummy2 allocation score on xenserver1: 200 pcmk__primitive_assign: res_Dummy2 allocation score on xenserver2: 0 diff --git a/cts/scheduler/scores/bug-lf-2422.scores b/cts/scheduler/scores/bug-lf-2422.scores index 99ff12e3bb6..77a284da9ce 100644 --- a/cts/scheduler/scores/bug-lf-2422.scores +++ b/cts/scheduler/scores/bug-lf-2422.scores @@ -248,20 +248,36 @@ pcmk__primitive_assign: o2cb:3 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: o2cb:3 allocation score on qa-suse-3: -INFINITY pcmk__primitive_assign: o2cb:3 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-4: -INFINITY 
pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: sbd_stonith allocation score on qa-suse-1: 0 pcmk__primitive_assign: sbd_stonith allocation score on qa-suse-2: 0 diff --git a/cts/scheduler/scores/bug-lf-2453.scores b/cts/scheduler/scores/bug-lf-2453.scores index eaee72d2002..3ef0f6dc375 100644 --- a/cts/scheduler/scores/bug-lf-2453.scores +++ b/cts/scheduler/scores/bug-lf-2453.scores @@ -17,6 +17,10 @@ pcmk__primitive_assign: DummyResource:1 allocation score on domu1: -INFINITY pcmk__primitive_assign: DummyResource:1 allocation score on domu2: INFINITY pcmk__primitive_assign: PrimitiveResource1 allocation score on domu1: INFINITY pcmk__primitive_assign: apache:0 allocation score on domu1: -INFINITY +pcmk__primitive_assign: apache:0 allocation score on domu1: -INFINITY +pcmk__primitive_assign: apache:0 allocation score on domu2: -INFINITY pcmk__primitive_assign: apache:0 allocation score on domu2: -INFINITY pcmk__primitive_assign: apache:1 allocation score on domu1: -INFINITY +pcmk__primitive_assign: apache:1 allocation score on domu1: -INFINITY +pcmk__primitive_assign: apache:1 allocation score on domu2: -INFINITY pcmk__primitive_assign: apache:1 allocation score on domu2: -INFINITY diff --git a/cts/scheduler/scores/bug-lf-2574.scores b/cts/scheduler/scores/bug-lf-2574.scores index 0f5cf60a7e0..b4a1bd95841 100644 --- a/cts/scheduler/scores/bug-lf-2574.scores +++ b/cts/scheduler/scores/bug-lf-2574.scores @@ -39,8 +39,11 @@ pcmk__primitive_assign: prmDummy1:2 allocation score on srv01: -INFINITY pcmk__primitive_assign: prmDummy1:2 allocation score on srv02: -INFINITY pcmk__primitive_assign: prmDummy1:2 allocation score on srv03: -INFINITY pcmk__primitive_assign: prmPingd:0 allocation score on srv01: -INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on srv01: -INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on srv02: -INFINITY pcmk__primitive_assign: prmPingd:0 allocation score on srv02: -INFINITY pcmk__primitive_assign: prmPingd:0 allocation score on srv03: -INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on srv03: -INFINITY pcmk__primitive_assign: prmPingd:1 allocation score on srv01: -INFINITY pcmk__primitive_assign: prmPingd:1 allocation score on srv02: -INFINITY pcmk__primitive_assign: prmPingd:1 allocation score on srv03: INFINITY diff --git a/cts/scheduler/scores/bundle-order-stop-clone.scores b/cts/scheduler/scores/bundle-order-stop-clone.scores index 707260b80a9..06596e86a24 100644 --- a/cts/scheduler/scores/bundle-order-stop-clone.scores +++ b/cts/scheduler/scores/bundle-order-stop-clone.scores @@ -147,8 +147,12 @@ pcmk__primitive_assign: galera-bundle-2 allocation score on metal-2: 0 pcmk__primitive_assign: galera-bundle-2 allocation score on metal-3: INFINITY pcmk__primitive_assign: galera-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-1: -INFINITY +pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-1: -INFINITY +pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-2: -INFINITY pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-2: -INFINITY pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-3: -INFINITY +pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-3: -INFINITY +pcmk__primitive_assign: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY pcmk__primitive_assign: 
galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY pcmk__primitive_assign: galera-bundle-docker-1 allocation score on metal-1: -INFINITY pcmk__primitive_assign: galera-bundle-docker-1 allocation score on metal-2: INFINITY diff --git a/cts/scheduler/scores/clone-max-zero.scores b/cts/scheduler/scores/clone-max-zero.scores index f1711b7885e..bd116a2764c 100644 --- a/cts/scheduler/scores/clone-max-zero.scores +++ b/cts/scheduler/scores/clone-max-zero.scores @@ -26,10 +26,18 @@ pcmk__primitive_assign: drbd0:1 allocation score on c001n12: -INFINITY pcmk__primitive_assign: fencing allocation score on c001n11: 0 pcmk__primitive_assign: fencing allocation score on c001n12: 0 pcmk__primitive_assign: o2cb:0 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: o2cb:0 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: o2cb:0 allocation score on c001n12: -INFINITY pcmk__primitive_assign: o2cb:0 allocation score on c001n12: -INFINITY pcmk__primitive_assign: o2cb:1 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: o2cb:1 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: o2cb:1 allocation score on c001n12: -INFINITY pcmk__primitive_assign: o2cb:1 allocation score on c001n12: -INFINITY pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n12: -INFINITY pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n12: -INFINITY pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n12: -INFINITY pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n12: -INFINITY diff --git a/cts/scheduler/scores/cloned-group-stop.scores b/cts/scheduler/scores/cloned-group-stop.scores index be835fa5371..7e406c6ddc2 100644 --- a/cts/scheduler/scores/cloned-group-stop.scores +++ b/cts/scheduler/scores/cloned-group-stop.scores @@ -122,8 +122,12 @@ pcmk__primitive_assign: mysql-fs allocation score on rhos4-node4: -INFINITY pcmk__primitive_assign: mysql-vip allocation score on rhos4-node3: 300 pcmk__primitive_assign: mysql-vip allocation score on rhos4-node4: -INFINITY pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node3: -INFINITY +pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node3: -INFINITY +pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node4: -INFINITY pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node4: -INFINITY pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node3: -INFINITY +pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node3: -INFINITY +pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node4: -INFINITY pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node4: -INFINITY pcmk__primitive_assign: virt-fencing allocation score on rhos4-node3: 100 pcmk__primitive_assign: virt-fencing allocation score on rhos4-node4: 0 diff --git a/cts/scheduler/scores/complex_enforce_colo.scores b/cts/scheduler/scores/complex_enforce_colo.scores index 9968e1097ef..a5d0b2b4125 100644 --- a/cts/scheduler/scores/complex_enforce_colo.scores +++ b/cts/scheduler/scores/complex_enforce_colo.scores @@ -588,13 +588,22 @@ pcmk__primitive_assign: horizon:2 allocation score on rhos6-node1: -INFINITY pcmk__primitive_assign: horizon:2 allocation score on rhos6-node2: -INFINITY pcmk__primitive_assign: 
horizon:2 allocation score on rhos6-node3: 1 pcmk__primitive_assign: keystone:0 allocation score on rhos6-node1: -INFINITY +pcmk__primitive_assign: keystone:0 allocation score on rhos6-node1: -INFINITY +pcmk__primitive_assign: keystone:0 allocation score on rhos6-node2: -INFINITY pcmk__primitive_assign: keystone:0 allocation score on rhos6-node2: -INFINITY pcmk__primitive_assign: keystone:0 allocation score on rhos6-node3: -INFINITY +pcmk__primitive_assign: keystone:0 allocation score on rhos6-node3: -INFINITY pcmk__primitive_assign: keystone:1 allocation score on rhos6-node1: -INFINITY +pcmk__primitive_assign: keystone:1 allocation score on rhos6-node1: -INFINITY +pcmk__primitive_assign: keystone:1 allocation score on rhos6-node2: -INFINITY pcmk__primitive_assign: keystone:1 allocation score on rhos6-node2: -INFINITY pcmk__primitive_assign: keystone:1 allocation score on rhos6-node3: -INFINITY +pcmk__primitive_assign: keystone:1 allocation score on rhos6-node3: -INFINITY +pcmk__primitive_assign: keystone:2 allocation score on rhos6-node1: -INFINITY pcmk__primitive_assign: keystone:2 allocation score on rhos6-node1: -INFINITY pcmk__primitive_assign: keystone:2 allocation score on rhos6-node2: -INFINITY +pcmk__primitive_assign: keystone:2 allocation score on rhos6-node2: -INFINITY +pcmk__primitive_assign: keystone:2 allocation score on rhos6-node3: -INFINITY pcmk__primitive_assign: keystone:2 allocation score on rhos6-node3: -INFINITY pcmk__primitive_assign: lb-haproxy:0 allocation score on rhos6-node1: 1 pcmk__primitive_assign: lb-haproxy:0 allocation score on rhos6-node2: 0 diff --git a/cts/scheduler/scores/enforce-colo1.scores b/cts/scheduler/scores/enforce-colo1.scores index 8194789648a..262cbd94a30 100644 --- a/cts/scheduler/scores/enforce-colo1.scores +++ b/cts/scheduler/scores/enforce-colo1.scores @@ -18,13 +18,22 @@ pcmk__primitive_assign: engine allocation score on rhel7-auto1: -INFINITY pcmk__primitive_assign: engine allocation score on rhel7-auto2: -INFINITY pcmk__primitive_assign: engine allocation score on rhel7-auto3: 0 pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto1: -INFINITY +pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto1: -INFINITY +pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto2: -INFINITY pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto2: -INFINITY pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto3: -INFINITY +pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto3: -INFINITY pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto1: -INFINITY +pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto1: -INFINITY +pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto2: -INFINITY pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto2: -INFINITY pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto3: -INFINITY +pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto3: -INFINITY +pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto1: -INFINITY pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto1: -INFINITY pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto2: -INFINITY +pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto2: -INFINITY +pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto3: -INFINITY pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto3: -INFINITY pcmk__primitive_assign: shooter allocation score on rhel7-auto1: 0 
pcmk__primitive_assign: shooter allocation score on rhel7-auto2: 0 diff --git a/cts/scheduler/scores/promoted-asymmetrical-order.scores b/cts/scheduler/scores/promoted-asymmetrical-order.scores index 382e0ebe285..18bc704551e 100644 --- a/cts/scheduler/scores/promoted-asymmetrical-order.scores +++ b/cts/scheduler/scores/promoted-asymmetrical-order.scores @@ -12,8 +12,12 @@ pcmk__clone_assign: rsc2:0 allocation score on node2: 0 pcmk__clone_assign: rsc2:1 allocation score on node1: 0 pcmk__clone_assign: rsc2:1 allocation score on node2: 1 pcmk__primitive_assign: rsc1:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc1:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc1:0 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc1:0 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc2:0 allocation score on node1: 1 pcmk__primitive_assign: rsc2:0 allocation score on node2: 0 diff --git a/cts/scheduler/scores/promoted-failed-demote-2.scores b/cts/scheduler/scores/promoted-failed-demote-2.scores index 2a85ae6060e..e457d8c6057 100644 --- a/cts/scheduler/scores/promoted-failed-demote-2.scores +++ b/cts/scheduler/scores/promoted-failed-demote-2.scores @@ -16,22 +16,32 @@ pcmk__clone_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__clone_assign: stateful-2:1 allocation score on dl380g5a: INFINITY pcmk__clone_assign: stateful-2:1 allocation score on dl380g5b: 0 pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: group:0 allocation score on dl380g5b: 0 pcmk__group_assign: group:0 allocation score on dl380g5b: 0 pcmk__group_assign: group:1 allocation score on dl380g5a: 0 pcmk__group_assign: group:1 allocation score on dl380g5b: 0 pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-1:1 allocation score on dl380g5a: INFINITY pcmk__group_assign: stateful-1:1 allocation score on dl380g5b: 0 pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-2:1 allocation score on dl380g5a: INFINITY pcmk__group_assign: stateful-2:1 allocation score on dl380g5b: 0 pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY +pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY +pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5a: INFINITY pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5b: 0 pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY +pcmk__primitive_assign: stateful-2:0 allocation score on 
dl380g5a: -INFINITY +pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5a: INFINITY pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5b: -INFINITY diff --git a/cts/scheduler/scores/promoted-failed-demote.scores b/cts/scheduler/scores/promoted-failed-demote.scores index 2a85ae6060e..e457d8c6057 100644 --- a/cts/scheduler/scores/promoted-failed-demote.scores +++ b/cts/scheduler/scores/promoted-failed-demote.scores @@ -16,22 +16,32 @@ pcmk__clone_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__clone_assign: stateful-2:1 allocation score on dl380g5a: INFINITY pcmk__clone_assign: stateful-2:1 allocation score on dl380g5b: 0 pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: group:0 allocation score on dl380g5b: 0 pcmk__group_assign: group:0 allocation score on dl380g5b: 0 pcmk__group_assign: group:1 allocation score on dl380g5a: 0 pcmk__group_assign: group:1 allocation score on dl380g5b: 0 pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-1:1 allocation score on dl380g5a: INFINITY pcmk__group_assign: stateful-1:1 allocation score on dl380g5b: 0 pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-2:1 allocation score on dl380g5a: INFINITY pcmk__group_assign: stateful-2:1 allocation score on dl380g5b: 0 pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY +pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY +pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5a: INFINITY pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5b: 0 pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY +pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY +pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5a: INFINITY pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5b: -INFINITY From 4abb93e5c779cf058861a25c5eac456ac1087fd6 Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Wed, 21 Jun 2023 22:40:20 -0700 Subject: [PATCH 18/19] Fix: libpacemaker: Don't shuffle clone instances unnecessarily Currently, clone instances may be shuffled under certain conditions, causing an unnecessary resource downtime when an instance is moved away from its current running node. For example, this can happen when a stopped promotable instance is scheduled to promote and the stickiness is lower than the promotion score (see the clone-recover-no-shuffle-7 test). 
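To make the numbers concrete (invented for illustration; they are not taken from the test), suppose the instance's current node carries stickiness 5 while the node due for promotion carries a promotion score of 10. A rough sketch of why greedy highest-score assignment shuffles the running instance:

    #include <stdio.h>

    int
    main(void)
    {
        /* Invented scores: instance 0 runs on node2 (stickiness 5) while a
         * stopped instance is due to be promoted on node1 (score 10). */
        const int promotion_score_node1 = 10;
        const int stickiness_node2 = 5;

        /* Greedy: instance 0 is assigned first and chases the higher score,
         * so it must stop on node2 before it can start on node1. */
        const char *greedy = (promotion_score_node1 > stickiness_node2)
                             ? "node1" : "node2";
        printf("greedy choice for instance 0: %s (unnecessary shuffle)\n",
               greedy);

        /* Current-node-first: node2 keeps its running instance, and the
         * stopped instance starts on node1 and is promoted there. */
        printf("current-node-first choice for instance 0: node2\n");
        return 0;
    }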
Instance 0 gets assigned first and goes to the node that will be promoted. If instance 0 is already running on some node, it must stop there before it can start on the new node. Another instance may start in its place after it stops. The fix is to assign an instance to its current node during the early assignment phase, if that node is going to receive any instance at all. If the node will receive an instance, it should receive its current instance. The approach is described in detail in comments. Previously, if instance 0 was running on node1 and got assigned to node2 during the early assignment phase (due to node2 having a higher score), we backed out and immediately gave up on assigning instance 0 early. Now, we increment a "number of instances reserved" counter, as well as the parent's counter of instances assigned to node2. We then try again to assign instance 0 to node1. If node2 already has the max allowed number of instances, then it will be marked unavailable for this round. Fixes T489 Fixes RHBZ#1931023 Signed-off-by: Reid Wahl --- lib/pacemaker/pcmk_sched_instances.c | 163 ++++++++++++++++++++------- 1 file changed, 122 insertions(+), 41 deletions(-) diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c index b551f3bee61..b010d460dbc 100644 --- a/lib/pacemaker/pcmk_sched_instances.c +++ b/lib/pacemaker/pcmk_sched_instances.c @@ -605,50 +605,135 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, instance->id); return NULL; } + ban_unavailable_allowed_nodes(instance, max_per_node); + + // Failed early assignments are reversible (stop_if_fail=false) + chosen = instance->cmds->assign(instance, prefer, (prefer == NULL)); + increment_parent_count(instance, chosen); + return chosen; +} + +/*! 
+ * \internal + * \brief Try to assign an instance to its current node early + * + * \param[in] rsc Clone or bundle being assigned (for logs only) + * \param[in] instance Clone instance or bundle replica container + * \param[in] current Instance's current node + * \param[in] max_per_node Maximum number of instances per node + * \param[in] available Number of instances still available for assignment + * + * \return \c true if \p instance was successfully assigned to its current node, + * or \c false otherwise + */ +static bool +assign_instance_early(const pe_resource_t *rsc, pe_resource_t *instance, + const pe_node_t *current, int max_per_node, int available) +{ + const pe_node_t *chosen = NULL; + int reserved = 0; - if (prefer != NULL) { // Possible early assignment to preferred node + pe_resource_t *parent = instance->parent; + GHashTable *allowed_orig = NULL; + GHashTable *allowed_orig_parent = parent->allowed_nodes; - // Get preferred node with instance's scores - pe_node_t *allowed = g_hash_table_lookup(instance->allowed_nodes, - prefer->details->id); + const pe_node_t *allowed_node = g_hash_table_lookup(instance->allowed_nodes, + current->details->id); - if ((allowed == NULL) || (allowed->weight < 0)) { - pe_rsc_trace(instance, - "Not assigning %s to preferred node %s: unavailable", - instance->id, pe__node_name(prefer)); - return NULL; - } + pe_rsc_trace(instance, "Trying to assign %s to its current node %s", + instance->id, pe__node_name(current)); + + if (!pcmk__node_available(allowed_node, true, false)) { + pe_rsc_info(instance, + "Not assigning %s to current node %s: unavailable", + instance->id, pe__node_name(current)); + return false; } - ban_unavailable_allowed_nodes(instance, max_per_node); + /* On each iteration, if instance gets assigned to a node other than its + * current one, we reserve one instance for the chosen node, unassign + * instance, restore instance's original node tables, and try again. This + * way, instances are proportionally assigned to nodes based on preferences, + * but shuffling of specific instances is minimized. If a node will be + * assigned instances at all, it preferentially receives instances that are + * currently active there. + * + * parent->allowed_nodes tracks the number of instances assigned to each + * node. If a node already has max_per_node instances assigned, + * ban_unavailable_allowed_nodes() marks it as unavailable. + * + * In the end, we restore the original parent->allowed_nodes to undo the + * changes to counts during tentative assignments. If we successfully + * assigned instance to its current node, we increment that node's counter. 
+ */ - if (prefer == NULL) { // Final assignment - chosen = instance->cmds->assign(instance, NULL, true); + // Back up the allowed node tables of instance and its children recursively + pcmk__copy_node_tables(instance, &allowed_orig); - } else { // Possible early assignment to preferred node - GHashTable *backup = NULL; + // Update instances-per-node counts in a scratch table + parent->allowed_nodes = pcmk__copy_node_table(parent->allowed_nodes); - pcmk__copy_node_tables(instance, &backup); - chosen = instance->cmds->assign(instance, prefer, false); + while (reserved < available) { + chosen = assign_instance(instance, current, max_per_node); - if (!pe__same_node(chosen, prefer)) { - // Revert nodes if preferred node won't be assigned - if (chosen != NULL) { - pe_rsc_info(instance, - "Not assigning %s to preferred node %s: " - "%s is better", - instance->id, pe__node_name(prefer), - pe__node_name(chosen)); - chosen = NULL; - } - pcmk__restore_node_tables(instance, backup); - pcmk__unassign_resource(instance); + if (pe__same_node(chosen, current)) { + // Successfully assigned to current node + break; + } + + // Assignment updates scores, so restore to original state + pe_rsc_debug(instance, "Rolling back node scores for %s", instance->id); + pcmk__restore_node_tables(instance, allowed_orig); + + if (chosen == NULL) { + // Assignment failed, so give up + pe_rsc_info(instance, + "Not assigning %s to current node %s: unavailable", + instance->id, pe__node_name(current)); + pe__set_resource_flags(instance, pe_rsc_provisional); + break; + } + + // We prefer more strongly to assign an instance to the chosen node + pe_rsc_debug(instance, + "Not assigning %s to current node %s: %s is better", + instance->id, pe__node_name(current), + pe__node_name(chosen)); + + // Reserve one instance for the chosen node and try again + if (++reserved >= available) { + pe_rsc_info(instance, + "Not assigning %s to current node %s: " + "other assignments are more important", + instance->id, pe__node_name(current)); + + } else { + pe_rsc_debug(instance, + "Reserved an instance of %s for %s. Retrying " + "assignment of %s to %s", + rsc->id, pe__node_name(chosen), instance->id, + pe__node_name(current)); } - g_hash_table_destroy(backup); + + // Clear this assignment (frees chosen); leave instance counts in parent + pcmk__unassign_resource(instance); + chosen = NULL; } + g_hash_table_destroy(allowed_orig); + + // Restore original instances-per-node counts + g_hash_table_destroy(parent->allowed_nodes); + parent->allowed_nodes = allowed_orig_parent; + + if (chosen == NULL) { + // Couldn't assign instance to current node + return false; + } + pe_rsc_trace(instance, "Assigned %s to current node %s", + instance->id, pe__node_name(current)); increment_parent_count(instance, chosen); - return chosen; + return true; } /*! 
@@ -760,22 +845,18 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances, // Assign as many instances as possible to their current location for (iter = instances; (iter != NULL) && (assigned < max_total); iter = iter->next) { - instance = (pe_resource_t *) iter->data; + int available = max_total - assigned; + instance = iter->data; if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) { continue; // Already assigned } current = preferred_node(collective, instance, optimal_per_node); - if (current != NULL) { - const pe_node_t *chosen = assign_instance(instance, current, - max_per_node); - - if (pe__same_node(chosen, current)) { - pe_rsc_trace(collective, "Assigned %s to current node %s", - instance->id, pe__node_name(current)); - assigned++; - } + if ((current != NULL) + && assign_instance_early(collective, instance, current, + max_per_node, available)) { + assigned++; } } From 59e9950212506a9034db8e90a17033734a1d18a1 Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Mon, 10 Jul 2023 02:50:28 -0700 Subject: [PATCH 19/19] Test: scheduler: Update test outputs after clone instance shuffling fix The following tests are now correct: * clone-recover-no-shuffle-4 * clone-recover-no-shuffle-5 * clone-recover-no-shuffle-6 * clone-recover-no-shuffle-7 Scores for several other tests are changed in ways (usually duplicates from additional tentative assignments) that don't impact the resulting transition. One test (cancel-behind-moving-remote) technically breaks. Previously, due to shuffling, ovn-dbs-bundle-1 moved to controller-0. Since ovndb_servers:1 gets promoted on ovn-dbs-bundle-1, controller-0 held the promoted instance of ovn-dbs-bundle. Now, since instances correctly prefer their current nodes, ovn-dbs-bundle-1 remains on controller-2. However, ovndb_servers:1 still gets promoted on ovn-dbs-bundle-1, so controller-2 holds the promoted instance of ovn-dbs-bundle. ip-172.17.1.87 is colocated with ovn-dbs-bundle's promoted role and is banned from controller-2. As a result, ip-172.17.1.87 is now stopped. This test is believed to have worked properly in the past due only to luck. At this point (see T672 and the bundle-promoted-*colocation-* tests), it's well-established that colocations involving promotable bundles don't work correctly. 
Ref T489 Ref RHBZ#1931023 Signed-off-by: Reid Wahl --- .../dot/cancel-behind-moving-remote.dot | 99 +-- .../dot/clone-recover-no-shuffle-4.dot | 23 +- .../dot/clone-recover-no-shuffle-5.dot | 57 +- .../dot/clone-recover-no-shuffle-6.dot | 99 +-- .../dot/clone-recover-no-shuffle-7.dot | 35 +- .../exp/cancel-behind-moving-remote.exp | 724 +++++------------- .../exp/clone-recover-no-shuffle-4.exp | 98 +-- .../exp/clone-recover-no-shuffle-5.exp | 239 +----- .../exp/clone-recover-no-shuffle-6.exp | 434 ++--------- .../exp/clone-recover-no-shuffle-7.exp | 174 ++--- cts/scheduler/scores/bug-cl-5168.scores | 2 +- .../scores/cancel-behind-moving-remote.scores | 27 +- .../scores/clone-recover-no-shuffle-10.scores | 2 +- .../scores/clone-recover-no-shuffle-4.scores | 10 +- .../scores/clone-recover-no-shuffle-5.scores | 48 +- .../scores/clone-recover-no-shuffle-6.scores | 22 +- .../scores/clone-recover-no-shuffle-7.scores | 14 +- .../scores/promoted-failed-demote-2.scores | 4 - .../scores/promoted-failed-demote.scores | 4 - .../scores/utilization-complex.scores | 24 + .../scores/utilization-order2.scores | 2 + .../cancel-behind-moving-remote.summary | 61 +- .../clone-recover-no-shuffle-4.summary | 8 +- .../clone-recover-no-shuffle-5.summary | 22 +- .../clone-recover-no-shuffle-6.summary | 48 +- .../clone-recover-no-shuffle-7.summary | 12 +- .../xml/cancel-behind-moving-remote.xml | 14 + .../xml/clone-recover-no-shuffle-4.xml | 5 - .../xml/clone-recover-no-shuffle-5.xml | 5 - .../xml/clone-recover-no-shuffle-6.xml | 5 - .../xml/clone-recover-no-shuffle-7.xml | 5 - 31 files changed, 526 insertions(+), 1800 deletions(-) diff --git a/cts/scheduler/dot/cancel-behind-moving-remote.dot b/cts/scheduler/dot/cancel-behind-moving-remote.dot index 1a0dfc8c889..de803a7e299 100644 --- a/cts/scheduler/dot/cancel-behind-moving-remote.dot +++ b/cts/scheduler/dot/cancel-behind-moving-remote.dot @@ -1,28 +1,12 @@ digraph "g" { "Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-1" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] "Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -"ip-172.17.1.87_monitor_10000 controller-0" [ style=bold color="green" fontcolor="black"] -"ip-172.17.1.87_start_0 controller-0" -> "ip-172.17.1.87_monitor_10000 controller-0" [ style = bold] -"ip-172.17.1.87_start_0 controller-0" [ style=bold color="green" fontcolor="black"] "nova-evacuate_clear_failcount_0 messaging-0" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-0_clear_failcount_0 controller-0" -> "ovn-dbs-bundle-0_start_0 controller-2" [ style = bold] -"ovn-dbs-bundle-0_clear_failcount_0 controller-0" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-0_monitor_30000 controller-2" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-0_start_0 controller-2" -> "ovn-dbs-bundle-0_monitor_30000 controller-2" [ style = bold] -"ovn-dbs-bundle-0_start_0 controller-2" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] -"ovn-dbs-bundle-0_start_0 controller-2" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] -"ovn-dbs-bundle-0_start_0 controller-2" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-1_clear_failcount_0 controller-2" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold] -"ovn-dbs-bundle-1_clear_failcount_0 controller-2" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-1_monitor_30000 controller-0" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-1_start_0 
controller-0" -> "ovn-dbs-bundle-1_monitor_30000 controller-0" [ style = bold] -"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle-1_start_0 controller-0" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-1_stop_0 controller-2" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold] -"ovn-dbs-bundle-1_stop_0 controller-2" -> "ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style = bold] -"ovn-dbs-bundle-1_stop_0 controller-2" [ style=bold color="green" fontcolor="black"] +"ovn-dbs-bundle-0_monitor_30000 controller-0" [ style=bold color="green" fontcolor="black"] +"ovn-dbs-bundle-0_start_0 controller-0" -> "ovn-dbs-bundle-0_monitor_30000 controller-0" [ style = bold] +"ovn-dbs-bundle-0_start_0 controller-0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] +"ovn-dbs-bundle-0_start_0 controller-0" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] +"ovn-dbs-bundle-0_start_0 controller-0" [ style=bold color="green" fontcolor="black"] "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovn-dbs-bundle_promoted_0" [ style = bold] "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] @@ -32,19 +16,12 @@ "ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] "ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] "ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle-master_pre_notify_promote_0" [ style = bold] -"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle-master_pre_notify_start_0" [ style = bold] -"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle_stopped_0" [ style = bold] -"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" -> "ovn-dbs-bundle-master_post_notify_promoted_0" [ style = bold] "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold] "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" -> "ovn-dbs-bundle-master_post_notify_running_0" [ style = bold] "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold] "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" -> "ovn-dbs-bundle-master_post_notify_stopped_0" [ style = bold] -"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" -> "ovn-dbs-bundle-master_stop_0" [ style = bold] -"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] 
"ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" [ style = bold] "ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" [ style = bold] @@ -55,21 +32,15 @@ "ovn-dbs-bundle-master_post_notify_running_0" -> "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-1" [ style = bold] "ovn-dbs-bundle-master_post_notify_running_0" -> "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" [ style = bold] "ovn-dbs-bundle-master_post_notify_running_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-master_post_notify_stopped_0" -> "ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style = bold] -"ovn-dbs-bundle-master_post_notify_stopped_0" -> "ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" [ style = bold] -"ovn-dbs-bundle-master_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] "ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers:0_pre_notify_promote_0 ovn-dbs-bundle-0" [ style = bold] "ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" [ style = bold] "ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" [ style = bold] "ovn-dbs-bundle-master_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_pre_notify_start_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold] +"ovn-dbs-bundle-master_pre_notify_start_0" -> "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-1" [ style = bold] "ovn-dbs-bundle-master_pre_notify_start_0" -> "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" [ style = bold] "ovn-dbs-bundle-master_pre_notify_start_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold] -"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" [ style = bold] -"ovn-dbs-bundle-master_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_promote_0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] "ovn-dbs-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_promoted_0" -> "ovn-dbs-bundle-master_post_notify_promoted_0" [ style = bold] @@ -79,48 +50,21 @@ "ovn-dbs-bundle-master_running_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_start_0" -> "ovn-dbs-bundle-master_running_0" [ style = bold] "ovn-dbs-bundle-master_start_0" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] -"ovn-dbs-bundle-master_start_0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] "ovn-dbs-bundle-master_start_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-master_stop_0" -> "ovn-dbs-bundle-master_stopped_0" [ style = bold] -"ovn-dbs-bundle-master_stop_0" -> "ovndb_servers_stop_0 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle-master_stop_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_post_notify_stopped_0" [ style = bold] -"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold] 
-"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold] -"ovn-dbs-bundle-master_stopped_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-podman-0_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle-0_start_0 controller-2" [ style = bold] -"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle-podman-0_monitor_60000 controller-2" [ style = bold] -"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle_running_0" [ style = bold] -"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] -"ovn-dbs-bundle-podman-0_start_0 controller-2" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-podman-1_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold] -"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle-podman-1_monitor_60000 controller-0" [ style = bold] -"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle_running_0" [ style = bold] -"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle-podman-1_start_0 controller-0" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-podman-1_stop_0 controller-2" -> "ovn-dbs-bundle-podman-1_start_0 controller-0" [ style = bold] -"ovn-dbs-bundle-podman-1_stop_0 controller-2" -> "ovn-dbs-bundle_stopped_0" [ style = bold] -"ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle_promote_0" -> "ip-172.17.1.87_start_0 controller-0" [ style = bold] +"ovn-dbs-bundle-podman-0_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] +"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovn-dbs-bundle-0_start_0 controller-0" [ style = bold] +"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovn-dbs-bundle-podman-0_monitor_60000 controller-0" [ style = bold] +"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovn-dbs-bundle_running_0" [ style = bold] +"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] +"ovn-dbs-bundle-podman-0_start_0 controller-0" [ style=bold color="green" fontcolor="black"] "ovn-dbs-bundle_promote_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold] "ovn-dbs-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle_running_0" -> "ovn-dbs-bundle_promote_0" [ style = bold] "ovn-dbs-bundle_running_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold] -"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-podman-0_start_0 controller-2" [ style = bold] -"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-podman-1_start_0 controller-0" [ style = bold] +"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-podman-0_start_0 controller-0" [ style = bold] "ovn-dbs-bundle_start_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle_stop_0" -> "ovn-dbs-bundle-master_stop_0" [ style = bold] -"ovn-dbs-bundle_stop_0" -> "ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style = bold] -"ovn-dbs-bundle_stop_0" -> "ovndb_servers_stop_0 
ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle_stopped_0" -> "ovn-dbs-bundle_promote_0" [ style = bold] -"ovn-dbs-bundle_stopped_0" -> "ovn-dbs-bundle_start_0" [ style = bold] -"ovn-dbs-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] "ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] "ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] @@ -130,7 +74,6 @@ "ovndb_servers:0_pre_notify_promote_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] "ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_running_0" [ style = bold] "ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] -"ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] "ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] @@ -141,29 +84,17 @@ "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style = bold] "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] -"ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style = bold] -"ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] +"ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold] +"ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold] "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] -"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold] -"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold] -"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] "ovndb_servers_promote_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_promoted_0" [ style = bold] "ovndb_servers_promote_0 ovn-dbs-bundle-1" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] 
"ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_running_0" [ style = bold] -"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] -"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] -"ovndb_servers_start_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-1_stop_0 controller-2" [ style = bold] -"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_stopped_0" [ style = bold] -"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] -"ovndb_servers_stop_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] "rabbitmq-bundle-1_monitor_30000 controller-0" [ style=dashed color="red" fontcolor="black"] "rabbitmq-bundle-1_start_0 controller-0" -> "rabbitmq-bundle-1_monitor_30000 controller-0" [ style = dashed] "rabbitmq-bundle-1_start_0 controller-0" -> "rabbitmq:1_monitor_10000 rabbitmq-bundle-1" [ style = dashed] diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-4.dot b/cts/scheduler/dot/clone-recover-no-shuffle-4.dot index fd002f28fcf..287d82d3806 100644 --- a/cts/scheduler/dot/clone-recover-no-shuffle-4.dot +++ b/cts/scheduler/dot/clone-recover-no-shuffle-4.dot @@ -1,23 +1,10 @@ digraph "g" { "dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] "dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] -"dummy-clone_start_0" -> "dummy:2_start_0 node2" [ style = bold] -"dummy-clone_start_0" -> "dummy_start_0 node1" [ style = bold] +"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold] "dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] -"dummy-clone_stop_0" -> "dummy-clone_stopped_0" [ style = bold] -"dummy-clone_stop_0" -> "dummy_stop_0 node2" [ style = bold] -"dummy-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -"dummy-clone_stopped_0" -> "dummy-clone_start_0" [ style = bold] -"dummy-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -"dummy:2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] -"dummy:2_start_0 node2" -> "dummy-clone_running_0" [ style = bold] -"dummy:2_start_0 node2" -> "dummy:2_monitor_10000 node2" [ style = bold] -"dummy:2_start_0 node2" [ style=bold color="green" fontcolor="black"] -"dummy_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -"dummy_start_0 node1" -> "dummy-clone_running_0" [ style = bold] -"dummy_start_0 node1" -> "dummy_monitor_10000 node1" [ style = bold] -"dummy_start_0 node1" [ style=bold color="green" fontcolor="black"] -"dummy_stop_0 node2" -> "dummy-clone_stopped_0" [ style = bold] -"dummy_stop_0 node2" -> "dummy_start_0 node1" [ style = bold] -"dummy_stop_0 node2" [ style=bold color="green" fontcolor="black"] +"dummy:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold] +"dummy:2_start_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold] +"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-5.dot b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot index a2356f2280b..d3bdf04baa9 100644 --- a/cts/scheduler/dot/clone-recover-no-shuffle-5.dot +++ b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot @@ -1,56 +1,21 @@ digraph "g" { 
"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] "grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] -"grp-clone_start_0" -> "grp:0_start_0" [ style = bold] "grp-clone_start_0" -> "grp:2_start_0" [ style = bold] "grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] -"grp-clone_stop_0" -> "grp-clone_stopped_0" [ style = bold] -"grp-clone_stop_0" -> "grp:0_stop_0" [ style = bold] -"grp-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -"grp-clone_stopped_0" -> "grp-clone_start_0" [ style = bold] -"grp-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -"grp:0_running_0" -> "grp-clone_running_0" [ style = bold] -"grp:0_running_0" [ style=bold color="green" fontcolor="orange"] -"grp:0_start_0" -> "grp:0_running_0" [ style = bold] -"grp:0_start_0" -> "rsc1_start_0 node1" [ style = bold] -"grp:0_start_0" -> "rsc2_start_0 node1" [ style = bold] -"grp:0_start_0" [ style=bold color="green" fontcolor="orange"] -"grp:0_stop_0" -> "grp:0_stopped_0" [ style = bold] -"grp:0_stop_0" -> "rsc1_stop_0 node2" [ style = bold] -"grp:0_stop_0" -> "rsc2_stop_0 node2" [ style = bold] -"grp:0_stop_0" [ style=bold color="green" fontcolor="orange"] -"grp:0_stopped_0" -> "grp-clone_stopped_0" [ style = bold] -"grp:0_stopped_0" -> "grp:0_start_0" [ style = bold] -"grp:0_stopped_0" [ style=bold color="green" fontcolor="orange"] "grp:2_running_0" -> "grp-clone_running_0" [ style = bold] "grp:2_running_0" [ style=bold color="green" fontcolor="orange"] "grp:2_start_0" -> "grp:2_running_0" [ style = bold] -"grp:2_start_0" -> "rsc1:2_start_0 node2" [ style = bold] -"grp:2_start_0" -> "rsc2:2_start_0 node2" [ style = bold] +"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] +"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] "grp:2_start_0" [ style=bold color="green" fontcolor="orange"] -"rsc1:2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] -"rsc1:2_start_0 node2" -> "grp:2_running_0" [ style = bold] -"rsc1:2_start_0 node2" -> "rsc1:2_monitor_10000 node2" [ style = bold] -"rsc1:2_start_0 node2" -> "rsc2:2_start_0 node2" [ style = bold] -"rsc1:2_start_0 node2" [ style=bold color="green" fontcolor="black"] -"rsc1_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -"rsc1_start_0 node1" -> "grp:0_running_0" [ style = bold] -"rsc1_start_0 node1" -> "rsc1_monitor_10000 node1" [ style = bold] -"rsc1_start_0 node1" -> "rsc2_start_0 node1" [ style = bold] -"rsc1_start_0 node1" [ style=bold color="green" fontcolor="black"] -"rsc1_stop_0 node2" -> "grp:0_stopped_0" [ style = bold] -"rsc1_stop_0 node2" -> "rsc1_start_0 node1" [ style = bold] -"rsc1_stop_0 node2" [ style=bold color="green" fontcolor="black"] -"rsc2:2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] -"rsc2:2_start_0 node2" -> "grp:2_running_0" [ style = bold] -"rsc2:2_start_0 node2" -> "rsc2:2_monitor_10000 node2" [ style = bold] -"rsc2:2_start_0 node2" [ style=bold color="green" fontcolor="black"] -"rsc2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -"rsc2_start_0 node1" -> "grp:0_running_0" [ style = bold] -"rsc2_start_0 node1" -> "rsc2_monitor_10000 node1" [ style = bold] -"rsc2_start_0 node1" [ style=bold color="green" fontcolor="black"] -"rsc2_stop_0 node2" -> "grp:0_stopped_0" [ style = bold] -"rsc2_stop_0 node2" -> "rsc1_stop_0 node2" [ style = bold] -"rsc2_stop_0 node2" -> "rsc2_start_0 node1" [ style = bold] -"rsc2_stop_0 node2" [ style=bold color="green" fontcolor="black"] +"rsc1:2_monitor_10000 node1" [ 
style=bold color="green" fontcolor="black"] +"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] +"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] +"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-6.dot b/cts/scheduler/dot/clone-recover-no-shuffle-6.dot index f8cfe9252d2..f60fd2cc04e 100644 --- a/cts/scheduler/dot/clone-recover-no-shuffle-6.dot +++ b/cts/scheduler/dot/clone-recover-no-shuffle-6.dot @@ -1,97 +1,32 @@ digraph "g" { -"base-bundle-0_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] -"base-bundle-0_start_0 node1" -> "base-bundle-0_monitor_30000 node1" [ style = bold] -"base-bundle-0_start_0 node1" -> "base_start_0 base-bundle-0" [ style = bold] -"base-bundle-0_start_0 node1" [ style=bold color="green" fontcolor="black"] -"base-bundle-0_stop_0 node3" -> "base-bundle-0_start_0 node1" [ style = bold] -"base-bundle-0_stop_0 node3" -> "base-bundle-podman-0_stop_0 node3" [ style = bold] -"base-bundle-0_stop_0 node3" [ style=bold color="green" fontcolor="black"] -"base-bundle-1_monitor_30000 node3" [ style=bold color="green" fontcolor="black"] -"base-bundle-1_start_0 node3" -> "base-bundle-1_monitor_30000 node3" [ style = bold] -"base-bundle-1_start_0 node3" -> "base_start_0 base-bundle-1" [ style = bold] -"base-bundle-1_start_0 node3" [ style=bold color="green" fontcolor="black"] -"base-bundle-1_stop_0 node2" -> "base-bundle-1_start_0 node3" [ style = bold] -"base-bundle-1_stop_0 node2" -> "base-bundle-podman-1_stop_0 node2" [ style = bold] -"base-bundle-1_stop_0 node2" [ style=bold color="green" fontcolor="black"] -"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node2" [ style = bold] +"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] "base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"] -"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node2" [ style = bold] +"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold] "base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] -"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node2" [ style = bold] +"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold] "base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"] -"base-bundle-2_monitor_30000 node2" [ style=bold color="green" fontcolor="black"] -"base-bundle-2_start_0 node2" -> "base-bundle-2_monitor_30000 node2" [ style = bold] -"base-bundle-2_start_0 node2" -> "base:2_start_0 base-bundle-2" [ style = bold] -"base-bundle-2_start_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold] +"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"] "base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] "base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] 
"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] "base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold] -"base-bundle-clone_start_0" -> "base_start_0 base-bundle-0" [ style = bold] -"base-bundle-clone_start_0" -> "base_start_0 base-bundle-1" [ style = bold] "base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] -"base-bundle-clone_stop_0" -> "base-bundle-clone_stopped_0" [ style = bold] -"base-bundle-clone_stop_0" -> "base_stop_0 base-bundle-0" [ style = bold] -"base-bundle-clone_stop_0" -> "base_stop_0 base-bundle-1" [ style = bold] -"base-bundle-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -"base-bundle-clone_stopped_0" -> "base-bundle-clone_start_0" [ style = bold] -"base-bundle-clone_stopped_0" -> "base-bundle_stopped_0" [ style = bold] -"base-bundle-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -"base-bundle-podman-0_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] -"base-bundle-podman-0_start_0 node1" -> "base-bundle-0_start_0 node1" [ style = bold] -"base-bundle-podman-0_start_0 node1" -> "base-bundle-podman-0_monitor_60000 node1" [ style = bold] -"base-bundle-podman-0_start_0 node1" -> "base-bundle_running_0" [ style = bold] -"base-bundle-podman-0_start_0 node1" -> "base_start_0 base-bundle-0" [ style = bold] -"base-bundle-podman-0_start_0 node1" [ style=bold color="green" fontcolor="black"] -"base-bundle-podman-0_stop_0 node3" -> "base-bundle-podman-0_start_0 node1" [ style = bold] -"base-bundle-podman-0_stop_0 node3" -> "base-bundle_stopped_0" [ style = bold] -"base-bundle-podman-0_stop_0 node3" [ style=bold color="green" fontcolor="black"] -"base-bundle-podman-1_monitor_60000 node3" [ style=bold color="green" fontcolor="black"] -"base-bundle-podman-1_start_0 node3" -> "base-bundle-1_start_0 node3" [ style = bold] -"base-bundle-podman-1_start_0 node3" -> "base-bundle-podman-1_monitor_60000 node3" [ style = bold] -"base-bundle-podman-1_start_0 node3" -> "base-bundle_running_0" [ style = bold] -"base-bundle-podman-1_start_0 node3" -> "base_start_0 base-bundle-1" [ style = bold] -"base-bundle-podman-1_start_0 node3" [ style=bold color="green" fontcolor="black"] -"base-bundle-podman-1_stop_0 node2" -> "base-bundle-podman-1_start_0 node3" [ style = bold] -"base-bundle-podman-1_stop_0 node2" -> "base-bundle_stopped_0" [ style = bold] -"base-bundle-podman-1_stop_0 node2" [ style=bold color="green" fontcolor="black"] -"base-bundle-podman-2_monitor_60000 node2" [ style=bold color="green" fontcolor="black"] -"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_monitor_0 node1" [ style = bold] -"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_monitor_0 node2" [ style = bold] -"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_monitor_0 node3" [ style = bold] -"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_start_0 node2" [ style = bold] -"base-bundle-podman-2_start_0 node2" -> "base-bundle-podman-2_monitor_60000 node2" [ style = bold] -"base-bundle-podman-2_start_0 node2" -> "base-bundle_running_0" [ style = bold] -"base-bundle-podman-2_start_0 node2" -> "base:2_start_0 base-bundle-2" [ style = bold] -"base-bundle-podman-2_start_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold] 
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"] "base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] "base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] -"base-bundle_start_0" -> "base-bundle-podman-0_start_0 node1" [ style = bold] -"base-bundle_start_0" -> "base-bundle-podman-1_start_0 node3" [ style = bold] -"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node2" [ style = bold] +"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold] "base-bundle_start_0" [ style=bold color="green" fontcolor="orange"] -"base-bundle_stop_0" -> "base-bundle-clone_stop_0" [ style = bold] -"base-bundle_stop_0" -> "base-bundle-podman-0_stop_0 node3" [ style = bold] -"base-bundle_stop_0" -> "base-bundle-podman-1_stop_0 node2" [ style = bold] -"base-bundle_stop_0" -> "base_stop_0 base-bundle-0" [ style = bold] -"base-bundle_stop_0" -> "base_stop_0 base-bundle-1" [ style = bold] -"base-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] -"base-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] "base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] "base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] -"base_start_0 base-bundle-0" -> "base-bundle-clone_running_0" [ style = bold] -"base_start_0 base-bundle-0" -> "base_start_0 base-bundle-1" [ style = bold] -"base_start_0 base-bundle-0" [ style=bold color="green" fontcolor="black"] -"base_start_0 base-bundle-1" -> "base-bundle-clone_running_0" [ style = bold] -"base_start_0 base-bundle-1" -> "base:2_start_0 base-bundle-2" [ style = bold] -"base_start_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] -"base_stop_0 base-bundle-0" -> "base-bundle-0_stop_0 node3" [ style = bold] -"base_stop_0 base-bundle-0" -> "base-bundle-clone_stopped_0" [ style = bold] -"base_stop_0 base-bundle-0" -> "base_start_0 base-bundle-0" [ style = bold] -"base_stop_0 base-bundle-0" [ style=bold color="green" fontcolor="black"] -"base_stop_0 base-bundle-1" -> "base-bundle-1_stop_0 node2" [ style = bold] -"base_stop_0 base-bundle-1" -> "base-bundle-clone_stopped_0" [ style = bold] -"base_stop_0 base-bundle-1" -> "base_start_0 base-bundle-1" [ style = bold] -"base_stop_0 base-bundle-1" -> "base_stop_0 base-bundle-0" [ style = bold] -"base_stop_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-7.dot b/cts/scheduler/dot/clone-recover-no-shuffle-7.dot index 8bff7da01db..f61bf0d7acf 100644 --- a/cts/scheduler/dot/clone-recover-no-shuffle-7.dot +++ b/cts/scheduler/dot/clone-recover-no-shuffle-7.dot @@ -6,40 +6,25 @@ "dummy-clone_demote_0" [ style=bold color="green" fontcolor="orange"] "dummy-clone_demoted_0" -> "dummy-clone_promote_0" [ style = bold] "dummy-clone_demoted_0" -> "dummy-clone_start_0" [ style = bold] -"dummy-clone_demoted_0" -> "dummy-clone_stop_0" [ style = bold] "dummy-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] -"dummy-clone_promote_0" -> "dummy_promote_0 node1" [ style = 
bold]
+"dummy-clone_promote_0" -> "dummy:2_promote_0 node1" [ style = bold]
 "dummy-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
 "dummy-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
 "dummy-clone_running_0" -> "dummy-clone_promote_0" [ style = bold]
 "dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"]
 "dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold]
-"dummy-clone_start_0" -> "dummy:2_start_0 node3" [ style = bold]
-"dummy-clone_start_0" -> "dummy_start_0 node1" [ style = bold]
+"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold]
 "dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"]
-"dummy-clone_stop_0" -> "dummy-clone_stopped_0" [ style = bold]
-"dummy-clone_stop_0" -> "dummy_stop_0 node3" [ style = bold]
-"dummy-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
-"dummy-clone_stopped_0" -> "dummy-clone_promote_0" [ style = bold]
-"dummy-clone_stopped_0" -> "dummy-clone_start_0" [ style = bold]
-"dummy-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
-"dummy:2_monitor_11000 node3" [ style=bold color="green" fontcolor="black"]
-"dummy:2_start_0 node3" -> "dummy-clone_running_0" [ style = bold]
-"dummy:2_start_0 node3" -> "dummy:2_monitor_11000 node3" [ style = bold]
-"dummy:2_start_0 node3" [ style=bold color="green" fontcolor="black"]
+"dummy:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"dummy:2_promote_0 node1" -> "dummy-clone_promoted_0" [ style = bold]
+"dummy:2_promote_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold]
+"dummy:2_promote_0 node1" [ style=bold color="green" fontcolor="black"]
+"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold]
+"dummy:2_start_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold]
+"dummy:2_start_0 node1" -> "dummy:2_promote_0 node1" [ style = bold]
+"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
 "dummy_demote_0 node2" -> "dummy-clone_demoted_0" [ style = bold]
 "dummy_demote_0 node2" -> "dummy_monitor_11000 node2" [ style = bold]
 "dummy_demote_0 node2" [ style=bold color="green" fontcolor="black"]
-"dummy_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
 "dummy_monitor_11000 node2" [ style=bold color="green" fontcolor="black"]
-"dummy_promote_0 node1" -> "dummy-clone_promoted_0" [ style = bold]
-"dummy_promote_0 node1" -> "dummy_monitor_10000 node1" [ style = bold]
-"dummy_promote_0 node1" [ style=bold color="green" fontcolor="black"]
-"dummy_start_0 node1" -> "dummy-clone_running_0" [ style = bold]
-"dummy_start_0 node1" -> "dummy_monitor_10000 node1" [ style = bold]
-"dummy_start_0 node1" -> "dummy_promote_0 node1" [ style = bold]
-"dummy_start_0 node1" [ style=bold color="green" fontcolor="black"]
-"dummy_stop_0 node3" -> "dummy-clone_stopped_0" [ style = bold]
-"dummy_stop_0 node3" -> "dummy_start_0 node1" [ style = bold]
-"dummy_stop_0 node3" [ style=bold color="green" fontcolor="black"]
 }
diff --git a/cts/scheduler/exp/cancel-behind-moving-remote.exp b/cts/scheduler/exp/cancel-behind-moving-remote.exp
index 17759cb8c57..68cdf4d5370 100644
--- a/cts/scheduler/exp/cancel-behind-moving-remote.exp
+++ b/cts/scheduler/exp/cancel-behind-moving-remote.exp
[transition-graph XML hunks unrecoverable: the element markup was stripped in extraction]
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-4.exp b/cts/scheduler/exp/clone-recover-no-shuffle-4.exp
index 4596c685d0a..670a823dac9 100644
--- a/cts/scheduler/exp/clone-recover-no-shuffle-4.exp
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-4.exp
[transition-graph XML hunks unrecoverable: the element markup was stripped in extraction]
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-5.exp b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp
index c1cee43b12f..84b1e1bc98c 100644
--- a/cts/scheduler/exp/clone-recover-no-shuffle-5.exp
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp
[transition-graph XML hunks unrecoverable: the element markup was stripped in extraction]
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-6.exp b/cts/scheduler/exp/clone-recover-no-shuffle-6.exp
index e6704c9e254..6b6ed075f57 100644
--- a/cts/scheduler/exp/clone-recover-no-shuffle-6.exp
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-6.exp
[transition-graph XML hunks unrecoverable: the element markup was stripped in extraction]
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-7.exp b/cts/scheduler/exp/clone-recover-no-shuffle-7.exp
index 950de9e0312..870ed54e9c2 100644
--- a/cts/scheduler/exp/clone-recover-no-shuffle-7.exp
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-7.exp
[transition-graph XML hunks unrecoverable: the element markup was stripped in extraction]
diff --git a/cts/scheduler/scores/bug-cl-5168.scores b/cts/scheduler/scores/bug-cl-5168.scores
index 916fecb195f..59dee5d39b3 100644
--- a/cts/scheduler/scores/bug-cl-5168.scores
+++ b/cts/scheduler/scores/bug-cl-5168.scores
@@ -200,7 +200,7 @@ pcmk__primitive_assign: drbd-r1:0 allocation score on hex-2: 1001
 pcmk__primitive_assign: drbd-r1:0 allocation score on hex-3: -INFINITY
 pcmk__primitive_assign: drbd-r1:0 allocation score on hex-3: INFINITY
 pcmk__primitive_assign: drbd-r1:1 allocation score on hex-1: -INFINITY
-pcmk__primitive_assign: drbd-r1:1 allocation score on hex-2: 0
+pcmk__primitive_assign: drbd-r1:1 allocation score on hex-2: -INFINITY
 pcmk__primitive_assign: drbd-r1:1 allocation score on hex-3: INFINITY
 pcmk__primitive_assign: dummy1 allocation score on hex-1: -INFINITY
 pcmk__primitive_assign: dummy1 allocation score on hex-2: -INFINITY
diff --git a/cts/scheduler/scores/cancel-behind-moving-remote.scores b/cts/scheduler/scores/cancel-behind-moving-remote.scores
index 0e11b225aea..09f0175b9e2 100644
--- a/cts/scheduler/scores/cancel-behind-moving-remote.scores
+++ b/cts/scheduler/scores/cancel-behind-moving-remote.scores
@@ -1799,7 +1799,7 @@ pcmk__primitive_assign: ip-172.17.1.151 allocation score on messaging-1: -INFINI
 pcmk__primitive_assign: ip-172.17.1.151 allocation score on messaging-2: -INFINITY
 pcmk__primitive_assign: ip-172.17.1.87 allocation score on compute-0: -INFINITY
 pcmk__primitive_assign: ip-172.17.1.87 allocation score on compute-1: -INFINITY
-pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-0: 0
+pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-0: -INFINITY
 pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-1: -INFINITY
 pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-2: -INFINITY
 pcmk__primitive_assign: ip-172.17.1.87 allocation score on database-0: -INFINITY
@@ -1865,9 +1865,9 @@ pcmk__primitive_assign: openstack-cinder-volume-podman-0 allocation score on mes
 pcmk__primitive_assign: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY
 pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY
 pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY
-pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-0: 0
+pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-0: 10000
 pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-1: 0
-pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-2: 10000
+pcmk__primitive_assign: 
ovn-dbs-bundle-0 allocation score on controller-2: 0 pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on database-0: 0 pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on database-1: 0 pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on database-2: 0 @@ -1876,9 +1876,9 @@ pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on messaging-1: 0 pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on messaging-2: 0 pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY -pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-0: 10000 +pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-0: 0 pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-1: 0 -pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-2: 0 +pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-2: 10000 pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on database-0: 0 pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on database-1: 0 pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on database-2: 0 @@ -1898,9 +1898,9 @@ pcmk__primitive_assign: ovn-dbs-bundle-2 allocation score on messaging-1: 0 pcmk__primitive_assign: ovn-dbs-bundle-2 allocation score on messaging-2: 0 pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY -pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-0: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0 pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-1: -INFINITY -pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0 +pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY @@ -1909,24 +1909,35 @@ pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-1: pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY -pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on 
controller-1: 0 +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0 +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-10.scores b/cts/scheduler/scores/clone-recover-no-shuffle-10.scores index 4ac63e37058..4f4c29ed7f1 100644 --- a/cts/scheduler/scores/clone-recover-no-shuffle-10.scores +++ b/cts/scheduler/scores/clone-recover-no-shuffle-10.scores @@ -28,4 +28,4 @@ pcmk__primitive_assign: dummy:1 allocation score on node2: 16 pcmk__primitive_assign: dummy:1 allocation score on node3: 0 pcmk__primitive_assign: dummy:2 allocation score on node1: 10 pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY -pcmk__primitive_assign: dummy:2 allocation score on node3: 5 +pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-4.scores b/cts/scheduler/scores/clone-recover-no-shuffle-4.scores index 492dad1baa4..2a52c8185b2 100644 --- a/cts/scheduler/scores/clone-recover-no-shuffle-4.scores +++ b/cts/scheduler/scores/clone-recover-no-shuffle-4.scores @@ -14,7 +14,7 @@ pcmk__clone_assign: dummy:2 allocation score on node3: 0 pcmk__primitive_assign: Fencing allocation score on node1: 0 pcmk__primitive_assign: Fencing allocation score on node2: 0 pcmk__primitive_assign: Fencing allocation score on node3: 0 -pcmk__primitive_assign: dummy:0 allocation score on node1: 100 +pcmk__primitive_assign: dummy:0 allocation score on node1: -INFINITY pcmk__primitive_assign: dummy:0 allocation score on node1: 100 pcmk__primitive_assign: 
dummy:0 allocation score on node2: 1 pcmk__primitive_assign: dummy:0 allocation score on node2: 1 @@ -22,10 +22,10 @@ pcmk__primitive_assign: dummy:0 allocation score on node3: 0 pcmk__primitive_assign: dummy:0 allocation score on node3: 0 pcmk__primitive_assign: dummy:1 allocation score on node1: -INFINITY pcmk__primitive_assign: dummy:1 allocation score on node1: 100 -pcmk__primitive_assign: dummy:1 allocation score on node2: 0 -pcmk__primitive_assign: dummy:1 allocation score on node2: 0 +pcmk__primitive_assign: dummy:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:1 allocation score on node2: -INFINITY pcmk__primitive_assign: dummy:1 allocation score on node3: 1 pcmk__primitive_assign: dummy:1 allocation score on node3: 1 -pcmk__primitive_assign: dummy:2 allocation score on node1: -INFINITY -pcmk__primitive_assign: dummy:2 allocation score on node2: 0 +pcmk__primitive_assign: dummy:2 allocation score on node1: 100 +pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-5.scores b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores index 0dd9728830c..c6c8072db82 100644 --- a/cts/scheduler/scores/clone-recover-no-shuffle-5.scores +++ b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores @@ -29,7 +29,7 @@ pcmk__clone_assign: rsc2:1 allocation score on node3: 1 pcmk__clone_assign: rsc2:2 allocation score on node1: 0 pcmk__clone_assign: rsc2:2 allocation score on node2: 0 pcmk__clone_assign: rsc2:2 allocation score on node3: 0 -pcmk__group_assign: grp:0 allocation score on node1: 100 +pcmk__group_assign: grp:0 allocation score on node1: -INFINITY pcmk__group_assign: grp:0 allocation score on node1: 100 pcmk__group_assign: grp:0 allocation score on node2: 0 pcmk__group_assign: grp:0 allocation score on node2: 0 @@ -37,14 +37,14 @@ pcmk__group_assign: grp:0 allocation score on node3: 0 pcmk__group_assign: grp:0 allocation score on node3: 0 pcmk__group_assign: grp:1 allocation score on node1: -INFINITY pcmk__group_assign: grp:1 allocation score on node1: 100 -pcmk__group_assign: grp:1 allocation score on node2: 0 -pcmk__group_assign: grp:1 allocation score on node2: 0 +pcmk__group_assign: grp:1 allocation score on node2: -INFINITY +pcmk__group_assign: grp:1 allocation score on node2: -INFINITY pcmk__group_assign: grp:1 allocation score on node3: 0 pcmk__group_assign: grp:1 allocation score on node3: 0 -pcmk__group_assign: grp:2 allocation score on node1: -INFINITY -pcmk__group_assign: grp:2 allocation score on node2: 0 +pcmk__group_assign: grp:2 allocation score on node1: 100 +pcmk__group_assign: grp:2 allocation score on node2: -INFINITY pcmk__group_assign: grp:2 allocation score on node3: -INFINITY -pcmk__group_assign: rsc1:0 allocation score on node1: 100 +pcmk__group_assign: rsc1:0 allocation score on node1: -INFINITY pcmk__group_assign: rsc1:0 allocation score on node1: 100 pcmk__group_assign: rsc1:0 allocation score on node2: 1 pcmk__group_assign: rsc1:0 allocation score on node2: 1 @@ -52,14 +52,14 @@ pcmk__group_assign: rsc1:0 allocation score on node3: 0 pcmk__group_assign: rsc1:0 allocation score on node3: 0 pcmk__group_assign: rsc1:1 allocation score on node1: -INFINITY pcmk__group_assign: rsc1:1 allocation score on node1: 100 -pcmk__group_assign: rsc1:1 allocation score on node2: 0 -pcmk__group_assign: rsc1:1 allocation score on node2: 0 +pcmk__group_assign: rsc1:1 allocation score on node2: -INFINITY 
+pcmk__group_assign: rsc1:1 allocation score on node2: -INFINITY pcmk__group_assign: rsc1:1 allocation score on node3: 1 pcmk__group_assign: rsc1:1 allocation score on node3: 1 -pcmk__group_assign: rsc1:2 allocation score on node1: -INFINITY -pcmk__group_assign: rsc1:2 allocation score on node2: 0 +pcmk__group_assign: rsc1:2 allocation score on node1: 100 +pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY -pcmk__group_assign: rsc2:0 allocation score on node1: 0 +pcmk__group_assign: rsc2:0 allocation score on node1: -INFINITY pcmk__group_assign: rsc2:0 allocation score on node1: 0 pcmk__group_assign: rsc2:0 allocation score on node2: 1 pcmk__group_assign: rsc2:0 allocation score on node2: 1 @@ -67,17 +67,17 @@ pcmk__group_assign: rsc2:0 allocation score on node3: 0 pcmk__group_assign: rsc2:0 allocation score on node3: 0 pcmk__group_assign: rsc2:1 allocation score on node1: -INFINITY pcmk__group_assign: rsc2:1 allocation score on node1: 0 -pcmk__group_assign: rsc2:1 allocation score on node2: 0 -pcmk__group_assign: rsc2:1 allocation score on node2: 0 +pcmk__group_assign: rsc2:1 allocation score on node2: -INFINITY +pcmk__group_assign: rsc2:1 allocation score on node2: -INFINITY pcmk__group_assign: rsc2:1 allocation score on node3: 1 pcmk__group_assign: rsc2:1 allocation score on node3: 1 -pcmk__group_assign: rsc2:2 allocation score on node1: -INFINITY -pcmk__group_assign: rsc2:2 allocation score on node2: 0 +pcmk__group_assign: rsc2:2 allocation score on node1: 0 +pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY pcmk__primitive_assign: Fencing allocation score on node1: 0 pcmk__primitive_assign: Fencing allocation score on node2: 0 pcmk__primitive_assign: Fencing allocation score on node3: 0 -pcmk__primitive_assign: rsc1:0 allocation score on node1: 100 +pcmk__primitive_assign: rsc1:0 allocation score on node1: -INFINITY pcmk__primitive_assign: rsc1:0 allocation score on node1: 100 pcmk__primitive_assign: rsc1:0 allocation score on node2: 2 pcmk__primitive_assign: rsc1:0 allocation score on node2: 2 @@ -85,17 +85,17 @@ pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY pcmk__primitive_assign: rsc1:1 allocation score on node1: 100 -pcmk__primitive_assign: rsc1:1 allocation score on node2: 0 -pcmk__primitive_assign: rsc1:1 allocation score on node2: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 -pcmk__primitive_assign: rsc1:2 allocation score on node1: -INFINITY -pcmk__primitive_assign: rsc1:2 allocation score on node2: 0 +pcmk__primitive_assign: rsc1:2 allocation score on node1: 100 +pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY pcmk__primitive_assign: rsc2:0 allocation score on node1: 0 -pcmk__primitive_assign: rsc2:0 allocation score on node1: 0 -pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on 
node2: 1 pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY @@ -104,6 +104,6 @@ pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY pcmk__primitive_assign: rsc2:1 allocation score on node3: 1 -pcmk__primitive_assign: rsc2:2 allocation score on node1: -INFINITY -pcmk__primitive_assign: rsc2:2 allocation score on node2: 0 +pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-6.scores b/cts/scheduler/scores/clone-recover-no-shuffle-6.scores index 643e30f9d18..f1f300cbd66 100644 --- a/cts/scheduler/scores/clone-recover-no-shuffle-6.scores +++ b/cts/scheduler/scores/clone-recover-no-shuffle-6.scores @@ -41,16 +41,16 @@ pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY pcmk__primitive_assign: Fencing allocation score on node1: 0 pcmk__primitive_assign: Fencing allocation score on node2: 0 pcmk__primitive_assign: Fencing allocation score on node3: 0 -pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 -pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 -pcmk__primitive_assign: base-bundle-1 allocation score on node2: 0 -pcmk__primitive_assign: base-bundle-1 allocation score on node3: 10000 -pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0 -pcmk__primitive_assign: base-bundle-2 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 -pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 100 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: -INFINITY pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 100 pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 @@ -60,10 +60,10 @@ pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINIT pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 100 pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 -pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 -pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 -pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY -pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: 
base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 100 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-7.scores b/cts/scheduler/scores/clone-recover-no-shuffle-7.scores index fc45bf740fd..503cbb3addf 100644 --- a/cts/scheduler/scores/clone-recover-no-shuffle-7.scores +++ b/cts/scheduler/scores/clone-recover-no-shuffle-7.scores @@ -1,7 +1,7 @@ -dummy:0 promotion score on node1: 15 +dummy:0 promotion score on node3: 5 dummy:1 promotion score on node2: 10 -dummy:2 promotion score on node3: 5 +dummy:2 promotion score on node1: 15 pcmk__clone_assign: dummy-clone allocation score on node1: 0 pcmk__clone_assign: dummy-clone allocation score on node2: 0 pcmk__clone_assign: dummy-clone allocation score on node3: 0 @@ -17,7 +17,7 @@ pcmk__clone_assign: dummy:2 allocation score on node3: 5 pcmk__primitive_assign: Fencing allocation score on node1: 0 pcmk__primitive_assign: Fencing allocation score on node2: 0 pcmk__primitive_assign: Fencing allocation score on node3: 0 -pcmk__primitive_assign: dummy:0 allocation score on node1: 15 +pcmk__primitive_assign: dummy:0 allocation score on node1: -INFINITY pcmk__primitive_assign: dummy:0 allocation score on node1: 15 pcmk__primitive_assign: dummy:0 allocation score on node2: 0 pcmk__primitive_assign: dummy:0 allocation score on node2: 0 @@ -27,8 +27,8 @@ pcmk__primitive_assign: dummy:1 allocation score on node1: -INFINITY pcmk__primitive_assign: dummy:1 allocation score on node1: 15 pcmk__primitive_assign: dummy:1 allocation score on node2: 11 pcmk__primitive_assign: dummy:1 allocation score on node2: 11 -pcmk__primitive_assign: dummy:1 allocation score on node3: 0 -pcmk__primitive_assign: dummy:1 allocation score on node3: 0 -pcmk__primitive_assign: dummy:2 allocation score on node1: -INFINITY +pcmk__primitive_assign: dummy:1 allocation score on node3: -INFINITY +pcmk__primitive_assign: dummy:1 allocation score on node3: -INFINITY +pcmk__primitive_assign: dummy:2 allocation score on node1: 15 pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY -pcmk__primitive_assign: dummy:2 allocation score on node3: 5 +pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/promoted-failed-demote-2.scores b/cts/scheduler/scores/promoted-failed-demote-2.scores index e457d8c6057..39399d9eac4 100644 --- a/cts/scheduler/scores/promoted-failed-demote-2.scores +++ b/cts/scheduler/scores/promoted-failed-demote-2.scores @@ -34,14 +34,10 @@ pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-2:1 allocation score on dl380g5a: INFINITY pcmk__group_assign: stateful-2:1 allocation score on dl380g5b: 0 pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY -pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY -pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5a: INFINITY pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5b: 0 pcmk__primitive_assign: 
stateful-2:0 allocation score on dl380g5a: -INFINITY -pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY -pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5a: INFINITY pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5b: -INFINITY diff --git a/cts/scheduler/scores/promoted-failed-demote.scores b/cts/scheduler/scores/promoted-failed-demote.scores index e457d8c6057..39399d9eac4 100644 --- a/cts/scheduler/scores/promoted-failed-demote.scores +++ b/cts/scheduler/scores/promoted-failed-demote.scores @@ -34,14 +34,10 @@ pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-2:1 allocation score on dl380g5a: INFINITY pcmk__group_assign: stateful-2:1 allocation score on dl380g5b: 0 pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY -pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY -pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5a: INFINITY pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5b: 0 pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY -pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY -pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5a: INFINITY pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5b: -INFINITY diff --git a/cts/scheduler/scores/utilization-complex.scores b/cts/scheduler/scores/utilization-complex.scores index 29bc92c193f..b9dd80c4b6a 100644 --- a/cts/scheduler/scores/utilization-complex.scores +++ b/cts/scheduler/scores/utilization-complex.scores @@ -312,18 +312,26 @@ pcmk__primitive_assign: clone1:2 allocation score on rhel8-4: 1 pcmk__primitive_assign: clone1:2 allocation score on rhel8-5: 0 pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-0: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-0: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-0: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-1: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-1: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-1: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-2: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-2: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-2: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on rhel8-1: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on rhel8-1: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on rhel8-1: 0 pcmk__primitive_assign: clone1:3 allocation score on rhel8-2: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on rhel8-2: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on rhel8-2: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on rhel8-3: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on rhel8-3: -INFINITY 
pcmk__primitive_assign: clone1:3 allocation score on rhel8-3: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on rhel8-4: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on rhel8-4: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on rhel8-4: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on rhel8-5: 1 pcmk__primitive_assign: clone1:3 allocation score on rhel8-5: 1 pcmk__primitive_assign: clone1:3 allocation score on rhel8-5: 1 pcmk__primitive_assign: clone1:4 allocation score on httpd-bundle-0: -INFINITY @@ -384,18 +392,26 @@ pcmk__primitive_assign: clone2:2 allocation score on rhel8-4: 1 pcmk__primitive_assign: clone2:2 allocation score on rhel8-5: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-0: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-0: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-0: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-1: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-1: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-1: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-2: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-2: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-2: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-1: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on rhel8-1: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-1: 0 pcmk__primitive_assign: clone2:3 allocation score on rhel8-2: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-2: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on rhel8-2: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on rhel8-3: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-3: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-3: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-4: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-4: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on rhel8-4: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on rhel8-5: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-5: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-5: -INFINITY pcmk__primitive_assign: clone2:4 allocation score on httpd-bundle-0: -INFINITY @@ -535,18 +551,26 @@ pcmk__primitive_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel pcmk__primitive_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-5: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-0: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-0: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-0: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-1: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-1: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-1: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-2: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-2: -INFINITY +pcmk__primitive_assign: 
httpd-bundle-podman-0 allocation score on httpd-bundle-2: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-1: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-1: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-1: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-2: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-2: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-2: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-3: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-3: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-3: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-5: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-5: 0 pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-5: 0 pcmk__primitive_assign: httpd-bundle-podman-1 allocation score on httpd-bundle-1: -INFINITY diff --git a/cts/scheduler/scores/utilization-order2.scores b/cts/scheduler/scores/utilization-order2.scores index c4b49d9b366..4476b60ee21 100644 --- a/cts/scheduler/scores/utilization-order2.scores +++ b/cts/scheduler/scores/utilization-order2.scores @@ -9,6 +9,8 @@ pcmk__primitive_assign: rsc1 allocation score on node1: 0 pcmk__primitive_assign: rsc1 allocation score on node2: 0 pcmk__primitive_assign: rsc2:0 allocation score on node1: 1 pcmk__primitive_assign: rsc2:0 allocation score on node1: 1 +pcmk__primitive_assign: rsc2:0 allocation score on node1: 1 +pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc2:0 allocation score on node2: 0 pcmk__primitive_assign: rsc2:1 allocation score on node1: 0 diff --git a/cts/scheduler/summary/cancel-behind-moving-remote.summary b/cts/scheduler/summary/cancel-behind-moving-remote.summary index 7726876f949..58de340318b 100644 --- a/cts/scheduler/summary/cancel-behind-moving-remote.summary +++ b/cts/scheduler/summary/cancel-behind-moving-remote.summary @@ -58,22 +58,17 @@ Current cluster status: Transition Summary: * Start rabbitmq-bundle-1 ( controller-0 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked) * Start rabbitmq:1 ( rabbitmq-bundle-1 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked) - * Start ovn-dbs-bundle-podman-0 ( controller-2 ) - * Start ovn-dbs-bundle-0 ( controller-2 ) + * Start ovn-dbs-bundle-podman-0 ( controller-0 ) + * Start ovn-dbs-bundle-0 ( controller-0 ) * Start ovndb_servers:0 ( ovn-dbs-bundle-0 ) - * Move ovn-dbs-bundle-podman-1 ( controller-2 -> controller-0 ) - * Move ovn-dbs-bundle-1 ( controller-2 -> controller-0 ) - * Restart ovndb_servers:1 ( Unpromoted -> Promoted ovn-dbs-bundle-1 ) due to required ovn-dbs-bundle-podman-1 start - * Start ip-172.17.1.87 ( controller-0 ) + * Promote ovndb_servers:1 ( Unpromoted -> Promoted ovn-dbs-bundle-1 ) * Move stonith-fence_ipmilan-52540040bb56 ( messaging-2 -> database-0 ) * Move stonith-fence_ipmilan-525400e1534e ( database-1 -> messaging-2 ) 
Executing Cluster Transition: * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 * Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-1 - * Pseudo action: ovn-dbs-bundle-master_pre_notify_stop_0 - * Cluster action: clear_failcount for ovn-dbs-bundle-0 on controller-0 - * Cluster action: clear_failcount for ovn-dbs-bundle-1 on controller-2 + * Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0 * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-0 * Cluster action: clear_failcount for nova-evacuate on messaging-0 * Cluster action: clear_failcount for stonith-fence_ipmilan-525400aa1373 on database-0 @@ -87,52 +82,34 @@ Executing Cluster Transition: * Cluster action: clear_failcount for stonith-fence_ipmilan-52540060dbba on messaging-0 * Cluster action: clear_failcount for stonith-fence_ipmilan-525400e018b6 on database-0 * Cluster action: clear_failcount for stonith-fence_ipmilan-525400c87cdb on database-2 - * Pseudo action: ovn-dbs-bundle_stop_0 + * Pseudo action: ovn-dbs-bundle_start_0 * Pseudo action: rabbitmq-bundle_start_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 * Pseudo action: rabbitmq-bundle-clone_start_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_stop_0 - * Pseudo action: ovn-dbs-bundle-master_stop_0 + * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0 + * Pseudo action: ovn-dbs-bundle-master_start_0 + * Resource action: ovn-dbs-bundle-podman-0 start on controller-0 + * Resource action: ovn-dbs-bundle-0 start on controller-0 * Resource action: stonith-fence_ipmilan-52540040bb56 start on database-0 * Resource action: stonith-fence_ipmilan-525400e1534e start on messaging-2 * Pseudo action: rabbitmq-bundle-clone_running_0 - * Resource action: ovndb_servers stop on ovn-dbs-bundle-1 - * Pseudo action: ovn-dbs-bundle-master_stopped_0 - * Resource action: ovn-dbs-bundle-1 stop on controller-2 + * Resource action: ovndb_servers start on ovn-dbs-bundle-0 + * Pseudo action: ovn-dbs-bundle-master_running_0 + * Resource action: ovn-dbs-bundle-podman-0 monitor=60000 on controller-0 + * Resource action: ovn-dbs-bundle-0 monitor=30000 on controller-0 * Resource action: stonith-fence_ipmilan-52540040bb56 monitor=60000 on database-0 * Resource action: stonith-fence_ipmilan-525400e1534e monitor=60000 on messaging-2 * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 - * Pseudo action: ovn-dbs-bundle-master_post_notify_stopped_0 - * Resource action: ovn-dbs-bundle-podman-1 stop on controller-2 - * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_stopped_0 - * Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0 - * Pseudo action: ovn-dbs-bundle_stopped_0 - * Pseudo action: ovn-dbs-bundle_start_0 - * Pseudo action: rabbitmq-bundle_running_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0 - * Pseudo action: ovn-dbs-bundle-master_start_0 - * Resource action: ovn-dbs-bundle-podman-0 start on controller-2 - * Resource action: ovn-dbs-bundle-0 start on controller-2 - * Resource action: ovn-dbs-bundle-podman-1 start on controller-0 - * Resource action: ovn-dbs-bundle-1 start on controller-0 - * Resource action: ovndb_servers start on 
ovn-dbs-bundle-0 - * Resource action: ovndb_servers start on ovn-dbs-bundle-1 - * Pseudo action: ovn-dbs-bundle-master_running_0 - * Resource action: ovn-dbs-bundle-podman-0 monitor=60000 on controller-2 - * Resource action: ovn-dbs-bundle-0 monitor=30000 on controller-2 - * Resource action: ovn-dbs-bundle-podman-1 monitor=60000 on controller-0 - * Resource action: ovn-dbs-bundle-1 monitor=30000 on controller-0 * Pseudo action: ovn-dbs-bundle-master_post_notify_running_0 + * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_running_0 * Pseudo action: ovn-dbs-bundle_running_0 + * Pseudo action: rabbitmq-bundle_running_0 * Pseudo action: ovn-dbs-bundle-master_pre_notify_promote_0 * Pseudo action: ovn-dbs-bundle_promote_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 @@ -140,10 +117,8 @@ Executing Cluster Transition: * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_promote_0 * Pseudo action: ovn-dbs-bundle-master_promote_0 - * Resource action: ip-172.17.1.87 start on controller-0 * Resource action: ovndb_servers promote on ovn-dbs-bundle-1 * Pseudo action: ovn-dbs-bundle-master_promoted_0 - * Resource action: ip-172.17.1.87 monitor=10000 on controller-0 * Pseudo action: ovn-dbs-bundle-master_post_notify_promoted_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 @@ -187,10 +162,10 @@ Revised Cluster Status: * haproxy-bundle-podman-1 (ocf:heartbeat:podman): Started controller-0 * haproxy-bundle-podman-2 (ocf:heartbeat:podman): Started controller-1 * Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]: - * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-2 - * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Promoted controller-0 + * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-0 + * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Promoted controller-2 * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-1 - * ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Started controller-0 + * ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Stopped * stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-1 * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]: * Started: [ compute-0 compute-1 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-4.summary b/cts/scheduler/summary/clone-recover-no-shuffle-4.summary index 944bcb834b3..0b6866ec16c 100644 --- a/cts/scheduler/summary/clone-recover-no-shuffle-4.summary +++ b/cts/scheduler/summary/clone-recover-no-shuffle-4.summary @@ -10,19 +10,13 @@ Current cluster status: * Stopped: [ node1 ] Transition Summary: - * Move dummy:0 ( node2 -> node1 ) - * Start dummy:2 ( node2 ) + * Start dummy:2 ( node1 ) Executing Cluster Transition: - * Pseudo action: dummy-clone_stop_0 - * Resource action: dummy stop on node2 - * Pseudo action: dummy-clone_stopped_0 * Pseudo action: dummy-clone_start_0 * Resource action: dummy start on node1 - * Resource action: dummy start on node2 * Pseudo action: dummy-clone_running_0 * Resource action: dummy monitor=10000 on node1 - * Resource action: dummy monitor=10000 on node2 Using the original execution date of: 2023-06-21 
00:59:59Z Revised Cluster Status: diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary index 121214c42ab..8b18120ad8d 100644 --- a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary +++ b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary @@ -9,31 +9,17 @@ Current cluster status: * Stopped: [ node1 ] Transition Summary: - * Move rsc1:0 ( node2 -> node1 ) - * Move rsc2:0 ( node2 -> node1 ) - * Start rsc1:2 ( node2 ) - * Start rsc2:2 ( node2 ) + * Start rsc1:2 ( node1 ) + * Start rsc2:2 ( node1 ) Executing Cluster Transition: - * Pseudo action: grp-clone_stop_0 - * Pseudo action: grp:0_stop_0 - * Resource action: rsc2 stop on node2 - * Resource action: rsc1 stop on node2 - * Pseudo action: grp:0_stopped_0 - * Pseudo action: grp-clone_stopped_0 * Pseudo action: grp-clone_start_0 - * Pseudo action: grp:0_start_0 + * Pseudo action: grp:2_start_0 * Resource action: rsc1 start on node1 * Resource action: rsc2 start on node1 - * Pseudo action: grp:2_start_0 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 - * Pseudo action: grp:0_running_0 + * Pseudo action: grp:2_running_0 * Resource action: rsc1 monitor=10000 on node1 * Resource action: rsc2 monitor=10000 on node1 - * Pseudo action: grp:2_running_0 - * Resource action: rsc1 monitor=10000 on node2 - * Resource action: rsc2 monitor=10000 on node2 * Pseudo action: grp-clone_running_0 Revised Cluster Status: diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-6.summary b/cts/scheduler/summary/clone-recover-no-shuffle-6.summary index 19a957e15fb..5702177e33d 100644 --- a/cts/scheduler/summary/clone-recover-no-shuffle-6.summary +++ b/cts/scheduler/summary/clone-recover-no-shuffle-6.summary @@ -11,48 +11,22 @@ Current cluster status: * base-bundle-2 (ocf:pacemaker:Stateful): Stopped Transition Summary: - * Move base-bundle-podman-0 ( node3 -> node1 ) - * Move base-bundle-0 ( node3 -> node1 ) - * Restart base:0 ( base-bundle-0 ) due to required base-bundle-podman-0 start - * Move base-bundle-podman-1 ( node2 -> node3 ) - * Move base-bundle-1 ( node2 -> node3 ) - * Restart base:1 ( base-bundle-1 ) due to required base-bundle-podman-1 start - * Start base-bundle-podman-2 ( node2 ) - * Start base-bundle-2 ( node2 ) - * Start base:2 ( base-bundle-2 ) + * Start base-bundle-podman-2 ( node1 ) + * Start base-bundle-2 ( node1 ) + * Start base:2 ( base-bundle-2 ) Executing Cluster Transition: - * Pseudo action: base-bundle_stop_0 * Pseudo action: base-bundle_start_0 - * Pseudo action: base-bundle-clone_stop_0 - * Resource action: base-bundle-podman-2 start on node2 + * Pseudo action: base-bundle-clone_start_0 + * Resource action: base-bundle-podman-2 start on node1 * Resource action: base-bundle-2 monitor on node3 * Resource action: base-bundle-2 monitor on node2 * Resource action: base-bundle-2 monitor on node1 - * Resource action: base stop on base-bundle-1 - * Resource action: base-bundle-1 stop on node2 - * Resource action: base-bundle-podman-2 monitor=60000 on node2 - * Resource action: base-bundle-2 start on node2 - * Resource action: base stop on base-bundle-0 - * Pseudo action: base-bundle-clone_stopped_0 - * Pseudo action: base-bundle-clone_start_0 - * Resource action: base-bundle-0 stop on node3 - * Resource action: base-bundle-podman-1 stop on node2 - * Resource action: base-bundle-2 monitor=30000 on node2 - * Resource action: base-bundle-podman-0 stop on node3 - * Resource action: base-bundle-podman-1 start on node3 - * 
Resource action: base-bundle-1 start on node3 - * Pseudo action: base-bundle_stopped_0 - * Resource action: base-bundle-podman-0 start on node1 - * Resource action: base-bundle-0 start on node1 - * Resource action: base-bundle-podman-1 monitor=60000 on node3 - * Resource action: base-bundle-1 monitor=30000 on node3 - * Resource action: base start on base-bundle-0 - * Resource action: base start on base-bundle-1 + * Resource action: base-bundle-podman-2 monitor=60000 on node1 + * Resource action: base-bundle-2 start on node1 * Resource action: base start on base-bundle-2 * Pseudo action: base-bundle-clone_running_0 - * Resource action: base-bundle-podman-0 monitor=60000 on node1 - * Resource action: base-bundle-0 monitor=30000 on node1 + * Resource action: base-bundle-2 monitor=30000 on node1 * Pseudo action: base-bundle_running_0 Revised Cluster Status: @@ -63,6 +37,6 @@ Revised Cluster Status: * Full List of Resources: * Fencing (stonith:fence_xvm): Started node2 * Container bundle set: base-bundle [localhost/pcmktest]: - * base-bundle-0 (ocf:pacemaker:Stateful): Started node1 - * base-bundle-1 (ocf:pacemaker:Stateful): Started node3 - * base-bundle-2 (ocf:pacemaker:Stateful): Started node2 + * base-bundle-0 (ocf:pacemaker:Stateful): Started node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Started node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Started node1 diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-7.summary b/cts/scheduler/summary/clone-recover-no-shuffle-7.summary index e6c9baed0db..77445700f04 100644 --- a/cts/scheduler/summary/clone-recover-no-shuffle-7.summary +++ b/cts/scheduler/summary/clone-recover-no-shuffle-7.summary @@ -10,24 +10,18 @@ Current cluster status: * Stopped: [ node1 ] Transition Summary: - * Move dummy:0 ( Unpromoted node3 -> Promoted node1 ) - * Demote dummy:1 ( Promoted -> Unpromoted node2 ) - * Start dummy:2 ( node3 ) + * Demote dummy:1 ( Promoted -> Unpromoted node2 ) + * Promote dummy:2 ( Stopped -> Promoted node1 ) Executing Cluster Transition: * Resource action: dummy cancel=10000 on node2 * Pseudo action: dummy-clone_demote_0 * Resource action: dummy demote on node2 * Pseudo action: dummy-clone_demoted_0 - * Pseudo action: dummy-clone_stop_0 - * Resource action: dummy stop on node3 - * Resource action: dummy monitor=11000 on node2 - * Pseudo action: dummy-clone_stopped_0 * Pseudo action: dummy-clone_start_0 + * Resource action: dummy monitor=11000 on node2 * Resource action: dummy start on node1 - * Resource action: dummy start on node3 * Pseudo action: dummy-clone_running_0 - * Resource action: dummy monitor=11000 on node3 * Pseudo action: dummy-clone_promote_0 * Resource action: dummy promote on node1 * Pseudo action: dummy-clone_promoted_0 diff --git a/cts/scheduler/xml/cancel-behind-moving-remote.xml b/cts/scheduler/xml/cancel-behind-moving-remote.xml index 67e14300ba8..7b880602b1b 100644 --- a/cts/scheduler/xml/cancel-behind-moving-remote.xml +++ b/cts/scheduler/xml/cancel-behind-moving-remote.xml @@ -1,5 +1,19 @@ + diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-4.xml b/cts/scheduler/xml/clone-recover-no-shuffle-4.xml index 40e6520c6d0..f0a5feb8c2f 100644 --- a/cts/scheduler/xml/clone-recover-no-shuffle-4.xml +++ b/cts/scheduler/xml/clone-recover-no-shuffle-4.xml @@ -11,11 +11,6 @@ * Instance dummy:0 should remain started on node2 * Instance dummy:1 should remain started on node3 * Instance dummy:2 should start on node1 - - This test output is incorrect: - * Instance dummy:0 moves from node2 to node1 - * Instance dummy:1 
remains started on node3 (correct) - * Instance dummy:2 starts on node2 --> diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-5.xml b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml index 45f3b5a9f3a..95e5eca9c9d 100644 --- a/cts/scheduler/xml/clone-recover-no-shuffle-5.xml +++ b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml @@ -12,11 +12,6 @@ * Instance grp:0 should remain started on node2 * Instance grp:1 should remain started on node3 * Instance grp:2 should start on node1 - - This test output is incorrect: - * Instance grp:0 moves to node1 - * Instance grp:1 remains started on node3 (correct) - * Instance grp:2 starts on node2 --> diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-6.xml b/cts/scheduler/xml/clone-recover-no-shuffle-6.xml index 3de42f581d4..64bb4d90179 100644 --- a/cts/scheduler/xml/clone-recover-no-shuffle-6.xml +++ b/cts/scheduler/xml/clone-recover-no-shuffle-6.xml @@ -12,11 +12,6 @@ * Instance base:0 should remain started on node3 * Instance base:1 should remain started on node2 * Instance base:2 should start on node1 - - This test output is incorrect: - * Instance base:0 moves from node3 to node1 - * Instance base:1 moves from node2 to node3 - * Instance base:2 starts on node2 --> diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-7.xml b/cts/scheduler/xml/clone-recover-no-shuffle-7.xml index 6e9dad50db4..e588b811d77 100644 --- a/cts/scheduler/xml/clone-recover-no-shuffle-7.xml +++ b/cts/scheduler/xml/clone-recover-no-shuffle-7.xml @@ -11,11 +11,6 @@ * Instance dummy:0 should remain started (unpromoted) on node3 * Instance dummy:1 should demote on node2 * Instance dummy:2 should promote on node1 - - This test output is incorrect: - * Instance dummy:0 moves from unpromoted on node3 to promoted on node1 - * Instance dummy:1 demotes on node2 - * Instance dummy:2 starts on node3 -->
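
A note for anyone reproducing these test updates: expected scheduler outputs
like the summaries above are regenerated by the in-tree regression harness
rather than edited by hand. A minimal sketch of the usual workflow, assuming
the cts-scheduler helper and its --run/--update options as found in current
Pacemaker trees:

    # Re-run a single scheduler test against its input XML and diff the
    # computed transition against the stored expected summary
    cts/cts-scheduler --run clone-recover-no-shuffle-4

    # After verifying the new transition is correct, refresh the stored
    # expected output so future runs compare against it
    cts/cts-scheduler --run clone-recover-no-shuffle-4 --update

The same pattern applies to the other tests touched here (for example,
clone-recover-no-shuffle-5 through -7 and cancel-behind-moving-remote).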