diff --git a/001-remote-start-state.patch b/001-remote-start-state.patch deleted file mode 100644 index e66bf16..0000000 --- a/001-remote-start-state.patch +++ /dev/null @@ -1,402 +0,0 @@ -From cf53f523e691295879cd75cff1a86bc15664fa51 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 2 May 2023 09:59:13 -0400 -Subject: [PATCH 1/7] Feature: daemons: Add start state to LRMD handshake XML - -This gets read out of /etc/sysconfig/pacemaker and set into the -environment. The remote node executor will then add that to the XML -that it sends to the controller upon startup. - -Ref T183 ---- - daemons/execd/execd_commands.c | 5 +++++ - include/crm_internal.h | 1 + - 2 files changed, 6 insertions(+) - -diff --git a/daemons/execd/execd_commands.c b/daemons/execd/execd_commands.c -index fa2761e..9a783a5 100644 ---- a/daemons/execd/execd_commands.c -+++ b/daemons/execd/execd_commands.c -@@ -1474,6 +1474,7 @@ process_lrmd_signon(pcmk__client_t *client, xmlNode *request, int call_id, - int rc = pcmk_ok; - time_t now = time(NULL); - const char *protocol_version = crm_element_value(request, F_LRMD_PROTOCOL_VERSION); -+ const char *start_state = pcmk__env_option(PCMK__ENV_NODE_START_STATE); - - if (compare_version(protocol_version, LRMD_MIN_PROTOCOL_VERSION) < 0) { - crm_err("Cluster API version must be greater than or equal to %s, not %s", -@@ -1503,6 +1504,10 @@ process_lrmd_signon(pcmk__client_t *client, xmlNode *request, int call_id, - crm_xml_add(*reply, F_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION); - crm_xml_add_ll(*reply, PCMK__XA_UPTIME, now - start_time); - -+ if (start_state) { -+ crm_xml_add(*reply, PCMK__XA_NODE_START_STATE, start_state); -+ } -+ - return rc; - } - -diff --git a/include/crm_internal.h b/include/crm_internal.h -index 5f6531f..771bd26 100644 ---- a/include/crm_internal.h -+++ b/include/crm_internal.h -@@ -84,6 +84,7 @@ - #define PCMK__XA_GRAPH_ERRORS "graph-errors" - #define PCMK__XA_GRAPH_WARNINGS "graph-warnings" - #define PCMK__XA_MODE "mode" -+#define PCMK__XA_NODE_START_STATE "node_start_state" - #define PCMK__XA_TASK "task" - #define PCMK__XA_UPTIME "uptime" - #define PCMK__XA_CONN_HOST "connection_host" --- -2.31.1 - -From c950291742711b5c4c8986adc8e938fe6fef861c Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 2 May 2023 10:04:32 -0400 -Subject: [PATCH 2/7] Feature: liblrmd: Save a remote node's requested start - state - -Ref T183 ---- - include/crm/common/ipc_internal.h | 1 + - lib/lrmd/lrmd_client.c | 7 +++++++ - 2 files changed, 8 insertions(+) - -diff --git a/include/crm/common/ipc_internal.h b/include/crm/common/ipc_internal.h -index 5099dda..d203924 100644 ---- a/include/crm/common/ipc_internal.h -+++ b/include/crm/common/ipc_internal.h -@@ -112,6 +112,7 @@ struct pcmk__remote_s { - int tcp_socket; - mainloop_io_t *source; - time_t uptime; -+ char *start_state; - - /* CIB-only */ - char *token; -diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c -index c565728..4239105 100644 ---- a/lib/lrmd/lrmd_client.c -+++ b/lib/lrmd/lrmd_client.c -@@ -588,7 +588,9 @@ lrmd_tls_connection_destroy(gpointer userdata) - } - - free(native->remote->buffer); -+ free(native->remote->start_state); - native->remote->buffer = NULL; -+ native->remote->start_state = NULL; - native->source = 0; - native->sock = 0; - native->psk_cred_c = NULL; -@@ -980,6 +982,7 @@ lrmd_handshake(lrmd_t * lrmd, const char *name) - const char *version = crm_element_value(reply, F_LRMD_PROTOCOL_VERSION); - const char *msg_type = crm_element_value(reply, F_LRMD_OPERATION); - const char 
*tmp_ticket = crm_element_value(reply, F_LRMD_CLIENTID); -+ const char *start_state = crm_element_value(reply, PCMK__XA_NODE_START_STATE); - long long uptime = -1; - - crm_element_value_int(reply, F_LRMD_RC, &rc); -@@ -992,6 +995,10 @@ lrmd_handshake(lrmd_t * lrmd, const char *name) - crm_element_value_ll(reply, PCMK__XA_UPTIME, &uptime); - native->remote->uptime = uptime; - -+ if (start_state) { -+ native->remote->start_state = strdup(start_state); -+ } -+ - if (rc == -EPROTO) { - crm_err("Executor protocol version mismatch between client (%s) and server (%s)", - LRMD_PROTOCOL_VERSION, version); --- -2.31.1 - -From 7302014c7b7296be31b1f542b3f107d55b1fb2a0 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 2 May 2023 10:05:13 -0400 -Subject: [PATCH 3/7] Feature: liblrmd: Add lrmd__node_start_state. - -This function is used to get the start state out of an lrmd_private_t -structure. - -Ref T183 ---- - include/crm/lrmd_internal.h | 1 + - lib/lrmd/lrmd_client.c | 12 ++++++++++++ - 2 files changed, 13 insertions(+) - -diff --git a/include/crm/lrmd_internal.h b/include/crm/lrmd_internal.h -index 5810554..d1cd25d 100644 ---- a/include/crm/lrmd_internal.h -+++ b/include/crm/lrmd_internal.h -@@ -47,6 +47,7 @@ void lrmd__set_result(lrmd_event_data_t *event, enum ocf_exitcode rc, - void lrmd__reset_result(lrmd_event_data_t *event); - - time_t lrmd__uptime(lrmd_t *lrmd); -+const char *lrmd__node_start_state(lrmd_t *lrmd); - - /* Shared functions for IPC proxy back end */ - -diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c -index 4239105..82434b9 100644 ---- a/lib/lrmd/lrmd_client.c -+++ b/lib/lrmd/lrmd_client.c -@@ -2538,3 +2538,15 @@ lrmd__uptime(lrmd_t *lrmd) - return native->remote->uptime; - } - } -+ -+const char * -+lrmd__node_start_state(lrmd_t *lrmd) -+{ -+ lrmd_private_t *native = lrmd->lrmd_private; -+ -+ if (native->remote == NULL) { -+ return NULL; -+ } else { -+ return native->remote->start_state; -+ } -+} --- -2.31.1 - -From e5e4d43f847da0930bae12f63c7e9d9c44c07cdf Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 2 May 2023 10:07:58 -0400 -Subject: [PATCH 4/7] Refactor: controller: Make set_join_state a public - function. - -This already does all the work of setting a node's start state. It just -needs to be made public and given arguments for what node to set instead -of reading globals. 
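(A minimal sketch of the resulting call, using only names that appear in this patch: the caller now passes the local node's identity explicitly instead of the function reading controld_globals itself.)

    set_join_state(start_state, controld_globals.our_nodename,
                   controld_globals.our_uuid);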
- -Ref T183 ---- - daemons/controld/controld_join_client.c | 20 ++++++++++---------- - daemons/controld/pacemaker-controld.h | 3 +++ - 2 files changed, 13 insertions(+), 10 deletions(-) - -diff --git a/daemons/controld/controld_join_client.c b/daemons/controld/controld_join_client.c -index da6a9d6..07e2a27 100644 ---- a/daemons/controld/controld_join_client.c -+++ b/daemons/controld/controld_join_client.c -@@ -195,32 +195,31 @@ join_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void * - free_xml(generation); - } - --static void --set_join_state(const char * start_state) -+void -+set_join_state(const char *start_state, const char *node_name, const char *node_uuid) - { - if (pcmk__str_eq(start_state, "standby", pcmk__str_casei)) { - crm_notice("Forcing node %s to join in %s state per configured " -- "environment", controld_globals.our_nodename, start_state); -+ "environment", node_name, start_state); - cib__update_node_attr(controld_globals.logger_out, - controld_globals.cib_conn, cib_sync_call, -- XML_CIB_TAG_NODES, controld_globals.our_uuid, -+ XML_CIB_TAG_NODES, node_uuid, - NULL, NULL, NULL, "standby", "on", NULL, NULL); - - } else if (pcmk__str_eq(start_state, "online", pcmk__str_casei)) { - crm_notice("Forcing node %s to join in %s state per configured " -- "environment", controld_globals.our_nodename, start_state); -+ "environment", node_name, start_state); - cib__update_node_attr(controld_globals.logger_out, - controld_globals.cib_conn, cib_sync_call, -- XML_CIB_TAG_NODES, controld_globals.our_uuid, -+ XML_CIB_TAG_NODES, node_uuid, - NULL, NULL, NULL, "standby", "off", NULL, NULL); - - } else if (pcmk__str_eq(start_state, "default", pcmk__str_casei)) { -- crm_debug("Not forcing a starting state on node %s", -- controld_globals.our_nodename); -+ crm_debug("Not forcing a starting state on node %s", node_name); - - } else { - crm_warn("Unrecognized start state '%s', using 'default' (%s)", -- start_state, controld_globals.our_nodename); -+ start_state, node_name); - } - } - -@@ -335,7 +334,8 @@ do_cl_join_finalize_respond(long long action, - - first_join = FALSE; - if (start_state) { -- set_join_state(start_state); -+ set_join_state(start_state, controld_globals.our_nodename, -+ controld_globals.our_uuid); - } - } - -diff --git a/daemons/controld/pacemaker-controld.h b/daemons/controld/pacemaker-controld.h -index 1484a00..d8c2ddd 100644 ---- a/daemons/controld/pacemaker-controld.h -+++ b/daemons/controld/pacemaker-controld.h -@@ -36,4 +36,7 @@ void controld_remove_voter(const char *uname); - void controld_election_fini(void); - void controld_stop_current_election_timeout(void); - -+void set_join_state(const char *start_state, const char *node_name, -+ const char *node_uuid); -+ - #endif --- -2.31.1 - -From 63d069adb344bba2c982013226f87dfd95afaff3 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 2 May 2023 13:38:03 -0400 -Subject: [PATCH 5/7] Refactor: controller: set_join_state needs to take a - remote parameter. - -Without this parameter, we won't know what to pass to as node_type to -cib__update_node_attr. And without that, that function will not know to -update a remote node - it'll try to update a regular node by the same -name, which either doesn't exist or is not what we were hoping would -happen. 
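(A hedged sketch of how the new parameter is consumed, composed from this patch's own hunks: the boolean selects cib__update_node_attr()'s node_type argument, so the update targets a remote node's <nodes> entry when appropriate.)

    cib__update_node_attr(controld_globals.logger_out,
                          controld_globals.cib_conn, cib_sync_call,
                          XML_CIB_TAG_NODES, node_uuid,
                          NULL, NULL, NULL, "standby", "on", NULL,
                          remote ? "remote" : NULL);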
- -Ref T138 ---- - daemons/controld/controld_join_client.c | 11 +++++++---- - daemons/controld/pacemaker-controld.h | 2 +- - 2 files changed, 8 insertions(+), 5 deletions(-) - -diff --git a/daemons/controld/controld_join_client.c b/daemons/controld/controld_join_client.c -index 07e2a27..799d1b4 100644 ---- a/daemons/controld/controld_join_client.c -+++ b/daemons/controld/controld_join_client.c -@@ -196,7 +196,8 @@ join_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void * - } - - void --set_join_state(const char *start_state, const char *node_name, const char *node_uuid) -+set_join_state(const char *start_state, const char *node_name, const char *node_uuid, -+ bool remote) - { - if (pcmk__str_eq(start_state, "standby", pcmk__str_casei)) { - crm_notice("Forcing node %s to join in %s state per configured " -@@ -204,7 +205,8 @@ set_join_state(const char *start_state, const char *node_name, const char *node_ - cib__update_node_attr(controld_globals.logger_out, - controld_globals.cib_conn, cib_sync_call, - XML_CIB_TAG_NODES, node_uuid, -- NULL, NULL, NULL, "standby", "on", NULL, NULL); -+ NULL, NULL, NULL, "standby", "on", NULL, -+ remote ? "remote" : NULL); - - } else if (pcmk__str_eq(start_state, "online", pcmk__str_casei)) { - crm_notice("Forcing node %s to join in %s state per configured " -@@ -212,7 +214,8 @@ set_join_state(const char *start_state, const char *node_name, const char *node_ - cib__update_node_attr(controld_globals.logger_out, - controld_globals.cib_conn, cib_sync_call, - XML_CIB_TAG_NODES, node_uuid, -- NULL, NULL, NULL, "standby", "off", NULL, NULL); -+ NULL, NULL, NULL, "standby", "off", NULL, -+ remote ? "remote" : NULL); - - } else if (pcmk__str_eq(start_state, "default", pcmk__str_casei)) { - crm_debug("Not forcing a starting state on node %s", node_name); -@@ -335,7 +338,7 @@ do_cl_join_finalize_respond(long long action, - first_join = FALSE; - if (start_state) { - set_join_state(start_state, controld_globals.our_nodename, -- controld_globals.our_uuid); -+ controld_globals.our_uuid, false); - } - } - -diff --git a/daemons/controld/pacemaker-controld.h b/daemons/controld/pacemaker-controld.h -index d8c2ddd..2334cce 100644 ---- a/daemons/controld/pacemaker-controld.h -+++ b/daemons/controld/pacemaker-controld.h -@@ -37,6 +37,6 @@ void controld_election_fini(void); - void controld_stop_current_election_timeout(void); - - void set_join_state(const char *start_state, const char *node_name, -- const char *node_uuid); -+ const char *node_uuid, bool remote); - - #endif --- -2.31.1 - -From 67274787898355065315f8c06d62458e2c2b0afe Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 2 May 2023 10:09:02 -0400 -Subject: [PATCH 6/7] Feature: controller: When a remote node starts, apply any - start state. - -If we were given a start state in the handshake XML, that is now stored -in the remote node cache's private data. Extract it and set the state -on the node with set_node_state. 
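(A sketch of the lookup this patch adds to remote_node_up(), using names from the series: find the connection resource's executor handle, read any start state captured during the handshake, and apply it.)

    lrm_state_t *connection_rsc = lrm_state_find(node->uname);

    if (connection_rsc != NULL) {
        // Start state reported by the remote executor at sign-on, if any
        const char *start_state = lrmd__node_start_state(connection_rsc->conn);

        if (start_state != NULL) {
            set_join_state(start_state, node->uname, node->uuid, true);
        }
    }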
- -Fixes T183 ---- - daemons/controld/controld_remote_ra.c | 15 +++++++++++++++ - 1 file changed, 15 insertions(+) - -diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c -index f24b755..8ab1e46 100644 ---- a/daemons/controld/controld_remote_ra.c -+++ b/daemons/controld/controld_remote_ra.c -@@ -280,6 +280,7 @@ remote_node_up(const char *node_name) - int call_opt; - xmlNode *update, *state; - crm_node_t *node; -+ lrm_state_t *connection_rsc = NULL; - - CRM_CHECK(node_name != NULL, return); - crm_info("Announcing Pacemaker Remote node %s", node_name); -@@ -301,6 +302,20 @@ remote_node_up(const char *node_name) - purge_remote_node_attrs(call_opt, node); - pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0); - -+ /* Apply any start state that we were given from the environment on the -+ * remote node. -+ */ -+ connection_rsc = lrm_state_find(node->uname); -+ -+ if (connection_rsc != NULL) { -+ lrmd_t *lrm = connection_rsc->conn; -+ const char *start_state = lrmd__node_start_state(lrm); -+ -+ if (start_state) { -+ set_join_state(start_state, node->uname, node->uuid, true); -+ } -+ } -+ - /* pacemaker_remote nodes don't participate in the membership layer, - * so cluster nodes don't automatically get notified when they come and go. - * We send a cluster message to the DC, and update the CIB node state entry, --- -2.31.1 - -From 91cdda7056c9b9254a0d7e7a016b30f788e3e3ff Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 2 May 2023 10:16:30 -0400 -Subject: [PATCH 7/7] Doc: sysconfig: Remote nodes now respect start state. - -Ref T183 ---- - etc/sysconfig/pacemaker.in | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - -diff --git a/etc/sysconfig/pacemaker.in b/etc/sysconfig/pacemaker.in -index 3b03ad6..041da71 100644 ---- a/etc/sysconfig/pacemaker.in -+++ b/etc/sysconfig/pacemaker.in -@@ -144,8 +144,7 @@ - # By default, the local host will join the cluster in an online or standby - # state when Pacemaker first starts depending on whether it was previously put - # into standby mode. If this variable is set to "standby" or "online", it will --# force the local host to join in the specified state. This has no effect on --# Pacemaker Remote nodes. -+# force the local host to join in the specified state. 
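(An example of the setting this change makes effective on remote nodes; the value shown is hypothetical and would go in /etc/sysconfig/pacemaker on the Pacemaker Remote node before pacemaker-remoted starts.)

    PCMK_node_start_state="standby"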
- # - # Default: PCMK_node_start_state="default" - --- -2.31.1 - diff --git a/002-group-colocation-constraint.patch b/002-group-colocation-constraint.patch deleted file mode 100644 index 4cd58c0..0000000 --- a/002-group-colocation-constraint.patch +++ /dev/null @@ -1,2661 +0,0 @@ -From 6d438daa021eaef4ca41b84009b9d6fc11173826 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 20 Apr 2023 11:01:41 -0500 -Subject: [PATCH 01/17] Refactor: scheduler: drop redundant argument from - pcmk__new_colocation() - ---- - lib/pacemaker/libpacemaker_private.h | 2 +- - lib/pacemaker/pcmk_sched_bundle.c | 5 ++--- - lib/pacemaker/pcmk_sched_colocation.c | 27 +++++++++++---------------- - lib/pacemaker/pcmk_sched_group.c | 3 +-- - lib/pacemaker/pcmk_sched_primitive.c | 3 +-- - 5 files changed, 16 insertions(+), 24 deletions(-) - -diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h -index 192d5a703ff..a6c13220e1d 100644 ---- a/lib/pacemaker/libpacemaker_private.h -+++ b/lib/pacemaker/libpacemaker_private.h -@@ -483,7 +483,7 @@ G_GNUC_INTERNAL - void pcmk__new_colocation(const char *id, const char *node_attr, int score, - pe_resource_t *dependent, pe_resource_t *primary, - const char *dependent_role, const char *primary_role, -- bool influence, pe_working_set_t *data_set); -+ bool influence); - - G_GNUC_INTERNAL - void pcmk__block_colocation_dependents(pe_action_t *action, -diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c -index 5682744395a..6024da68fb7 100644 ---- a/lib/pacemaker/pcmk_sched_bundle.c -+++ b/lib/pacemaker/pcmk_sched_bundle.c -@@ -83,7 +83,7 @@ pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer) - pcmk__new_colocation("child-remote-with-docker-remote", NULL, - INFINITY, replica->remote, - container_host->details->remote_rsc, NULL, -- NULL, true, rsc->cluster); -+ NULL, true); - } - - if (replica->remote) { -@@ -252,8 +252,7 @@ pcmk__bundle_internal_constraints(pe_resource_t *rsc) - pe_order_implies_first|pe_order_preserve); - - pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip, -- replica->container, NULL, NULL, true, -- rsc->cluster); -+ replica->container, NULL, NULL, true); - } - - if (replica->remote) { -diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c -index eeef4f1ca55..7d41f4d03e5 100644 ---- a/lib/pacemaker/pcmk_sched_colocation.c -+++ b/lib/pacemaker/pcmk_sched_colocation.c -@@ -297,13 +297,12 @@ anti_colocation_order(pe_resource_t *first_rsc, int first_role, - * \param[in] dependent_role Current role of \p dependent - * \param[in] primary_role Current role of \p primary - * \param[in] influence Whether colocation constraint has influence -- * \param[in,out] data_set Cluster working set to add constraint to - */ - void - pcmk__new_colocation(const char *id, const char *node_attr, int score, - pe_resource_t *dependent, pe_resource_t *primary, - const char *dependent_role, const char *primary_role, -- bool influence, pe_working_set_t *data_set) -+ bool influence) - { - pcmk__colocation_t *new_con = NULL; - -@@ -351,8 +350,8 @@ pcmk__new_colocation(const char *id, const char *node_attr, int score, - pcmk__add_this_with(&(dependent->rsc_cons), new_con); - pcmk__add_with_this(&(primary->rsc_cons_lhs), new_con); - -- data_set->colocation_constraints = g_list_append(data_set->colocation_constraints, -- new_con); -+ dependent->cluster->colocation_constraints = g_list_append( -+ dependent->cluster->colocation_constraints, new_con); - - if (score <= 
-INFINITY) { - anti_colocation_order(dependent, new_con->dependent_role, primary, -@@ -433,7 +432,7 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - pcmk__new_colocation(set_id, NULL, local_score, resource, - with, role, role, - unpack_influence(coloc_id, resource, -- influence_s), data_set); -+ influence_s)); - } - with = resource; - } -@@ -451,7 +450,7 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - pcmk__new_colocation(set_id, NULL, local_score, last, - resource, role, role, - unpack_influence(coloc_id, last, -- influence_s), data_set); -+ influence_s)); - } - - last = resource; -@@ -484,8 +483,7 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - pe_rsc_trace(resource, "Anti-Colocating %s with %s", resource->id, - with->id); - pcmk__new_colocation(set_id, NULL, local_score, -- resource, with, role, role, -- influence, data_set); -+ resource, with, role, role, influence); - } - } - } -@@ -535,8 +533,7 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - - if ((rsc_1 != NULL) && (rsc_2 != NULL)) { - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, role_2, -- unpack_influence(id, rsc_1, influence_s), -- data_set); -+ unpack_influence(id, rsc_1, influence_s)); - - } else if (rsc_1 != NULL) { - bool influence = unpack_influence(id, rsc_1, influence_s); -@@ -546,7 +543,7 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - - EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc)); - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, -- role_2, influence, data_set); -+ role_2, influence); - } - - } else if (rsc_2 != NULL) { -@@ -556,8 +553,7 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, - role_2, -- unpack_influence(id, rsc_1, influence_s), -- data_set); -+ unpack_influence(id, rsc_1, influence_s)); - } - - } else { -@@ -576,8 +572,7 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - - EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2)); - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, -- role_1, role_2, influence, -- data_set); -+ role_1, role_2, influence); - } - } - } -@@ -678,7 +673,7 @@ unpack_simple_colocation(xmlNode *xml_obj, const char *id, - - pcmk__new_colocation(id, attr, score_i, dependent, primary, - dependent_role, primary_role, -- unpack_influence(id, dependent, influence_s), data_set); -+ unpack_influence(id, dependent, influence_s)); - } - - // \return Standard Pacemaker return code -diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c -index cb139f7ddf9..c1392e07a4c 100644 ---- a/lib/pacemaker/pcmk_sched_group.c -+++ b/lib/pacemaker/pcmk_sched_group.c -@@ -171,8 +171,7 @@ member_internal_constraints(gpointer data, gpointer user_data) - // Colocate this member with the previous one - pcmk__new_colocation("group:internal_colocation", NULL, INFINITY, - member, member_data->previous_member, NULL, NULL, -- pcmk_is_set(member->flags, pe_rsc_critical), -- member->cluster); -+ pcmk_is_set(member->flags, pe_rsc_critical)); - } - - if (member_data->promotable) { -diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c -index aefbf9aa140..4e3eca3e18a 100644 ---- a/lib/pacemaker/pcmk_sched_primitive.c -+++ b/lib/pacemaker/pcmk_sched_primitive.c -@@ -999,8 +999,7 @@ pcmk__primitive_internal_constraints(pe_resource_t 
*rsc) - score = INFINITY; /* Force them to run on the same host */ - } - pcmk__new_colocation("resource-with-container", NULL, score, rsc, -- rsc->container, NULL, NULL, true, -- rsc->cluster); -+ rsc->container, NULL, NULL, true); - } - } - - -From c6efbe4bc45795f6991b600fc0a70b6a46c10fc3 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 26 Jun 2023 11:50:57 -0500 -Subject: [PATCH 02/17] Low: scheduler: improve error-checking when creating - colocations - ---- - lib/pacemaker/pcmk_sched_colocation.c | 20 ++++++++++++-------- - 1 file changed, 12 insertions(+), 8 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c -index 7d41f4d03e5..d591550fb97 100644 ---- a/lib/pacemaker/pcmk_sched_colocation.c -+++ b/lib/pacemaker/pcmk_sched_colocation.c -@@ -306,21 +306,24 @@ pcmk__new_colocation(const char *id, const char *node_attr, int score, - { - pcmk__colocation_t *new_con = NULL; - -- if (score == 0) { -- crm_trace("Ignoring colocation '%s' because score is 0", id); -- return; -- } -+ CRM_CHECK(id != NULL, return); -+ - if ((dependent == NULL) || (primary == NULL)) { - pcmk__config_err("Ignoring colocation '%s' because resource " - "does not exist", id); - return; - } - -- new_con = calloc(1, sizeof(pcmk__colocation_t)); -- if (new_con == NULL) { -+ if (score == 0) { -+ pe_rsc_trace(dependent, -+ "Ignoring colocation '%s' (%s with %s) because score is 0", -+ id, dependent->id, primary->id); - return; - } - -+ new_con = calloc(1, sizeof(pcmk__colocation_t)); -+ CRM_ASSERT(new_con != NULL); -+ - if (pcmk__str_eq(dependent_role, RSC_ROLE_STARTED_S, - pcmk__str_null_matches|pcmk__str_casei)) { - dependent_role = RSC_ROLE_UNKNOWN_S; -@@ -344,8 +347,9 @@ pcmk__new_colocation(const char *id, const char *node_attr, int score, - node_attr = CRM_ATTR_UNAME; - } - -- pe_rsc_trace(dependent, "%s ==> %s (%s %d)", -- dependent->id, primary->id, node_attr, score); -+ pe_rsc_trace(dependent, "Added colocation %s (%s with %s @%s using %s)", -+ new_con->id, dependent->id, primary->id, -+ pcmk_readable_score(score), node_attr); - - pcmk__add_this_with(&(dependent->rsc_cons), new_con); - pcmk__add_with_this(&(primary->rsc_cons_lhs), new_con); - -From 589403f548459eeddfd5188ba70723ecf9987d2b Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 26 Jun 2023 12:19:44 -0500 -Subject: [PATCH 03/17] Refactor: scheduler: use flag group instead of bool for - colocation influence - -... 
so we can add more flags ---- - include/pcmki/pcmki_scheduler.h | 2 +- - lib/pacemaker/libpacemaker_private.h | 13 +++++- - lib/pacemaker/pcmk_sched_bundle.c | 5 ++- - lib/pacemaker/pcmk_sched_colocation.c | 61 ++++++++++++++------------- - lib/pacemaker/pcmk_sched_group.c | 8 +++- - lib/pacemaker/pcmk_sched_primitive.c | 3 +- - 6 files changed, 55 insertions(+), 37 deletions(-) - -diff --git a/include/pcmki/pcmki_scheduler.h b/include/pcmki/pcmki_scheduler.h -index dde50a57e32..53de7e1f52e 100644 ---- a/include/pcmki/pcmki_scheduler.h -+++ b/include/pcmki/pcmki_scheduler.h -@@ -29,7 +29,7 @@ typedef struct { - int primary_role; // Colocation applies only if primary has this role - - int score; -- bool influence; // Whether dependent influences active primary placement -+ uint32_t flags; // Group of enum pcmk__coloc_flags - } pcmk__colocation_t; - - void pcmk__unpack_constraints(pe_working_set_t *data_set); -diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h -index a6c13220e1d..51de9d3e9a9 100644 ---- a/lib/pacemaker/libpacemaker_private.h -+++ b/lib/pacemaker/libpacemaker_private.h -@@ -16,6 +16,14 @@ - - #include // pe_action_t, pe_node_t, pe_working_set_t - -+// Colocation flags -+enum pcmk__coloc_flags { -+ pcmk__coloc_none = 0U, -+ -+ // Primary is affected even if already active -+ pcmk__coloc_influence = (1U << 0), -+}; -+ - // Flags to modify the behavior of add_colocated_node_scores() - enum pcmk__coloc_select { - // With no other flags, apply all "with this" colocations -@@ -483,7 +491,7 @@ G_GNUC_INTERNAL - void pcmk__new_colocation(const char *id, const char *node_attr, int score, - pe_resource_t *dependent, pe_resource_t *primary, - const char *dependent_role, const char *primary_role, -- bool influence); -+ uint32_t flags); - - G_GNUC_INTERNAL - void pcmk__block_colocation_dependents(pe_action_t *action, -@@ -530,7 +538,8 @@ pcmk__colocation_has_influence(const pcmk__colocation_t *colocation, - /* The dependent in a colocation influences the primary's location - * if the influence option is true or the primary is not yet active. 
- */ -- return colocation->influence || (rsc->running_on == NULL); -+ return pcmk_is_set(colocation->flags, pcmk__coloc_influence) -+ || (rsc->running_on == NULL); - } - - -diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c -index 6024da68fb7..ca3c21a9977 100644 ---- a/lib/pacemaker/pcmk_sched_bundle.c -+++ b/lib/pacemaker/pcmk_sched_bundle.c -@@ -83,7 +83,7 @@ pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer) - pcmk__new_colocation("child-remote-with-docker-remote", NULL, - INFINITY, replica->remote, - container_host->details->remote_rsc, NULL, -- NULL, true); -+ NULL, pcmk__coloc_influence); - } - - if (replica->remote) { -@@ -252,7 +252,8 @@ pcmk__bundle_internal_constraints(pe_resource_t *rsc) - pe_order_implies_first|pe_order_preserve); - - pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip, -- replica->container, NULL, NULL, true); -+ replica->container, NULL, NULL, -+ pcmk__coloc_influence); - } - - if (replica->remote) { -diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c -index d591550fb97..dbdefadfd10 100644 ---- a/lib/pacemaker/pcmk_sched_colocation.c -+++ b/lib/pacemaker/pcmk_sched_colocation.c -@@ -296,13 +296,13 @@ anti_colocation_order(pe_resource_t *first_rsc, int first_role, - * \param[in,out] primary Resource to colocate \p dependent with - * \param[in] dependent_role Current role of \p dependent - * \param[in] primary_role Current role of \p primary -- * \param[in] influence Whether colocation constraint has influence -+ * \param[in] flags Group of enum pcmk__coloc_flags - */ - void - pcmk__new_colocation(const char *id, const char *node_attr, int score, - pe_resource_t *dependent, pe_resource_t *primary, - const char *dependent_role, const char *primary_role, -- bool influence) -+ uint32_t flags) - { - pcmk__colocation_t *new_con = NULL; - -@@ -341,7 +341,7 @@ pcmk__new_colocation(const char *id, const char *node_attr, int score, - new_con->dependent_role = text2role(dependent_role); - new_con->primary_role = text2role(primary_role); - new_con->node_attribute = node_attr; -- new_con->influence = influence; -+ new_con->flags = flags; - - if (node_attr == NULL) { - node_attr = CRM_ATTR_UNAME; -@@ -373,10 +373,11 @@ pcmk__new_colocation(const char *id, const char *node_attr, int score, - * \param[in] rsc Resource involved in constraint (for default) - * \param[in] influence_s String value of influence option - * -- * \return true if string evaluates true, false if string evaluates false, -- * or value of resource's critical option if string is NULL or invalid -+ * \return pcmk__coloc_influence if string evaluates true, or string is NULL or -+ * invalid and resource's critical option evaluates true, otherwise -+ * pcmk__coloc_none - */ --static bool -+static uint32_t - unpack_influence(const char *coloc_id, const pe_resource_t *rsc, - const char *influence_s) - { -@@ -388,10 +389,13 @@ unpack_influence(const char *coloc_id, const pe_resource_t *rsc, - XML_COLOC_ATTR_INFLUENCE " (using default)", - coloc_id); - } else { -- return (influence_i != 0); -+ return (influence_i == 0)? 
pcmk__coloc_none : pcmk__coloc_influence; - } - } -- return pcmk_is_set(rsc->flags, pe_rsc_critical); -+ if (pcmk_is_set(rsc->flags, pe_rsc_critical)) { -+ return pcmk__coloc_influence; -+ } -+ return pcmk__coloc_none; - } - - static void -@@ -406,7 +410,7 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - const char *ordering = crm_element_value(set, "ordering"); - int local_score = score; - bool sequential = false; -- -+ uint32_t flags = pcmk__coloc_none; - const char *score_s = crm_element_value(set, XML_RULE_ATTR_SCORE); - - if (score_s) { -@@ -433,10 +437,9 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); - if (with != NULL) { - pe_rsc_trace(resource, "Colocating %s with %s", resource->id, with->id); -+ flags = unpack_influence(coloc_id, resource, influence_s); - pcmk__new_colocation(set_id, NULL, local_score, resource, -- with, role, role, -- unpack_influence(coloc_id, resource, -- influence_s)); -+ with, role, role, flags); - } - with = resource; - } -@@ -451,12 +454,10 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - if (last != NULL) { - pe_rsc_trace(resource, "Colocating %s with %s", - last->id, resource->id); -+ flags = unpack_influence(coloc_id, resource, influence_s); - pcmk__new_colocation(set_id, NULL, local_score, last, -- resource, role, role, -- unpack_influence(coloc_id, last, -- influence_s)); -+ resource, role, role, flags); - } -- - last = resource; - } - -@@ -470,11 +471,10 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { - - xmlNode *xml_rsc_with = NULL; -- bool influence = true; - - EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); -- influence = unpack_influence(coloc_id, resource, influence_s); - -+ flags = unpack_influence(coloc_id, resource, influence_s); - for (xml_rsc_with = first_named_child(set, XML_TAG_RESOURCE_REF); - xml_rsc_with != NULL; - xml_rsc_with = crm_next_same_xml(xml_rsc_with)) { -@@ -487,7 +487,7 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - pe_rsc_trace(resource, "Anti-Colocating %s with %s", resource->id, - with->id); - pcmk__new_colocation(set_id, NULL, local_score, -- resource, with, role, role, influence); -+ resource, with, role, role, flags); - } - } - } -@@ -506,6 +506,7 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - - int rc = pcmk_rc_ok; - bool sequential = false; -+ uint32_t flags = pcmk__coloc_none; - - if (score == 0) { - crm_trace("Ignoring colocation '%s' between sets because score is 0", -@@ -536,18 +537,18 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - } - - if ((rsc_1 != NULL) && (rsc_2 != NULL)) { -+ flags = unpack_influence(id, rsc_1, influence_s); - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, role_2, -- unpack_influence(id, rsc_1, influence_s)); -+ flags); - - } else if (rsc_1 != NULL) { -- bool influence = unpack_influence(id, rsc_1, influence_s); -- -+ flags = unpack_influence(id, rsc_1, influence_s); - for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF); - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { - - EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc)); - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, -- role_2, influence); -+ role_2, flags); - } - - } else if (rsc_2 != NULL) { -@@ -555,9 +556,9 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int 
score, - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { - - EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); -+ flags = unpack_influence(id, rsc_1, influence_s); - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, -- role_2, -- unpack_influence(id, rsc_1, influence_s)); -+ role_2, flags); - } - - } else { -@@ -565,18 +566,17 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { - - xmlNode *xml_rsc_2 = NULL; -- bool influence = true; - - EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); -- influence = unpack_influence(id, rsc_1, influence_s); - -+ flags = unpack_influence(id, rsc_1, influence_s); - for (xml_rsc_2 = first_named_child(set2, XML_TAG_RESOURCE_REF); - xml_rsc_2 != NULL; - xml_rsc_2 = crm_next_same_xml(xml_rsc_2)) { - - EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2)); - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, -- role_1, role_2, influence); -+ role_1, role_2, flags); - } - } - } -@@ -587,6 +587,7 @@ unpack_simple_colocation(xmlNode *xml_obj, const char *id, - const char *influence_s, pe_working_set_t *data_set) - { - int score_i = 0; -+ uint32_t flags = pcmk__coloc_none; - - const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); - const char *dependent_id = crm_element_value(xml_obj, -@@ -675,9 +676,9 @@ unpack_simple_colocation(xmlNode *xml_obj, const char *id, - score_i = char2score(score); - } - -+ flags = unpack_influence(id, dependent, influence_s); - pcmk__new_colocation(id, attr, score_i, dependent, primary, -- dependent_role, primary_role, -- unpack_influence(id, dependent, influence_s)); -+ dependent_role, primary_role, flags); - } - - // \return Standard Pacemaker return code -diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c -index c1392e07a4c..72f088a2709 100644 ---- a/lib/pacemaker/pcmk_sched_group.c -+++ b/lib/pacemaker/pcmk_sched_group.c -@@ -168,10 +168,16 @@ member_internal_constraints(gpointer data, gpointer user_data) - } - - } else if (member_data->colocated) { -+ uint32_t flags = pcmk__coloc_none; -+ -+ if (pcmk_is_set(member->flags, pe_rsc_critical)) { -+ flags |= pcmk__coloc_influence; -+ } -+ - // Colocate this member with the previous one - pcmk__new_colocation("group:internal_colocation", NULL, INFINITY, - member, member_data->previous_member, NULL, NULL, -- pcmk_is_set(member->flags, pe_rsc_critical)); -+ flags); - } - - if (member_data->promotable) { -diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c -index 4e3eca3e18a..ff7052f6c79 100644 ---- a/lib/pacemaker/pcmk_sched_primitive.c -+++ b/lib/pacemaker/pcmk_sched_primitive.c -@@ -999,7 +999,8 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc) - score = INFINITY; /* Force them to run on the same host */ - } - pcmk__new_colocation("resource-with-container", NULL, score, rsc, -- rsc->container, NULL, NULL, true); -+ rsc->container, NULL, NULL, -+ pcmk__coloc_influence); - } - } - - -From 2f8d4186e16fb026176f1ddb774eb38940c90390 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 26 Jun 2023 12:33:49 -0500 -Subject: [PATCH 04/17] Refactor: scheduler: prefix all internal colocation IDs - with "#" - -... to ensure they're easily distinguished from user-configured colocations -in log messages. 
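(A sketch of the combined result of patches 03 and 04, assembled from their hunks: influence is now one bit in a uint32_t flag group rather than a standalone bool, leaving room for more flags, and internal colocation IDs carry a "#" prefix.)

    uint32_t flags = pcmk__coloc_none;

    if (pcmk_is_set(member->flags, pe_rsc_critical)) {
        flags |= pcmk__coloc_influence;
    }

    // Colocate this group member with the previous one
    pcmk__new_colocation("#group-members", NULL, INFINITY, member,
                         member_data->previous_member, NULL, NULL, flags);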
---- - lib/pacemaker/pcmk_sched_bundle.c | 6 +++--- - lib/pacemaker/pcmk_sched_group.c | 5 ++--- - lib/pacemaker/pcmk_sched_primitive.c | 2 +- - 3 files changed, 6 insertions(+), 7 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c -index ca3c21a9977..b4beb0d488f 100644 ---- a/lib/pacemaker/pcmk_sched_bundle.c -+++ b/lib/pacemaker/pcmk_sched_bundle.c -@@ -80,7 +80,7 @@ pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer) - * host because pacemaker-remoted only supports a single - * active connection - */ -- pcmk__new_colocation("child-remote-with-docker-remote", NULL, -+ pcmk__new_colocation("#replica-remote-with-host-remote", NULL, - INFINITY, replica->remote, - container_host->details->remote_rsc, NULL, - NULL, pcmk__coloc_influence); -@@ -251,14 +251,14 @@ pcmk__bundle_internal_constraints(pe_resource_t *rsc) - pcmk__order_stops(replica->container, replica->ip, - pe_order_implies_first|pe_order_preserve); - -- pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip, -+ pcmk__new_colocation("#ip-with-container", NULL, INFINITY, replica->ip, - replica->container, NULL, NULL, - pcmk__coloc_influence); - } - - if (replica->remote) { - /* This handles ordering and colocating remote relative to container -- * (via "resource-with-container"). Since IP is also ordered and -+ * (via "#resource-with-container"). Since IP is also ordered and - * colocated relative to the container, we don't need to do anything - * explicit here with IP. - */ -diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c -index 72f088a2709..1b6c5c416ab 100644 ---- a/lib/pacemaker/pcmk_sched_group.c -+++ b/lib/pacemaker/pcmk_sched_group.c -@@ -175,9 +175,8 @@ member_internal_constraints(gpointer data, gpointer user_data) - } - - // Colocate this member with the previous one -- pcmk__new_colocation("group:internal_colocation", NULL, INFINITY, -- member, member_data->previous_member, NULL, NULL, -- flags); -+ pcmk__new_colocation("#group-members", NULL, INFINITY, member, -+ member_data->previous_member, NULL, NULL, flags); - } - - if (member_data->promotable) { -diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c -index ff7052f6c79..d6b39e38c5f 100644 ---- a/lib/pacemaker/pcmk_sched_primitive.c -+++ b/lib/pacemaker/pcmk_sched_primitive.c -@@ -998,7 +998,7 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc) - } else { - score = INFINITY; /* Force them to run on the same host */ - } -- pcmk__new_colocation("resource-with-container", NULL, score, rsc, -+ pcmk__new_colocation("#resource-with-container", NULL, score, rsc, - rsc->container, NULL, NULL, - pcmk__coloc_influence); - } - -From 93230be27fb4c156a1cc15daf161e2206961421e Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 26 Jun 2023 16:25:02 -0500 -Subject: [PATCH 05/17] Refactor: scheduler: don't use macro for finding - constraint resource - -It obscured what was happening ---- - lib/pacemaker/pcmk_sched_colocation.c | 105 ++++++++++++++++++++------ - 1 file changed, 81 insertions(+), 24 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c -index dbdefadfd10..4d8fe74c206 100644 ---- a/lib/pacemaker/pcmk_sched_colocation.c -+++ b/lib/pacemaker/pcmk_sched_colocation.c -@@ -21,14 +21,6 @@ - #include "crm/msg_xml.h" - #include "libpacemaker_private.h" - --#define EXPAND_CONSTRAINT_IDREF(__set, __rsc, __name) do { \ -- __rsc = 
pcmk__find_constraint_resource(data_set->resources, __name); \ -- if (__rsc == NULL) { \ -- pcmk__config_err("%s: No resource found for %s", __set, __name); \ -- return; \ -- } \ -- } while(0) -- - // Used to temporarily mark a node as unusable - #define INFINITY_HACK (INFINITY * -100) - -@@ -411,6 +403,7 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - int local_score = score; - bool sequential = false; - uint32_t flags = pcmk__coloc_none; -+ const char *xml_rsc_id = NULL; - const char *score_s = crm_element_value(set, XML_RULE_ATTR_SCORE); - - if (score_s) { -@@ -434,7 +427,14 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF); - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { - -- EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); -+ xml_rsc_id = ID(xml_rsc); -+ resource = pcmk__find_constraint_resource(data_set->resources, -+ xml_rsc_id); -+ if (resource == NULL) { -+ pcmk__config_err("%s: No resource found for %s", -+ set_id, xml_rsc_id); -+ return; -+ } - if (with != NULL) { - pe_rsc_trace(resource, "Colocating %s with %s", resource->id, with->id); - flags = unpack_influence(coloc_id, resource, influence_s); -@@ -450,7 +450,14 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF); - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { - -- EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); -+ xml_rsc_id = ID(xml_rsc); -+ resource = pcmk__find_constraint_resource(data_set->resources, -+ xml_rsc_id); -+ if (resource == NULL) { -+ pcmk__config_err("%s: No resource found for %s", -+ set_id, xml_rsc_id); -+ return; -+ } - if (last != NULL) { - pe_rsc_trace(resource, "Colocating %s with %s", - last->id, resource->id); -@@ -472,18 +479,30 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - - xmlNode *xml_rsc_with = NULL; - -- EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); -- -+ xml_rsc_id = ID(xml_rsc); -+ resource = pcmk__find_constraint_resource(data_set->resources, -+ xml_rsc_id); -+ if (resource == NULL) { -+ pcmk__config_err("%s: No resource found for %s", -+ set_id, xml_rsc_id); -+ return; -+ } - flags = unpack_influence(coloc_id, resource, influence_s); - for (xml_rsc_with = first_named_child(set, XML_TAG_RESOURCE_REF); - xml_rsc_with != NULL; - xml_rsc_with = crm_next_same_xml(xml_rsc_with)) { - -- if (pcmk__str_eq(resource->id, ID(xml_rsc_with), -- pcmk__str_casei)) { -+ xml_rsc_id = ID(xml_rsc_with); -+ if (pcmk__str_eq(resource->id, xml_rsc_id, pcmk__str_none)) { - break; - } -- EXPAND_CONSTRAINT_IDREF(set_id, with, ID(xml_rsc_with)); -+ with = pcmk__find_constraint_resource(data_set->resources, -+ xml_rsc_id); -+ if (with == NULL) { -+ pcmk__config_err("%s: No resource found for %s", -+ set_id, xml_rsc_id); -+ return; -+ } - pe_rsc_trace(resource, "Anti-Colocating %s with %s", resource->id, - with->id); - pcmk__new_colocation(set_id, NULL, local_score, -@@ -501,6 +520,7 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - pe_resource_t *rsc_1 = NULL; - pe_resource_t *rsc_2 = NULL; - -+ const char *xml_rsc_id = NULL; - const char *role_1 = crm_element_value(set1, "role"); - const char *role_2 = crm_element_value(set2, "role"); - -@@ -519,21 +539,30 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - // Get the first one - xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF); - if 
(xml_rsc != NULL) { -- EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); -+ xml_rsc_id = ID(xml_rsc); -+ rsc_1 = pcmk__find_constraint_resource(data_set->resources, -+ xml_rsc_id); -+ if (rsc_1 == NULL) { -+ pcmk__config_err("%s: No resource found for %s", -+ id, xml_rsc_id); -+ return; -+ } - } - } - - rc = pcmk__xe_get_bool_attr(set2, "sequential", &sequential); - if (rc != pcmk_rc_ok || sequential) { - // Get the last one -- const char *rid = NULL; -- - for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF); - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { - -- rid = ID(xml_rsc); -+ xml_rsc_id = ID(xml_rsc); -+ } -+ rsc_2 = pcmk__find_constraint_resource(data_set->resources, xml_rsc_id); -+ if (rsc_2 == NULL) { -+ pcmk__config_err("%s: No resource found for %s", id, xml_rsc_id); -+ return; - } -- EXPAND_CONSTRAINT_IDREF(id, rsc_2, rid); - } - - if ((rsc_1 != NULL) && (rsc_2 != NULL)) { -@@ -546,7 +575,14 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF); - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { - -- EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc)); -+ xml_rsc_id = ID(xml_rsc); -+ rsc_2 = pcmk__find_constraint_resource(data_set->resources, -+ xml_rsc_id); -+ if (rsc_2 == NULL) { -+ pcmk__config_err("%s: No resource found for %s", -+ id, xml_rsc_id); -+ return; -+ } - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, - role_2, flags); - } -@@ -555,7 +591,14 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF); - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { - -- EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); -+ xml_rsc_id = ID(xml_rsc); -+ rsc_1 = pcmk__find_constraint_resource(data_set->resources, -+ xml_rsc_id); -+ if (rsc_1 == NULL) { -+ pcmk__config_err("%s: No resource found for %s", -+ id, xml_rsc_id); -+ return; -+ } - flags = unpack_influence(id, rsc_1, influence_s); - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, - role_2, flags); -@@ -567,14 +610,28 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - - xmlNode *xml_rsc_2 = NULL; - -- EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); -+ xml_rsc_id = ID(xml_rsc); -+ rsc_1 = pcmk__find_constraint_resource(data_set->resources, -+ xml_rsc_id); -+ if (rsc_1 == NULL) { -+ pcmk__config_err("%s: No resource found for %s", -+ id, xml_rsc_id); -+ return; -+ } - - flags = unpack_influence(id, rsc_1, influence_s); - for (xml_rsc_2 = first_named_child(set2, XML_TAG_RESOURCE_REF); - xml_rsc_2 != NULL; - xml_rsc_2 = crm_next_same_xml(xml_rsc_2)) { - -- EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2)); -+ xml_rsc_id = ID(xml_rsc_2); -+ rsc_2 = pcmk__find_constraint_resource(data_set->resources, -+ xml_rsc_id); -+ if (rsc_2 == NULL) { -+ pcmk__config_err("%s: No resource found for %s", -+ id, xml_rsc_id); -+ return; -+ } - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, - role_1, role_2, flags); - } - -From 23393992a75905f6bd4636f71263c15338c1556f Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 27 Jun 2023 10:15:19 -0500 -Subject: [PATCH 06/17] Refactor: scheduler: use bool for "group ordering" in - colocation sets - -... 
for readability ---- - lib/pacemaker/pcmk_sched_colocation.c | 13 ++++++++----- - 1 file changed, 8 insertions(+), 5 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c -index 4d8fe74c206..4c8bca56e86 100644 ---- a/lib/pacemaker/pcmk_sched_colocation.c -+++ b/lib/pacemaker/pcmk_sched_colocation.c -@@ -399,7 +399,7 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - pe_resource_t *resource = NULL; - const char *set_id = ID(set); - const char *role = crm_element_value(set, "role"); -- const char *ordering = crm_element_value(set, "ordering"); -+ bool with_previous = false; - int local_score = score; - bool sequential = false; - uint32_t flags = pcmk__coloc_none; -@@ -415,15 +415,18 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - return; - } - -- if (ordering == NULL) { -- ordering = "group"; -+ /* The "ordering" attribute specifies whether resources in a positive-score -+ * set are colocated with the previous or next resource. -+ */ -+ if (pcmk__str_eq(crm_element_value(set, "ordering"), "group", -+ pcmk__str_null_matches|pcmk__str_casei)) { -+ with_previous = true; - } - - if (pcmk__xe_get_bool_attr(set, "sequential", &sequential) == pcmk_rc_ok && !sequential) { - return; - -- } else if ((local_score > 0) -- && pcmk__str_eq(ordering, "group", pcmk__str_casei)) { -+ } else if ((local_score > 0) && with_previous) { - for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF); - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { - - -From e42ec03e0fe488a80172e79b319a3084854332de Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 27 Jun 2023 10:18:22 -0500 -Subject: [PATCH 07/17] Refactor: scheduler: simplify unpacking a colocation - set (slightly) - ---- - lib/pacemaker/pcmk_sched_colocation.c | 56 ++++++++++----------------- - 1 file changed, 20 insertions(+), 36 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c -index 4c8bca56e86..e8f01e49a27 100644 ---- a/lib/pacemaker/pcmk_sched_colocation.c -+++ b/lib/pacemaker/pcmk_sched_colocation.c -@@ -395,7 +395,7 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - const char *influence_s, pe_working_set_t *data_set) - { - xmlNode *xml_rsc = NULL; -- pe_resource_t *with = NULL; -+ pe_resource_t *other = NULL; - pe_resource_t *resource = NULL; - const char *set_id = ID(set); - const char *role = crm_element_value(set, "role"); -@@ -426,30 +426,7 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - if (pcmk__xe_get_bool_attr(set, "sequential", &sequential) == pcmk_rc_ok && !sequential) { - return; - -- } else if ((local_score > 0) && with_previous) { -- for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF); -- xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { -- -- xml_rsc_id = ID(xml_rsc); -- resource = pcmk__find_constraint_resource(data_set->resources, -- xml_rsc_id); -- if (resource == NULL) { -- pcmk__config_err("%s: No resource found for %s", -- set_id, xml_rsc_id); -- return; -- } -- if (with != NULL) { -- pe_rsc_trace(resource, "Colocating %s with %s", resource->id, with->id); -- flags = unpack_influence(coloc_id, resource, influence_s); -- pcmk__new_colocation(set_id, NULL, local_score, resource, -- with, role, role, flags); -- } -- with = resource; -- } -- - } else if (local_score > 0) { -- pe_resource_t *last = NULL; -- - for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF); - xml_rsc != NULL; xml_rsc = 
crm_next_same_xml(xml_rsc)) { - -@@ -461,14 +438,21 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - set_id, xml_rsc_id); - return; - } -- if (last != NULL) { -- pe_rsc_trace(resource, "Colocating %s with %s", -- last->id, resource->id); -+ if (other != NULL) { - flags = unpack_influence(coloc_id, resource, influence_s); -- pcmk__new_colocation(set_id, NULL, local_score, last, -- resource, role, role, flags); -+ if (with_previous) { -+ pe_rsc_trace(resource, "Colocating %s with %s in set %s", -+ resource->id, other->id, set_id); -+ pcmk__new_colocation(set_id, NULL, local_score, resource, -+ other, role, role, flags); -+ } else { -+ pe_rsc_trace(resource, "Colocating %s with %s in set %s", -+ other->id, resource->id, set_id); -+ pcmk__new_colocation(set_id, NULL, local_score, other, -+ resource, role, role, flags); -+ } - } -- last = resource; -+ other = resource; - } - - } else { -@@ -499,17 +483,17 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - if (pcmk__str_eq(resource->id, xml_rsc_id, pcmk__str_none)) { - break; - } -- with = pcmk__find_constraint_resource(data_set->resources, -- xml_rsc_id); -- if (with == NULL) { -+ other = pcmk__find_constraint_resource(data_set->resources, -+ xml_rsc_id); -+ if (other == NULL) { - pcmk__config_err("%s: No resource found for %s", - set_id, xml_rsc_id); - return; - } -- pe_rsc_trace(resource, "Anti-Colocating %s with %s", resource->id, -- with->id); -+ pe_rsc_trace(resource, "Anti-Colocating %s with %s", -+ resource->id, other->id); - pcmk__new_colocation(set_id, NULL, local_score, -- resource, with, role, role, flags); -+ resource, other, role, role, flags); - } - } - } - -From a26ebb380b4bcf1f4fb8a2d69d4b8c8af306dfec Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 26 Jun 2023 14:56:53 -0500 -Subject: [PATCH 08/17] Feature: CIB: deprecate "ordering" attribute of - "resource_set" - -It's undocumented, and makes sets even more confusing than they already are, -especially since it only applies when the score is positive. ---- - include/crm/pengine/internal.h | 1 + - lib/pacemaker/pcmk_sched_colocation.c | 9 +++++++-- - 2 files changed, 8 insertions(+), 2 deletions(-) - -diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h -index 1b5f6f1d8d9..53cbb54de5e 100644 ---- a/include/crm/pengine/internal.h -+++ b/include/crm/pengine/internal.h -@@ -170,6 +170,7 @@ enum pe_warn_once_e { - pe_wo_group_coloc = (1 << 12), - pe_wo_upstart = (1 << 13), - pe_wo_nagios = (1 << 14), -+ pe_wo_set_ordering = (1 << 15), - }; - - extern uint32_t pe_wo; -diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c -index e8f01e49a27..36558f38c4e 100644 ---- a/lib/pacemaker/pcmk_sched_colocation.c -+++ b/lib/pacemaker/pcmk_sched_colocation.c -@@ -415,12 +415,17 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - return; - } - -- /* The "ordering" attribute specifies whether resources in a positive-score -- * set are colocated with the previous or next resource. -+ /* @COMPAT The deprecated "ordering" attribute specifies whether resources -+ * in a positive-score set are colocated with the previous or next resource. 
- */ - if (pcmk__str_eq(crm_element_value(set, "ordering"), "group", - pcmk__str_null_matches|pcmk__str_casei)) { - with_previous = true; -+ } else { -+ pe_warn_once(pe_wo_set_ordering, -+ "Support for 'ordering' other than 'group' in " -+ XML_CONS_TAG_RSC_SET " (such as %s) is deprecated and " -+ "will be removed in a future release", set_id); - } - - if (pcmk__xe_get_bool_attr(set, "sequential", &sequential) == pcmk_rc_ok && !sequential) { - -From f18f365c0995df68599ec2c241f81bae54d2bd38 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 26 Jun 2023 15:05:21 -0500 -Subject: [PATCH 09/17] Log: scheduler: improve logs when unpacking colocation - sets - ---- - lib/pacemaker/pcmk_sched_colocation.c | 54 +++++++++++++++++---------- - 1 file changed, 34 insertions(+), 20 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c -index 36558f38c4e..7555afbc522 100644 ---- a/lib/pacemaker/pcmk_sched_colocation.c -+++ b/lib/pacemaker/pcmk_sched_colocation.c -@@ -439,8 +439,9 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - resource = pcmk__find_constraint_resource(data_set->resources, - xml_rsc_id); - if (resource == NULL) { -- pcmk__config_err("%s: No resource found for %s", -- set_id, xml_rsc_id); -+ // Should be possible only with validation disabled -+ pcmk__config_err("Ignoring %s and later resources in set %s: " -+ "No such resource", xml_rsc_id, set_id); - return; - } - if (other != NULL) { -@@ -475,8 +476,9 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - resource = pcmk__find_constraint_resource(data_set->resources, - xml_rsc_id); - if (resource == NULL) { -- pcmk__config_err("%s: No resource found for %s", -- set_id, xml_rsc_id); -+ // Should be possible only with validation disabled -+ pcmk__config_err("Ignoring %s and later resources in set %s: " -+ "No such resource", xml_rsc_id, set_id); - return; - } - flags = unpack_influence(coloc_id, resource, influence_s); -@@ -490,11 +492,7 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - } - other = pcmk__find_constraint_resource(data_set->resources, - xml_rsc_id); -- if (other == NULL) { -- pcmk__config_err("%s: No resource found for %s", -- set_id, xml_rsc_id); -- return; -- } -+ CRM_ASSERT(other != NULL); // We already processed it - pe_rsc_trace(resource, "Anti-Colocating %s with %s", - resource->id, other->id); - pcmk__new_colocation(set_id, NULL, local_score, -@@ -527,7 +525,7 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - } - - rc = pcmk__xe_get_bool_attr(set1, "sequential", &sequential); -- if (rc != pcmk_rc_ok || sequential) { -+ if ((rc != pcmk_rc_ok) || sequential) { - // Get the first one - xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF); - if (xml_rsc != NULL) { -@@ -535,15 +533,17 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - rsc_1 = pcmk__find_constraint_resource(data_set->resources, - xml_rsc_id); - if (rsc_1 == NULL) { -- pcmk__config_err("%s: No resource found for %s", -- id, xml_rsc_id); -+ // Should be possible only with validation disabled -+ pcmk__config_err("Ignoring colocation of set %s with set %s " -+ "because first resource %s not found", -+ ID(set1), ID(set2), xml_rsc_id); - return; - } - } - } - - rc = pcmk__xe_get_bool_attr(set2, "sequential", &sequential); -- if (rc != pcmk_rc_ok || sequential) { -+ if ((rc != pcmk_rc_ok) || sequential) { - // Get the last one - for (xml_rsc = first_named_child(set2, 
XML_TAG_RESOURCE_REF); - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { -@@ -552,7 +552,10 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - } - rsc_2 = pcmk__find_constraint_resource(data_set->resources, xml_rsc_id); - if (rsc_2 == NULL) { -- pcmk__config_err("%s: No resource found for %s", id, xml_rsc_id); -+ // Should be possible only with validation disabled -+ pcmk__config_err("Ignoring colocation of set %s with set %s " -+ "because last resource %s not found", -+ ID(set1), ID(set2), xml_rsc_id); - return; - } - } -@@ -573,6 +576,10 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - if (rsc_2 == NULL) { - pcmk__config_err("%s: No resource found for %s", - id, xml_rsc_id); -+ // Should be possible only with validation disabled -+ pcmk__config_err("Ignoring resource %s and later in set %s " -+ "for colocation with set %s: No such resource", -+ xml_rsc_id, set2, set1); - return; - } - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, -@@ -587,8 +594,10 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - rsc_1 = pcmk__find_constraint_resource(data_set->resources, - xml_rsc_id); - if (rsc_1 == NULL) { -- pcmk__config_err("%s: No resource found for %s", -- id, xml_rsc_id); -+ // Should be possible only with validation disabled -+ pcmk__config_err("Ignoring resource %s and later in set %s " -+ "for colocation with set %s: No such resource", -+ xml_rsc_id, set1, set2); - return; - } - flags = unpack_influence(id, rsc_1, influence_s); -@@ -606,8 +615,10 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - rsc_1 = pcmk__find_constraint_resource(data_set->resources, - xml_rsc_id); - if (rsc_1 == NULL) { -- pcmk__config_err("%s: No resource found for %s", -- id, xml_rsc_id); -+ // Should be possible only with validation disabled -+ pcmk__config_err("Ignoring resource %s and later in set %s " -+ "for colocation with set %s: No such resource", -+ xml_rsc_id, set1, set2); - return; - } - -@@ -620,8 +631,11 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - rsc_2 = pcmk__find_constraint_resource(data_set->resources, - xml_rsc_id); - if (rsc_2 == NULL) { -- pcmk__config_err("%s: No resource found for %s", -- id, xml_rsc_id); -+ // Should be possible only with validation disabled -+ pcmk__config_err("Ignoring resource %s and later in set %s " -+ "for colocation with %s in set %s: " -+ "No such resource", -+ xml_rsc_id, set2, ID(xml_rsc), set1); - return; - } - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, - -From 19e9a9d3b30e857f98459b7f5c4f4938e48e4261 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 26 Jun 2023 16:25:17 -0500 -Subject: [PATCH 10/17] Refactor: scheduler: mark explicitly configured - colocations - ---- - lib/pacemaker/libpacemaker_private.h | 3 +++ - lib/pacemaker/pcmk_sched_colocation.c | 18 +++++++++++------- - 2 files changed, 14 insertions(+), 7 deletions(-) - -diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h -index 51de9d3e9a9..a49d55d3c41 100644 ---- a/lib/pacemaker/libpacemaker_private.h -+++ b/lib/pacemaker/libpacemaker_private.h -@@ -22,6 +22,9 @@ enum pcmk__coloc_flags { - - // Primary is affected even if already active - pcmk__coloc_influence = (1U << 0), -+ -+ // Colocation was explicitly configured in CIB -+ pcmk__coloc_explicit = (1U << 1), - }; - - // Flags to modify the behavior of add_colocated_node_scores() -diff --git 
a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c -index 7555afbc522..e0b39b59e81 100644 ---- a/lib/pacemaker/pcmk_sched_colocation.c -+++ b/lib/pacemaker/pcmk_sched_colocation.c -@@ -445,7 +445,8 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - return; - } - if (other != NULL) { -- flags = unpack_influence(coloc_id, resource, influence_s); -+ flags = pcmk__coloc_explicit -+ | unpack_influence(coloc_id, resource, influence_s); - if (with_previous) { - pe_rsc_trace(resource, "Colocating %s with %s in set %s", - resource->id, other->id, set_id); -@@ -481,7 +482,8 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - "No such resource", xml_rsc_id, set_id); - return; - } -- flags = unpack_influence(coloc_id, resource, influence_s); -+ flags = pcmk__coloc_explicit -+ | unpack_influence(coloc_id, resource, influence_s); - for (xml_rsc_with = first_named_child(set, XML_TAG_RESOURCE_REF); - xml_rsc_with != NULL; - xml_rsc_with = crm_next_same_xml(xml_rsc_with)) { -@@ -561,12 +563,12 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - } - - if ((rsc_1 != NULL) && (rsc_2 != NULL)) { -- flags = unpack_influence(id, rsc_1, influence_s); -+ flags = pcmk__coloc_explicit | unpack_influence(id, rsc_1, influence_s); - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, role_2, - flags); - - } else if (rsc_1 != NULL) { -- flags = unpack_influence(id, rsc_1, influence_s); -+ flags = pcmk__coloc_explicit | unpack_influence(id, rsc_1, influence_s); - for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF); - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { - -@@ -600,7 +602,8 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - xml_rsc_id, set1, set2); - return; - } -- flags = unpack_influence(id, rsc_1, influence_s); -+ flags = pcmk__coloc_explicit -+ | unpack_influence(id, rsc_1, influence_s); - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, - role_2, flags); - } -@@ -622,7 +625,8 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - return; - } - -- flags = unpack_influence(id, rsc_1, influence_s); -+ flags = pcmk__coloc_explicit -+ | unpack_influence(id, rsc_1, influence_s); - for (xml_rsc_2 = first_named_child(set2, XML_TAG_RESOURCE_REF); - xml_rsc_2 != NULL; - xml_rsc_2 = crm_next_same_xml(xml_rsc_2)) { -@@ -739,7 +743,7 @@ unpack_simple_colocation(xmlNode *xml_obj, const char *id, - score_i = char2score(score); - } - -- flags = unpack_influence(id, dependent, influence_s); -+ flags = pcmk__coloc_explicit | unpack_influence(id, dependent, influence_s); - pcmk__new_colocation(id, attr, score_i, dependent, primary, - dependent_role, primary_role, flags); - } - -From 4f9e2bc6fb1dd78d5784d918a85bb2028f01d265 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 27 Jun 2023 10:24:58 -0500 -Subject: [PATCH 11/17] Test: scheduler: add regression test for colocation - with an inner group member - -As of this commit, the behavior is incorrect. 
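
[Illustrative aside, not part of the original patches: the pcmk__coloc_explicit value introduced in PATCH 10/17 is an ordinary bit flag, ORed together with the influence bits at each CIB unpacking site and later tested with pcmk_is_set(). A minimal self-contained sketch of that arithmetic follows; the enum values mirror the hunk in libpacemaker_private.h, while is_set() is our stand-in for the real helper.]

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Values mirror enum pcmk__coloc_flags from the hunk above */
    enum coloc_flags {
        coloc_none      = 0U,
        coloc_influence = (1U << 0), /* primary affected even if active */
        coloc_explicit  = (1U << 1), /* explicitly configured in CIB */
    };

    /* Stand-in for pacemaker's pcmk_is_set(): true if all bits are set */
    static bool
    is_set(uint32_t flags, uint32_t to_check)
    {
        return (flags & to_check) == to_check;
    }

    int
    main(void)
    {
        /* The patches above OR the explicit bit into whatever
         * unpack_influence() returned for the constraint */
        uint32_t flags = coloc_explicit | coloc_influence;

        printf("explicit? %s\n", is_set(flags, coloc_explicit) ? "yes" : "no");
        return 0;
    }
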
---- - cts/cts-scheduler.in | 4 + - .../dot/coloc-with-inner-group-member.dot | 8 + - .../exp/coloc-with-inner-group-member.exp | 38 +++ - .../coloc-with-inner-group-member.scores | 46 ++++ - .../coloc-with-inner-group-member.summary | 33 +++ - .../xml/coloc-with-inner-group-member.xml | 258 ++++++++++++++++++ - 6 files changed, 387 insertions(+) - create mode 100644 cts/scheduler/dot/coloc-with-inner-group-member.dot - create mode 100644 cts/scheduler/exp/coloc-with-inner-group-member.exp - create mode 100644 cts/scheduler/scores/coloc-with-inner-group-member.scores - create mode 100644 cts/scheduler/summary/coloc-with-inner-group-member.summary - create mode 100644 cts/scheduler/xml/coloc-with-inner-group-member.xml - -diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in -index ee0cb7b4722..de455105985 100644 ---- a/cts/cts-scheduler.in -+++ b/cts/cts-scheduler.in -@@ -80,6 +80,10 @@ TESTS = [ - [ "group-dependents", "Account for the location preferences of things colocated with a group" ], - [ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ], - [ "colocate-unmanaged-group", "Respect mandatory colocations even if earlier group member is unmanaged" ], -+ [ -+ "coloc-with-inner-group-member", -+ "Consider explicit colocations with inner group members" -+ ], - ], - [ - [ "rsc_dep1", "Must not" ], -diff --git a/cts/scheduler/dot/coloc-with-inner-group-member.dot b/cts/scheduler/dot/coloc-with-inner-group-member.dot -new file mode 100644 -index 00000000000..77e1a8e6e40 ---- /dev/null -+++ b/cts/scheduler/dot/coloc-with-inner-group-member.dot -@@ -0,0 +1,8 @@ -+ digraph "g" { -+"grp_stop_0" -> "grp_stopped_0" [ style = bold] -+"grp_stop_0" -> "vip_stop_0 rhel8-3" [ style = bold] -+"grp_stop_0" [ style=bold color="green" fontcolor="orange"] -+"grp_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"vip_stop_0 rhel8-3" -> "grp_stopped_0" [ style = bold] -+"vip_stop_0 rhel8-3" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/exp/coloc-with-inner-group-member.exp b/cts/scheduler/exp/coloc-with-inner-group-member.exp -new file mode 100644 -index 00000000000..e6d94d5fe7f ---- /dev/null -+++ b/cts/scheduler/exp/coloc-with-inner-group-member.exp -@@ -0,0 +1,38 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/scores/coloc-with-inner-group-member.scores b/cts/scheduler/scores/coloc-with-inner-group-member.scores -new file mode 100644 -index 00000000000..10fe944cb42 ---- /dev/null -+++ b/cts/scheduler/scores/coloc-with-inner-group-member.scores -@@ -0,0 +1,46 @@ -+ -+pcmk__group_assign: bar allocation score on rhel8-1: 0 -+pcmk__group_assign: bar allocation score on rhel8-2: 0 -+pcmk__group_assign: bar allocation score on rhel8-3: 0 -+pcmk__group_assign: bar allocation score on rhel8-4: 0 -+pcmk__group_assign: bar allocation score on rhel8-5: 0 -+pcmk__group_assign: foo allocation score on rhel8-1: 0 -+pcmk__group_assign: foo allocation score on rhel8-2: 0 -+pcmk__group_assign: foo allocation score on rhel8-3: 0 -+pcmk__group_assign: foo allocation score on rhel8-4: 0 -+pcmk__group_assign: foo allocation score on rhel8-5: 0 -+pcmk__group_assign: grp allocation score on rhel8-1: 0 -+pcmk__group_assign: grp allocation score on rhel8-2: 0 -+pcmk__group_assign: grp allocation score on rhel8-3: 0 -+pcmk__group_assign: grp allocation score on rhel8-4: 0 -+pcmk__group_assign: grp allocation score on rhel8-5: 0 -+pcmk__group_assign: 
vip allocation score on rhel8-1: 0 -+pcmk__group_assign: vip allocation score on rhel8-2: 0 -+pcmk__group_assign: vip allocation score on rhel8-3: 0 -+pcmk__group_assign: vip allocation score on rhel8-4: 0 -+pcmk__group_assign: vip allocation score on rhel8-5: 0 -+pcmk__primitive_assign: Fencing allocation score on rhel8-1: 0 -+pcmk__primitive_assign: Fencing allocation score on rhel8-2: 0 -+pcmk__primitive_assign: Fencing allocation score on rhel8-3: 0 -+pcmk__primitive_assign: Fencing allocation score on rhel8-4: 0 -+pcmk__primitive_assign: Fencing allocation score on rhel8-5: 0 -+pcmk__primitive_assign: bar allocation score on rhel8-1: -INFINITY -+pcmk__primitive_assign: bar allocation score on rhel8-2: -INFINITY -+pcmk__primitive_assign: bar allocation score on rhel8-3: -INFINITY -+pcmk__primitive_assign: bar allocation score on rhel8-4: 0 -+pcmk__primitive_assign: bar allocation score on rhel8-5: -INFINITY -+pcmk__primitive_assign: foo allocation score on rhel8-1: 0 -+pcmk__primitive_assign: foo allocation score on rhel8-2: 0 -+pcmk__primitive_assign: foo allocation score on rhel8-3: 0 -+pcmk__primitive_assign: foo allocation score on rhel8-4: 0 -+pcmk__primitive_assign: foo allocation score on rhel8-5: 0 -+pcmk__primitive_assign: vip allocation score on rhel8-1: -INFINITY -+pcmk__primitive_assign: vip allocation score on rhel8-2: -INFINITY -+pcmk__primitive_assign: vip allocation score on rhel8-3: -INFINITY -+pcmk__primitive_assign: vip allocation score on rhel8-4: -INFINITY -+pcmk__primitive_assign: vip allocation score on rhel8-5: -INFINITY -+pcmk__primitive_assign: vip-dep allocation score on rhel8-1: 0 -+pcmk__primitive_assign: vip-dep allocation score on rhel8-2: 0 -+pcmk__primitive_assign: vip-dep allocation score on rhel8-3: 0 -+pcmk__primitive_assign: vip-dep allocation score on rhel8-4: 0 -+pcmk__primitive_assign: vip-dep allocation score on rhel8-5: 0 -diff --git a/cts/scheduler/summary/coloc-with-inner-group-member.summary b/cts/scheduler/summary/coloc-with-inner-group-member.summary -new file mode 100644 -index 00000000000..3e87f0867ef ---- /dev/null -+++ b/cts/scheduler/summary/coloc-with-inner-group-member.summary -@@ -0,0 +1,33 @@ -+Using the original execution date of: 2023-06-20 20:45:06Z -+Current cluster status: -+ * Node List: -+ * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started rhel8-1 -+ * vip-dep (ocf:pacemaker:Dummy): Started rhel8-3 -+ * Resource Group: grp: -+ * foo (ocf:pacemaker:Dummy): Started rhel8-4 -+ * bar (ocf:pacemaker:Dummy): Started rhel8-4 -+ * vip (ocf:pacemaker:Dummy): Started rhel8-3 -+ -+Transition Summary: -+ * Stop vip ( rhel8-3 ) due to node availability -+ -+Executing Cluster Transition: -+ * Pseudo action: grp_stop_0 -+ * Resource action: vip stop on rhel8-3 -+ * Pseudo action: grp_stopped_0 -+Using the original execution date of: 2023-06-20 20:45:06Z -+ -+Revised Cluster Status: -+ * Node List: -+ * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started rhel8-1 -+ * vip-dep (ocf:pacemaker:Dummy): Started rhel8-3 -+ * Resource Group: grp: -+ * foo (ocf:pacemaker:Dummy): Started rhel8-4 -+ * bar (ocf:pacemaker:Dummy): Started rhel8-4 -+ * vip (ocf:pacemaker:Dummy): Stopped -diff --git a/cts/scheduler/xml/coloc-with-inner-group-member.xml b/cts/scheduler/xml/coloc-with-inner-group-member.xml -new file mode 100644 -index 00000000000..c07edecb81a ---- /dev/null -+++ 
b/cts/scheduler/xml/coloc-with-inner-group-member.xml -@@ -0,0 +1,258 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - -From 7fa4999f96d7541ee0dad248477c3e7d4affff00 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 20 Jun 2023 19:23:18 -0500 -Subject: [PATCH 12/17] Fix: scheduler: consider explicit colocations with - group members - -Previously, a group's colocations would include only colocations explicitly -with the group itself, and with its first member (for "group with" colocations) -or last member (for "with group" colocations). Explicit colocations with a -different group member could cause incorrect node assignment. - -Fixes T679 ---- - lib/pacemaker/pcmk_sched_group.c | 70 +++++++++++++++++++++------- - lib/pacemaker/pcmk_sched_primitive.c | 52 ++++++++++++++------- - 2 files changed, 90 insertions(+), 32 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c -index 1b6c5c416ab..95e2d77aa5f 100644 ---- a/lib/pacemaker/pcmk_sched_group.c -+++ b/lib/pacemaker/pcmk_sched_group.c -@@ -674,16 +674,36 @@ pcmk__with_group_colocations(const pe_resource_t *rsc, - } - - /* "With this" colocations are needed only for the group itself and for its -- * last member. Add the group's colocations plus any relevant -- * parent colocations if cloned. -+ * last member. (Previous members will chain via the group internal -+ * colocations.) 
- */ -- if ((rsc == orig_rsc) || (orig_rsc == pe__last_group_member(rsc))) { -- crm_trace("Adding 'with %s' colocations to list for %s", -- rsc->id, orig_rsc->id); -- pcmk__add_with_this_list(list, rsc->rsc_cons_lhs); -- if (rsc->parent != NULL) { // Cloned group -- rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc, -- list); -+ if ((orig_rsc != rsc) && (orig_rsc != pe__last_group_member(rsc))) { -+ return; -+ } -+ -+ pe_rsc_trace(rsc, "Adding 'with %s' colocations to list for %s", -+ rsc->id, orig_rsc->id); -+ -+ // Add the group's own colocations -+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs); -+ -+ // If cloned, add any relevant colocations with the clone -+ if (rsc->parent != NULL) { -+ rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc, -+ list); -+ } -+ -+ if (!pe__group_flag_is_set(rsc, pe__group_colocated)) { -+ // @COMPAT Non-colocated groups are deprecated -+ return; -+ } -+ -+ // Add explicit colocations with the group's (other) children -+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { -+ pe_resource_t *member = iter->data; -+ -+ if (member != orig_rsc) { -+ member->cmds->with_this_colocations(member, orig_rsc, list); - } - } - } -@@ -693,6 +713,8 @@ void - pcmk__group_with_colocations(const pe_resource_t *rsc, - const pe_resource_t *orig_rsc, GList **list) - { -+ const pe_resource_t *member = NULL; -+ - CRM_CHECK((rsc != NULL) && (rsc->variant == pe_group) - && (orig_rsc != NULL) && (list != NULL), - return); -@@ -702,18 +724,35 @@ pcmk__group_with_colocations(const pe_resource_t *rsc, - return; - } - -- /* Colocations for the group itself, or for its first member, consist of the -- * group's colocations plus any relevant parent colocations if cloned. -+ /* "This with" colocations are normally needed only for the group itself and -+ * for its first member. - */ - if ((rsc == orig_rsc) - || (orig_rsc == (const pe_resource_t *) rsc->children->data)) { -- crm_trace("Adding '%s with' colocations to list for %s", -- rsc->id, orig_rsc->id); -+ pe_rsc_trace(rsc, "Adding '%s with' colocations to list for %s", -+ rsc->id, orig_rsc->id); -+ -+ // Add the group's own colocations - pcmk__add_this_with_list(list, rsc->rsc_cons); -- if (rsc->parent != NULL) { // Cloned group -+ -+ // If cloned, add any relevant colocations involving the clone -+ if (rsc->parent != NULL) { - rsc->parent->cmds->this_with_colocations(rsc->parent, orig_rsc, - list); - } -+ -+ if (!pe__group_flag_is_set(rsc, pe__group_colocated)) { -+ // @COMPAT Non-colocated groups are deprecated -+ return; -+ } -+ -+ // Add explicit colocations involving the group's (other) children -+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { -+ member = iter->data; -+ if (member != orig_rsc) { -+ member->cmds->this_with_colocations(member, orig_rsc, list); -+ } -+ } - return; - } - -@@ -723,8 +762,7 @@ pcmk__group_with_colocations(const pe_resource_t *rsc, - * happen, so the group's mandatory colocations must be explicitly added. 
- */ - for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { -- const pe_resource_t *member = (const pe_resource_t *) iter->data; -- -+ member = iter->data; - if (orig_rsc == member) { - break; // We've seen all earlier members, and none are unmanaged - } -diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c -index d6b39e38c5f..bfc6fc7fedd 100644 ---- a/lib/pacemaker/pcmk_sched_primitive.c -+++ b/lib/pacemaker/pcmk_sched_primitive.c -@@ -1069,15 +1069,25 @@ void - pcmk__with_primitive_colocations(const pe_resource_t *rsc, - const pe_resource_t *orig_rsc, GList **list) - { -- // Primitives don't have children, so rsc should also be orig_rsc -- CRM_CHECK((rsc != NULL) && (rsc->variant == pe_native) -- && (rsc == orig_rsc) && (list != NULL), -- return); -+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native) && (list != NULL)); - -- // Add primitive's own colocations plus any relevant ones from parent -- pcmk__add_with_this_list(list, rsc->rsc_cons_lhs); -- if (rsc->parent != NULL) { -- rsc->parent->cmds->with_this_colocations(rsc->parent, rsc, list); -+ if (rsc == orig_rsc) { -+ /* For the resource itself, add all of its own colocations and relevant -+ * colocations from its parent (if any). -+ */ -+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs); -+ if (rsc->parent != NULL) { -+ rsc->parent->cmds->with_this_colocations(rsc->parent, rsc, list); -+ } -+ } else { -+ // For an ancestor, add only explicitly configured constraints -+ for (GList *iter = rsc->rsc_cons_lhs; iter != NULL; iter = iter->next) { -+ pcmk__colocation_t *colocation = iter->data; -+ -+ if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) { -+ pcmk__add_with_this(list, colocation); -+ } -+ } - } - } - -@@ -1088,15 +1098,25 @@ void - pcmk__primitive_with_colocations(const pe_resource_t *rsc, - const pe_resource_t *orig_rsc, GList **list) - { -- // Primitives don't have children, so rsc should also be orig_rsc -- CRM_CHECK((rsc != NULL) && (rsc->variant == pe_native) -- && (rsc == orig_rsc) && (list != NULL), -- return); -+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native) && (list != NULL)); - -- // Add primitive's own colocations plus any relevant ones from parent -- pcmk__add_this_with_list(list, rsc->rsc_cons); -- if (rsc->parent != NULL) { -- rsc->parent->cmds->this_with_colocations(rsc->parent, rsc, list); -+ if (rsc == orig_rsc) { -+ /* For the resource itself, add all of its own colocations and relevant -+ * colocations from its parent (if any). 
-+ */ -+ pcmk__add_this_with_list(list, rsc->rsc_cons); -+ if (rsc->parent != NULL) { -+ rsc->parent->cmds->this_with_colocations(rsc->parent, rsc, list); -+ } -+ } else { -+ // For an ancestor, add only explicitly configured constraints -+ for (GList *iter = rsc->rsc_cons; iter != NULL; iter = iter->next) { -+ pcmk__colocation_t *colocation = iter->data; -+ -+ if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) { -+ pcmk__add_this_with(list, colocation); -+ } -+ } - } - } - - -From e9e734eabf147a827c8bc6731da4c54b2a4d8658 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 27 Jun 2023 10:31:18 -0500 -Subject: [PATCH 13/17] Test: scheduler: update test output for group - colocation fix - ---- - .../dot/coloc-with-inner-group-member.dot | 32 ++++ - .../exp/coloc-with-inner-group-member.exp | 176 +++++++++++++++++- - .../coloc-with-inner-group-member.scores | 14 +- - .../coloc-with-inner-group-member.summary | 20 +- - 4 files changed, 225 insertions(+), 17 deletions(-) - -diff --git a/cts/scheduler/dot/coloc-with-inner-group-member.dot b/cts/scheduler/dot/coloc-with-inner-group-member.dot -index 77e1a8e6e40..a3bad7aab12 100644 ---- a/cts/scheduler/dot/coloc-with-inner-group-member.dot -+++ b/cts/scheduler/dot/coloc-with-inner-group-member.dot -@@ -1,8 +1,40 @@ - digraph "g" { -+"bar_monitor_10000 rhel8-3" [ style=bold color="green" fontcolor="black"] -+"bar_start_0 rhel8-3" -> "bar_monitor_10000 rhel8-3" [ style = bold] -+"bar_start_0 rhel8-3" -> "grp_running_0" [ style = bold] -+"bar_start_0 rhel8-3" -> "vip_start_0 rhel8-3" [ style = bold] -+"bar_start_0 rhel8-3" [ style=bold color="green" fontcolor="black"] -+"bar_stop_0 rhel8-4" -> "bar_start_0 rhel8-3" [ style = bold] -+"bar_stop_0 rhel8-4" -> "foo_stop_0 rhel8-4" [ style = bold] -+"bar_stop_0 rhel8-4" -> "grp_stopped_0" [ style = bold] -+"bar_stop_0 rhel8-4" [ style=bold color="green" fontcolor="black"] -+"foo_monitor_10000 rhel8-3" [ style=bold color="green" fontcolor="black"] -+"foo_start_0 rhel8-3" -> "bar_start_0 rhel8-3" [ style = bold] -+"foo_start_0 rhel8-3" -> "foo_monitor_10000 rhel8-3" [ style = bold] -+"foo_start_0 rhel8-3" -> "grp_running_0" [ style = bold] -+"foo_start_0 rhel8-3" [ style=bold color="green" fontcolor="black"] -+"foo_stop_0 rhel8-4" -> "foo_start_0 rhel8-3" [ style = bold] -+"foo_stop_0 rhel8-4" -> "grp_stopped_0" [ style = bold] -+"foo_stop_0 rhel8-4" [ style=bold color="green" fontcolor="black"] -+"grp_running_0" [ style=bold color="green" fontcolor="orange"] -+"grp_start_0" -> "bar_start_0 rhel8-3" [ style = bold] -+"grp_start_0" -> "foo_start_0 rhel8-3" [ style = bold] -+"grp_start_0" -> "grp_running_0" [ style = bold] -+"grp_start_0" -> "vip_start_0 rhel8-3" [ style = bold] -+"grp_start_0" [ style=bold color="green" fontcolor="orange"] -+"grp_stop_0" -> "bar_stop_0 rhel8-4" [ style = bold] -+"grp_stop_0" -> "foo_stop_0 rhel8-4" [ style = bold] - "grp_stop_0" -> "grp_stopped_0" [ style = bold] - "grp_stop_0" -> "vip_stop_0 rhel8-3" [ style = bold] - "grp_stop_0" [ style=bold color="green" fontcolor="orange"] -+"grp_stopped_0" -> "grp_start_0" [ style = bold] - "grp_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"vip_monitor_10000 rhel8-3" [ style=bold color="green" fontcolor="black"] -+"vip_start_0 rhel8-3" -> "grp_running_0" [ style = bold] -+"vip_start_0 rhel8-3" -> "vip_monitor_10000 rhel8-3" [ style = bold] -+"vip_start_0 rhel8-3" [ style=bold color="green" fontcolor="black"] -+"vip_stop_0 rhel8-3" -> "bar_stop_0 rhel8-4" [ style = bold] - "vip_stop_0 rhel8-3" -> 
"grp_stopped_0" [ style = bold] -+"vip_stop_0 rhel8-3" -> "vip_start_0 rhel8-3" [ style = bold] - "vip_stop_0 rhel8-3" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/exp/coloc-with-inner-group-member.exp b/cts/scheduler/exp/coloc-with-inner-group-member.exp -index e6d94d5fe7f..bb8f779feb1 100644 ---- a/cts/scheduler/exp/coloc-with-inner-group-member.exp -+++ b/cts/scheduler/exp/coloc-with-inner-group-member.exp -@@ -1,22 +1,28 @@ - - - -- -+ - - - - - -- -+ - - -- -+ -+ -+ -+ -+ -+ -+ - - - - - -- -+ - - - -@@ -24,14 +30,172 @@ - - - -- -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - - - - -- -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - -diff --git a/cts/scheduler/scores/coloc-with-inner-group-member.scores b/cts/scheduler/scores/coloc-with-inner-group-member.scores -index 10fe944cb42..8d1c6f621c1 100644 ---- a/cts/scheduler/scores/coloc-with-inner-group-member.scores -+++ b/cts/scheduler/scores/coloc-with-inner-group-member.scores -@@ -26,17 +26,17 @@ pcmk__primitive_assign: Fencing allocation score on rhel8-4: 0 - pcmk__primitive_assign: Fencing allocation score on rhel8-5: 0 - pcmk__primitive_assign: bar allocation score on rhel8-1: -INFINITY - pcmk__primitive_assign: bar allocation score on rhel8-2: -INFINITY --pcmk__primitive_assign: bar allocation score on rhel8-3: -INFINITY --pcmk__primitive_assign: bar allocation score on rhel8-4: 0 -+pcmk__primitive_assign: bar allocation score on rhel8-3: 0 -+pcmk__primitive_assign: bar allocation score on rhel8-4: -INFINITY - pcmk__primitive_assign: bar allocation score on rhel8-5: -INFINITY --pcmk__primitive_assign: foo allocation score on rhel8-1: 0 --pcmk__primitive_assign: foo allocation score on rhel8-2: 0 -+pcmk__primitive_assign: foo allocation score on rhel8-1: -INFINITY -+pcmk__primitive_assign: foo allocation score on rhel8-2: -INFINITY - pcmk__primitive_assign: foo allocation score on rhel8-3: 0 --pcmk__primitive_assign: foo allocation score on rhel8-4: 0 --pcmk__primitive_assign: foo allocation score on rhel8-5: 0 -+pcmk__primitive_assign: foo allocation score on rhel8-4: -INFINITY -+pcmk__primitive_assign: foo allocation score on rhel8-5: -INFINITY - pcmk__primitive_assign: vip allocation score on rhel8-1: -INFINITY - pcmk__primitive_assign: vip allocation score on rhel8-2: -INFINITY --pcmk__primitive_assign: vip allocation score on rhel8-3: -INFINITY -+pcmk__primitive_assign: vip allocation score on rhel8-3: 0 - pcmk__primitive_assign: vip allocation score on rhel8-4: -INFINITY - pcmk__primitive_assign: vip allocation score on rhel8-5: -INFINITY - pcmk__primitive_assign: vip-dep allocation score on rhel8-1: 0 -diff --git a/cts/scheduler/summary/coloc-with-inner-group-member.summary b/cts/scheduler/summary/coloc-with-inner-group-member.summary -index 3e87f0867ef..6659721a79c 100644 ---- a/cts/scheduler/summary/coloc-with-inner-group-member.summary -+++ b/cts/scheduler/summary/coloc-with-inner-group-member.summary -@@ -12,12 +12,24 @@ Current cluster status: - * vip (ocf:pacemaker:Dummy): Started rhel8-3 - - Transition Summary: -- * Stop vip ( rhel8-3 ) due to node availability -+ * Move foo ( rhel8-4 -> rhel8-3 
) -+ * Move bar ( rhel8-4 -> rhel8-3 ) -+ * Restart vip ( rhel8-3 ) due to required bar start - - Executing Cluster Transition: - * Pseudo action: grp_stop_0 - * Resource action: vip stop on rhel8-3 -+ * Resource action: bar stop on rhel8-4 -+ * Resource action: foo stop on rhel8-4 - * Pseudo action: grp_stopped_0 -+ * Pseudo action: grp_start_0 -+ * Resource action: foo start on rhel8-3 -+ * Resource action: bar start on rhel8-3 -+ * Resource action: vip start on rhel8-3 -+ * Resource action: vip monitor=10000 on rhel8-3 -+ * Pseudo action: grp_running_0 -+ * Resource action: foo monitor=10000 on rhel8-3 -+ * Resource action: bar monitor=10000 on rhel8-3 - Using the original execution date of: 2023-06-20 20:45:06Z - - Revised Cluster Status: -@@ -28,6 +40,6 @@ Revised Cluster Status: - * Fencing (stonith:fence_xvm): Started rhel8-1 - * vip-dep (ocf:pacemaker:Dummy): Started rhel8-3 - * Resource Group: grp: -- * foo (ocf:pacemaker:Dummy): Started rhel8-4 -- * bar (ocf:pacemaker:Dummy): Started rhel8-4 -- * vip (ocf:pacemaker:Dummy): Stopped -+ * foo (ocf:pacemaker:Dummy): Started rhel8-3 -+ * bar (ocf:pacemaker:Dummy): Started rhel8-3 -+ * vip (ocf:pacemaker:Dummy): Started rhel8-3 - -From 9ada709b568cf5050f768b83e4682a8b93d1b361 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 29 Jun 2023 09:01:41 -0500 -Subject: [PATCH 14/17] Fix: CIB: be more strict about ignoring colocation - elements without an ID - -Callers of pcmk__unpack_colocation() have more context about the element being -unpacked, so the checks are done there. ---- - lib/pacemaker/pcmk_sched_colocation.c | 24 ++++++++++++++++++------ - 1 file changed, 18 insertions(+), 6 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c -index e0b39b59e81..a2baddbbb5c 100644 ---- a/lib/pacemaker/pcmk_sched_colocation.c -+++ b/lib/pacemaker/pcmk_sched_colocation.c -@@ -886,23 +886,30 @@ pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set) - xmlNode *expanded_xml = NULL; - - const char *id = crm_element_value(xml_obj, XML_ATTR_ID); -- const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); -- const char *influence_s = crm_element_value(xml_obj, -- XML_COLOC_ATTR_INFLUENCE); -+ const char *score = NULL; -+ const char *influence_s = NULL; - -- if (score) { -- score_i = char2score(score); -+ if (pcmk__str_empty(id)) { -+ pcmk__config_err("Ignoring " XML_CONS_TAG_RSC_DEPEND -+ " without " CRM_ATTR_ID); -+ return; - } - - if (unpack_colocation_tags(xml_obj, &expanded_xml, - data_set) != pcmk_rc_ok) { - return; - } -- if (expanded_xml) { -+ if (expanded_xml != NULL) { - orig_xml = xml_obj; - xml_obj = expanded_xml; - } - -+ score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); -+ if (score != NULL) { -+ score_i = char2score(score); -+ } -+ influence_s = crm_element_value(xml_obj, XML_COLOC_ATTR_INFLUENCE); -+ - for (set = first_named_child(xml_obj, XML_CONS_TAG_RSC_SET); set != NULL; - set = crm_next_same_xml(set)) { - -@@ -914,6 +921,11 @@ pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set) - return; - } - -+ if (pcmk__str_empty(ID(set))) { -+ pcmk__config_err("Ignoring " XML_CONS_TAG_RSC_SET -+ " without " CRM_ATTR_ID); -+ continue; -+ } - unpack_colocation_set(set, score_i, id, influence_s, data_set); - - if (last != NULL) { - -From e830a9663c80ea348eff694a8e71a1e07d380690 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 29 Jun 2023 09:40:57 -0500 -Subject: [PATCH 15/17] Log: scheduler: improve colocation unpacking messages - 
(and comments) - ---- - lib/pacemaker/pcmk_sched_colocation.c | 60 ++++++++++++++------------- - 1 file changed, 32 insertions(+), 28 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c -index a2baddbbb5c..9c9195ed02c 100644 ---- a/lib/pacemaker/pcmk_sched_colocation.c -+++ b/lib/pacemaker/pcmk_sched_colocation.c -@@ -136,13 +136,13 @@ pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation) - { - CRM_ASSERT((list != NULL) && (colocation != NULL)); - -- crm_trace("Adding colocation %s (%s with %s%s%s @%d) " -+ crm_trace("Adding colocation %s (%s with %s%s%s @%s) " - "to 'this with' list", - colocation->id, colocation->dependent->id, - colocation->primary->id, - (colocation->node_attribute == NULL)? "" : " using ", - pcmk__s(colocation->node_attribute, ""), -- colocation->score); -+ pcmk_readable_score(colocation->score)); - *list = g_list_insert_sorted(*list, (gpointer) colocation, - cmp_primary_priority); - } -@@ -187,13 +187,13 @@ pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation) - { - CRM_ASSERT((list != NULL) && (colocation != NULL)); - -- crm_trace("Adding colocation %s (%s with %s%s%s @%d) " -+ crm_trace("Adding colocation %s (%s with %s%s%s @%s) " - "to 'with this' list", - colocation->id, colocation->dependent->id, - colocation->primary->id, - (colocation->node_attribute == NULL)? "" : " using ", - pcmk__s(colocation->node_attribute, ""), -- colocation->score); -+ pcmk_readable_score(colocation->score)); - *list = g_list_insert_sorted(*list, (gpointer) colocation, - cmp_dependent_priority); - } -@@ -339,10 +339,6 @@ pcmk__new_colocation(const char *id, const char *node_attr, int score, - node_attr = CRM_ATTR_UNAME; - } - -- pe_rsc_trace(dependent, "Added colocation %s (%s with %s @%s using %s)", -- new_con->id, dependent->id, primary->id, -- pcmk_readable_score(score), node_attr); -- - pcmk__add_this_with(&(dependent->rsc_cons), new_con); - pcmk__add_with_this(&(primary->rsc_cons_lhs), new_con); - -@@ -495,8 +491,6 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - other = pcmk__find_constraint_resource(data_set->resources, - xml_rsc_id); - CRM_ASSERT(other != NULL); // We already processed it -- pe_rsc_trace(resource, "Anti-Colocating %s with %s", -- resource->id, other->id); - pcmk__new_colocation(set_id, NULL, local_score, - resource, other, role, role, flags); - } -@@ -504,9 +498,21 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - } - } - -+/*! 
-+ * \internal -+ * \brief Colocate two resource sets relative to each other -+ * -+ * \param[in] id Colocation XML ID -+ * \param[in] set1 Dependent set -+ * \param[in] set2 Primary set -+ * \param[in] score Colocation score -+ * \param[in] influence_s Value of colocation's "influence" attribute -+ * \param[in,out] data_set Cluster working set -+ */ - static void --colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, -- const char *influence_s, pe_working_set_t *data_set) -+colocate_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2, -+ int score, const char *influence_s, -+ pe_working_set_t *data_set) - { - xmlNode *xml_rsc = NULL; - pe_resource_t *rsc_1 = NULL; -@@ -521,8 +527,8 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - uint32_t flags = pcmk__coloc_none; - - if (score == 0) { -- crm_trace("Ignoring colocation '%s' between sets because score is 0", -- id); -+ crm_trace("Ignoring colocation '%s' between sets %s and %s " -+ "because score is 0", id, ID(set1), ID(set2)); - return; - } - -@@ -562,12 +568,12 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - } - } - -- if ((rsc_1 != NULL) && (rsc_2 != NULL)) { -+ if ((rsc_1 != NULL) && (rsc_2 != NULL)) { // Both sets are sequential - flags = pcmk__coloc_explicit | unpack_influence(id, rsc_1, influence_s); - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, role_2, - flags); - -- } else if (rsc_1 != NULL) { -+ } else if (rsc_1 != NULL) { // Only set1 is sequential - flags = pcmk__coloc_explicit | unpack_influence(id, rsc_1, influence_s); - for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF); - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { -@@ -576,19 +582,17 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - rsc_2 = pcmk__find_constraint_resource(data_set->resources, - xml_rsc_id); - if (rsc_2 == NULL) { -- pcmk__config_err("%s: No resource found for %s", -- id, xml_rsc_id); - // Should be possible only with validation disabled -- pcmk__config_err("Ignoring resource %s and later in set %s " -- "for colocation with set %s: No such resource", -- xml_rsc_id, set2, set1); -+ pcmk__config_err("Ignoring set %s colocation with resource %s " -+ "and later in set %s: No such resource", -+ ID(set1), xml_rsc_id, ID(set2)); - return; - } - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, - role_2, flags); - } - -- } else if (rsc_2 != NULL) { -+ } else if (rsc_2 != NULL) { // Only set2 is sequential - for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF); - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { - -@@ -599,7 +603,7 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - // Should be possible only with validation disabled - pcmk__config_err("Ignoring resource %s and later in set %s " - "for colocation with set %s: No such resource", -- xml_rsc_id, set1, set2); -+ xml_rsc_id, ID(set1), ID(set2)); - return; - } - flags = pcmk__coloc_explicit -@@ -608,7 +612,7 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - role_2, flags); - } - -- } else { -+ } else { // Neither set is sequential - for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF); - xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) { - -@@ -621,7 +625,7 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - // Should be possible only with validation disabled - pcmk__config_err("Ignoring resource %s and later in set %s 
" - "for colocation with set %s: No such resource", -- xml_rsc_id, set1, set2); -+ xml_rsc_id, ID(set1), ID(set2)); - return; - } - -@@ -636,10 +640,10 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score, - xml_rsc_id); - if (rsc_2 == NULL) { - // Should be possible only with validation disabled -- pcmk__config_err("Ignoring resource %s and later in set %s " -- "for colocation with %s in set %s: " -+ pcmk__config_err("Ignoring set %s resource %s colocation with " -+ "resource %s and later in set %s: " - "No such resource", -- xml_rsc_id, set2, ID(xml_rsc), set1); -+ ID(set1), ID(xml_rsc), xml_rsc_id, ID(set2)); - return; - } - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, - -From 737d74b656cad7b5514397bb461b8a18fb5590df Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 29 Jun 2023 09:49:13 -0500 -Subject: [PATCH 16/17] Low: scheduler: continue with non-sequential set - members after error - ---- - lib/pacemaker/pcmk_sched_colocation.c | 30 +++++++++++++-------------- - 1 file changed, 15 insertions(+), 15 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c -index 9c9195ed02c..3e094a4b87b 100644 ---- a/lib/pacemaker/pcmk_sched_colocation.c -+++ b/lib/pacemaker/pcmk_sched_colocation.c -@@ -584,9 +584,9 @@ colocate_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2, - if (rsc_2 == NULL) { - // Should be possible only with validation disabled - pcmk__config_err("Ignoring set %s colocation with resource %s " -- "and later in set %s: No such resource", -+ "in set %s: No such resource", - ID(set1), xml_rsc_id, ID(set2)); -- return; -+ continue; - } - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, - role_2, flags); -@@ -601,10 +601,10 @@ colocate_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2, - xml_rsc_id); - if (rsc_1 == NULL) { - // Should be possible only with validation disabled -- pcmk__config_err("Ignoring resource %s and later in set %s " -- "for colocation with set %s: No such resource", -- xml_rsc_id, ID(set1), ID(set2)); -- return; -+ pcmk__config_err("Ignoring colocation of set %s resource %s " -+ "with set %s: No such resource", -+ ID(set1), xml_rsc_id, ID(set2)); -+ continue; - } - flags = pcmk__coloc_explicit - | unpack_influence(id, rsc_1, influence_s); -@@ -623,10 +623,10 @@ colocate_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2, - xml_rsc_id); - if (rsc_1 == NULL) { - // Should be possible only with validation disabled -- pcmk__config_err("Ignoring resource %s and later in set %s " -- "for colocation with set %s: No such resource", -- xml_rsc_id, ID(set1), ID(set2)); -- return; -+ pcmk__config_err("Ignoring colocation of set %s resource %s " -+ "with set %s: No such resource", -+ ID(set1), xml_rsc_id, ID(set2)); -+ continue; - } - - flags = pcmk__coloc_explicit -@@ -640,11 +640,11 @@ colocate_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2, - xml_rsc_id); - if (rsc_2 == NULL) { - // Should be possible only with validation disabled -- pcmk__config_err("Ignoring set %s resource %s colocation with " -- "resource %s and later in set %s: " -- "No such resource", -- ID(set1), ID(xml_rsc), xml_rsc_id, ID(set2)); -- return; -+ pcmk__config_err("Ignoring colocation of set %s resource " -+ "%s with set %s resource %s: No such " -+ "resource", ID(set1), ID(xml_rsc), -+ ID(set2), xml_rsc_id); -+ continue; - } - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, - role_1, role_2, flags); - -From 
d9c8593f17975371e64e0c187bc8234e901349a9 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 29 Jun 2023 09:49:55 -0500 -Subject: [PATCH 17/17] Refactor: scheduler: make some variables const that can - be - ---- - lib/pacemaker/pcmk_sched_group.c | 9 +++++---- - 1 file changed, 5 insertions(+), 4 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c -index 95e2d77aa5f..a2bf5f6dcd4 100644 ---- a/lib/pacemaker/pcmk_sched_group.c -+++ b/lib/pacemaker/pcmk_sched_group.c -@@ -699,8 +699,8 @@ pcmk__with_group_colocations(const pe_resource_t *rsc, - } - - // Add explicit colocations with the group's (other) children -- for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { -- pe_resource_t *member = iter->data; -+ for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) { -+ const pe_resource_t *member = iter->data; - - if (member != orig_rsc) { - member->cmds->with_this_colocations(member, orig_rsc, list); -@@ -747,7 +747,8 @@ pcmk__group_with_colocations(const pe_resource_t *rsc, - } - - // Add explicit colocations involving the group's (other) children -- for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { -+ for (const GList *iter = rsc->children; -+ iter != NULL; iter = iter->next) { - member = iter->data; - if (member != orig_rsc) { - member->cmds->this_with_colocations(member, orig_rsc, list); -@@ -761,7 +762,7 @@ pcmk__group_with_colocations(const pe_resource_t *rsc, - * However, if an earlier group member is unmanaged, this chaining will not - * happen, so the group's mandatory colocations must be explicitly added. - */ -- for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { -+ for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) { - member = iter->data; - if (orig_rsc == member) { - break; // We've seen all earlier members, and none are unmanaged diff --git a/003-clone-shuffle.patch b/003-clone-shuffle.patch deleted file mode 100644 index e615705..0000000 --- a/003-clone-shuffle.patch +++ /dev/null @@ -1,15630 +0,0 @@ -From eae355ca4c869f7ccf1ad3d1f5ce488375a6f353 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Tue, 20 Apr 2021 12:55:45 -0700 -Subject: [PATCH 01/19] Refactor: libpe_status: Add pe__rsc_node flag enum - -This commit adds a new pe__rsc_node flag enum containing values for -assigned, current, and pending. This indicates the criterion used to -look up a resource's location. - -After a compatibility break, native_location() could use these flags -instead of an int. - -Signed-off-by: Reid Wahl ---- - include/crm/pengine/internal.h | 14 ++++++++++++++ - lib/pengine/native.c | 1 + - 2 files changed, 15 insertions(+) - -diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h -index 1b5f6f1d8d9..8fcb9c991f3 100644 ---- a/include/crm/pengine/internal.h -+++ b/include/crm/pengine/internal.h -@@ -235,6 +235,19 @@ bool pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node); - - void add_hash_param(GHashTable * hash, const char *name, const char *value); - -+/*! 
-+ * \internal -+ * \enum pe__rsc_node -+ * \brief Type of resource location lookup to perform -+ */ -+enum pe__rsc_node { -+ pe__rsc_node_assigned = 0, //!< Where resource is assigned -+ pe__rsc_node_current = 1, //!< Where resource is running -+ -+ // @COMPAT: Use in native_location() at a compatibility break -+ pe__rsc_node_pending = 2, //!< Where resource is pending -+}; -+ - char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name, - pe_working_set_t * data_set); - pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current); -@@ -576,6 +589,7 @@ bool pe__bundle_needs_remote_name(pe_resource_t *rsc); - const char *pe__add_bundle_remote_name(pe_resource_t *rsc, - pe_working_set_t *data_set, - xmlNode *xml, const char *field); -+ - const char *pe_node_attribute_calculated(const pe_node_t *node, - const char *name, - const pe_resource_t *rsc); -diff --git a/lib/pengine/native.c b/lib/pengine/native.c -index 5e92ddcefdf..44d4805ac56 100644 ---- a/lib/pengine/native.c -+++ b/lib/pengine/native.c -@@ -1092,6 +1092,7 @@ native_resource_state(const pe_resource_t * rsc, gboolean current) - pe_node_t * - native_location(const pe_resource_t *rsc, GList **list, int current) - { -+ // @COMPAT: Accept a pe__rsc_node argument instead of int current - pe_node_t *one = NULL; - GList *result = NULL; - - -From 809b9c2ea13e5f32bfa6eecf3482eb257802b92d Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Sun, 11 Sep 2022 19:36:07 -0700 -Subject: [PATCH 02/19] Refactor: libpe_status: pe_node_attribute_calculated() - accepts node type - -Use enum pe__rsc_node in pe_node_attribute_calculated() to determine -which container host (assigned or current) to get the attribute value -from. For now, there's no use case for pending. - -Pass pe__rsc_node_current for existing calls, since that maintains the -existing behavior. 
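
[Illustrative aside, not part of the original patch: a sketch of how a caller selects the lookup type with the widened signature. It assumes the internal header from this series is in scope; the wrapper name is ours.]

    #include <crm/pengine/internal.h> /* pe__rsc_node,
                                       * pe_node_attribute_calculated() */

    /* Existing callers pass pe__rsc_node_current to keep the old behavior
     * of reading the attribute from the container's current host;
     * pe__rsc_node_assigned reads it from the host the scheduler chose.
     */
    static const char *
    attr_on_current_host(const pe_node_t *node, const char *name,
                         const pe_resource_t *rsc)
    {
        return pe_node_attribute_calculated(node, name, rsc,
                                            pe__rsc_node_current);
    }
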
- -Signed-off-by: Reid Wahl ---- - include/crm/pengine/internal.h | 3 +- - lib/pacemaker/pcmk_sched_location.c | 5 ++- - lib/pacemaker/pcmk_sched_promotable.c | 3 +- - lib/pengine/common.c | 60 ++++++++++++++++++++++----- - 4 files changed, 57 insertions(+), 14 deletions(-) - -diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h -index 8fcb9c991f3..ef8c382f62b 100644 ---- a/include/crm/pengine/internal.h -+++ b/include/crm/pengine/internal.h -@@ -592,7 +592,8 @@ const char *pe__add_bundle_remote_name(pe_resource_t *rsc, - - const char *pe_node_attribute_calculated(const pe_node_t *node, - const char *name, -- const pe_resource_t *rsc); -+ const pe_resource_t *rsc, -+ enum pe__rsc_node node_type); - const char *pe_node_attribute_raw(const pe_node_t *node, const char *name); - bool pe__is_universal_clone(const pe_resource_t *rsc, - const pe_working_set_t *data_set); -diff --git a/lib/pacemaker/pcmk_sched_location.c b/lib/pacemaker/pcmk_sched_location.c -index b4ce4ff07dc..5f42ec0fc8c 100644 ---- a/lib/pacemaker/pcmk_sched_location.c -+++ b/lib/pacemaker/pcmk_sched_location.c -@@ -31,7 +31,10 @@ get_node_score(const char *rule, const char *score, bool raw, - score_f = char2score(score); - - } else { -- const char *attr_score = pe_node_attribute_calculated(node, score, rsc); -+ const char *attr_score = NULL; -+ -+ attr_score = pe_node_attribute_calculated(node, score, rsc, -+ pe__rsc_node_current); - - if (attr_score == NULL) { - crm_debug("Rule %s: %s did not have a value for %s", -diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c -index d12d017bab2..2bad1d0c487 100644 ---- a/lib/pacemaker/pcmk_sched_promotable.c -+++ b/lib/pacemaker/pcmk_sched_promotable.c -@@ -649,7 +649,8 @@ promotion_attr_value(const pe_resource_t *rsc, const pe_node_t *node, - CRM_CHECK((rsc != NULL) && (node != NULL) && (name != NULL), return NULL); - - attr_name = pcmk_promotion_score_name(name); -- attr_value = pe_node_attribute_calculated(node, attr_name, rsc); -+ attr_value = pe_node_attribute_calculated(node, attr_name, rsc, -+ pe__rsc_node_current); - free(attr_name); - return attr_value; - } -diff --git a/lib/pengine/common.c b/lib/pengine/common.c -index 6c69bfcb41a..af41c1f6e89 100644 ---- a/lib/pengine/common.c -+++ b/lib/pengine/common.c -@@ -516,9 +516,15 @@ add_hash_param(GHashTable * hash, const char *name, const char *value) - - const char * - pe_node_attribute_calculated(const pe_node_t *node, const char *name, -- const pe_resource_t *rsc) -+ const pe_resource_t *rsc, -+ enum pe__rsc_node node_type) - { -- const char *source; -+ const char *source = NULL; -+ const char *node_type_s = NULL; -+ const char *reason = NULL; -+ -+ const pe_resource_t *container = NULL; -+ const pe_node_t *host = NULL; - - if(node == NULL) { - return NULL; -@@ -539,18 +545,50 @@ pe_node_attribute_calculated(const pe_node_t *node, const char *name, - * storage - */ - -- CRM_ASSERT(node->details->remote_rsc); -- CRM_ASSERT(node->details->remote_rsc->container); -+ CRM_ASSERT(node->details->remote_rsc != NULL); -+ -+ container = node->details->remote_rsc->container; -+ CRM_ASSERT(container != NULL); -+ -+ switch (node_type) { -+ case pe__rsc_node_assigned: -+ node_type_s = "assigned"; -+ host = container->allocated_to; -+ if (host == NULL) { -+ reason = "not assigned"; -+ } -+ break; -+ -+ case pe__rsc_node_current: -+ node_type_s = "current"; - -- if(node->details->remote_rsc->container->running_on) { -- pe_node_t *host = 
node->details->remote_rsc->container->running_on->data; -- pe_rsc_trace(rsc, "%s: Looking for %s on the container host %s", -- rsc->id, name, pe__node_name(host)); -- return g_hash_table_lookup(host->details->attrs, name); -+ if (container->running_on != NULL) { -+ host = container->running_on->data; -+ } -+ if (host == NULL) { -+ reason = "inactive"; -+ } -+ break; -+ -+ default: -+ // Add support for other enum pe__rsc_node values if needed -+ CRM_ASSERT(false); -+ break; - } - -- pe_rsc_trace(rsc, "%s: Not looking for %s on the container host: %s is inactive", -- rsc->id, name, node->details->remote_rsc->container->id); -+ if (host != NULL) { -+ const char *value = g_hash_table_lookup(host->details->attrs, name); -+ -+ pe_rsc_trace(rsc, -+ "%s: Value lookup for %s on %s container host %s %s%s", -+ rsc->id, name, node_type_s, pe__node_name(host), -+ ((value != NULL)? "succeeded: " : "failed"), -+ pcmk__s(value, "")); -+ return value; -+ } -+ pe_rsc_trace(rsc, -+ "%s: Not looking for %s on %s container host: %s is %s", -+ rsc->id, name, node_type_s, container->id, reason); - return NULL; - } - - -From d5a56afd2ecd861e0cf0d1049157e82a034f3f7a Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Thu, 15 Jun 2023 00:34:39 -0700 -Subject: [PATCH 03/19] Fix: libpacemaker: Get container attr from assigned - node, if any - -promotion_attr_value() should get a container's promotion score from -the host to which it's assigned (if it's been assigned), rather than the -host on which it's running. - -Ref T489 - -Signed-off-by: Reid Wahl ---- - lib/pacemaker/pcmk_sched_promotable.c | 10 ++++++---- - 1 file changed, 6 insertions(+), 4 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c -index 2bad1d0c487..8612c25a51d 100644 ---- a/lib/pacemaker/pcmk_sched_promotable.c -+++ b/lib/pacemaker/pcmk_sched_promotable.c -@@ -645,12 +645,14 @@ promotion_attr_value(const pe_resource_t *rsc, const pe_node_t *node, - { - char *attr_name = NULL; - const char *attr_value = NULL; -+ enum pe__rsc_node node_type = pe__rsc_node_assigned; - -- CRM_CHECK((rsc != NULL) && (node != NULL) && (name != NULL), return NULL); -- -+ if (pcmk_is_set(rsc->flags, pe_rsc_provisional)) { -+ // Not assigned yet -+ node_type = pe__rsc_node_current; -+ } - attr_name = pcmk_promotion_score_name(name); -- attr_value = pe_node_attribute_calculated(node, attr_name, rsc, -- pe__rsc_node_current); -+ attr_value = pe_node_attribute_calculated(node, attr_name, rsc, node_type); - free(attr_name); - return attr_value; - } - -From cfc2cd20e15c0f1c6b6ed8517c310acd756c1533 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Mon, 10 Jul 2023 02:26:26 -0700 -Subject: [PATCH 04/19] Test: scheduler: Update outputs for - promotion_attr_value() fix - -Update outputs after previous commit (get container's promotion score -from assigned host). - -There are a few changes to scores, as well as dot and exp files. The -behavior in the bundle-interleave-start test appears to be an -improvement. 
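
[Illustrative aside, not part of the original patches: the fix in PATCH 03/19 reduces to one decision, restated below as a standalone helper. This is a sketch under the same internal headers, with pcmk_is_set() and pe_rsc_provisional used exactly as in the hunk above; the function name is ours.]

    #include <crm/common/util.h>      /* pcmk_is_set() */
    #include <crm/pengine/internal.h> /* pe__rsc_node,
                                       * pe_node_attribute_calculated() */

    /* Host selection from promotion_attr_value(): once the container has
     * been assigned, read the promotion score from the assigned host;
     * while pe_rsc_provisional is still set, fall back to the current one.
     */
    static const char *
    promotion_attr_from_host(const pe_resource_t *rsc, const pe_node_t *node,
                             const char *attr_name)
    {
        enum pe__rsc_node node_type = pe__rsc_node_assigned;

        if (pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
            node_type = pe__rsc_node_current; /* not assigned yet */
        }
        return pe_node_attribute_calculated(node, attr_name, rsc, node_type);
    }
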
- -Ref T489 - -Signed-off-by: Reid Wahl ---- - cts/scheduler/dot/bundle-interleave-start.dot | 44 +- - cts/scheduler/exp/bundle-interleave-start.exp | 556 +++++++++++------- - .../exp/no-promote-on-unrunnable-guest.exp | 14 +- - .../scores/bundle-interleave-start.scores | 12 +- - .../scores/cancel-behind-moving-remote.scores | 2 +- - .../scores/guest-host-not-fenceable.scores | 2 +- - .../no-promote-on-unrunnable-guest.scores | 2 +- - .../summary/bundle-interleave-start.summary | 54 +- - cts/scheduler/xml/bundle-interleave-start.xml | 3 +- - 9 files changed, 445 insertions(+), 244 deletions(-) - -diff --git a/cts/scheduler/dot/bundle-interleave-start.dot b/cts/scheduler/dot/bundle-interleave-start.dot -index bf6ed7f9edb..a513ac5806b 100644 ---- a/cts/scheduler/dot/bundle-interleave-start.dot -+++ b/cts/scheduler/dot/bundle-interleave-start.dot -@@ -41,9 +41,15 @@ - "app-bundle-2_monitor_0 node5" [ style=bold color="green" fontcolor="black"] - "app-bundle-2_monitor_30000 node4" [ style=bold color="green" fontcolor="black"] - "app-bundle-2_start_0 node4" -> "app-bundle-2_monitor_30000 node4" [ style = bold] --"app-bundle-2_start_0 node4" -> "app:2_monitor_16000 app-bundle-2" [ style = bold] -+"app-bundle-2_start_0 node4" -> "app:2_monitor_15000 app-bundle-2" [ style = bold] -+"app-bundle-2_start_0 node4" -> "app:2_promote_0 app-bundle-2" [ style = bold] - "app-bundle-2_start_0 node4" -> "app:2_start_0 app-bundle-2" [ style = bold] - "app-bundle-2_start_0 node4" [ style=bold color="green" fontcolor="black"] -+"app-bundle-clone_promote_0" -> "app:2_promote_0 app-bundle-2" [ style = bold] -+"app-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"] -+"app-bundle-clone_promoted_0" -> "app-bundle_promoted_0" [ style = bold] -+"app-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] -+"app-bundle-clone_running_0" -> "app-bundle-clone_promote_0" [ style = bold] - "app-bundle-clone_running_0" -> "app-bundle_running_0" [ style = bold] - "app-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] - "app-bundle-clone_start_0" -> "app-bundle-clone_running_0" [ style = bold] -@@ -133,8 +139,13 @@ - "app-bundle-podman-2_start_0 node4" -> "app-bundle-2_start_0 node4" [ style = bold] - "app-bundle-podman-2_start_0 node4" -> "app-bundle-podman-2_monitor_60000 node4" [ style = bold] - "app-bundle-podman-2_start_0 node4" -> "app-bundle_running_0" [ style = bold] -+"app-bundle-podman-2_start_0 node4" -> "app:2_promote_0 app-bundle-2" [ style = bold] - "app-bundle-podman-2_start_0 node4" -> "app:2_start_0 app-bundle-2" [ style = bold] - "app-bundle-podman-2_start_0 node4" [ style=bold color="green" fontcolor="black"] -+"app-bundle_promote_0" -> "app-bundle-clone_promote_0" [ style = bold] -+"app-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] -+"app-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] -+"app-bundle_running_0" -> "app-bundle_promote_0" [ style = bold] - "app-bundle_running_0" [ style=bold color="green" fontcolor="orange"] - "app-bundle_start_0" -> "app-bundle-clone_start_0" [ style = bold] - "app-bundle_start_0" -> "app-bundle-podman-0_start_0 node2" [ style = bold] -@@ -151,9 +162,13 @@ - "app:1_start_0 app-bundle-1" -> "app:1_monitor_16000 app-bundle-1" [ style = bold] - "app:1_start_0 app-bundle-1" -> "app:2_start_0 app-bundle-2" [ style = bold] - "app:1_start_0 app-bundle-1" [ style=bold color="green" fontcolor="black"] --"app:2_monitor_16000 app-bundle-2" [ style=bold color="green" fontcolor="black"] 
-+"app:2_monitor_15000 app-bundle-2" [ style=bold color="green" fontcolor="black"] -+"app:2_promote_0 app-bundle-2" -> "app-bundle-clone_promoted_0" [ style = bold] -+"app:2_promote_0 app-bundle-2" -> "app:2_monitor_15000 app-bundle-2" [ style = bold] -+"app:2_promote_0 app-bundle-2" [ style=bold color="green" fontcolor="black"] - "app:2_start_0 app-bundle-2" -> "app-bundle-clone_running_0" [ style = bold] --"app:2_start_0 app-bundle-2" -> "app:2_monitor_16000 app-bundle-2" [ style = bold] -+"app:2_start_0 app-bundle-2" -> "app:2_monitor_15000 app-bundle-2" [ style = bold] -+"app:2_start_0 app-bundle-2" -> "app:2_promote_0 app-bundle-2" [ style = bold] - "app:2_start_0 app-bundle-2" [ style=bold color="green" fontcolor="black"] - "base-bundle-0_monitor_0 node1" -> "base-bundle-0_start_0 node2" [ style = bold] - "base-bundle-0_monitor_0 node1" [ style=bold color="green" fontcolor="black"] -@@ -197,9 +212,15 @@ - "base-bundle-2_monitor_0 node5" [ style=bold color="green" fontcolor="black"] - "base-bundle-2_monitor_30000 node4" [ style=bold color="green" fontcolor="black"] - "base-bundle-2_start_0 node4" -> "base-bundle-2_monitor_30000 node4" [ style = bold] --"base-bundle-2_start_0 node4" -> "base:2_monitor_16000 base-bundle-2" [ style = bold] -+"base-bundle-2_start_0 node4" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] -+"base-bundle-2_start_0 node4" -> "base:2_promote_0 base-bundle-2" [ style = bold] - "base-bundle-2_start_0 node4" -> "base:2_start_0 base-bundle-2" [ style = bold] - "base-bundle-2_start_0 node4" [ style=bold color="green" fontcolor="black"] -+"base-bundle-clone_promote_0" -> "base:2_promote_0 base-bundle-2" [ style = bold] -+"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold] -+"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-clone_running_0" -> "base-bundle-clone_promote_0" [ style = bold] - "base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] - "base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] - "base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] -@@ -289,9 +310,15 @@ - "base-bundle-podman-2_start_0 node4" -> "base-bundle-2_start_0 node4" [ style = bold] - "base-bundle-podman-2_start_0 node4" -> "base-bundle-podman-2_monitor_60000 node4" [ style = bold] - "base-bundle-podman-2_start_0 node4" -> "base-bundle_running_0" [ style = bold] -+"base-bundle-podman-2_start_0 node4" -> "base:2_promote_0 base-bundle-2" [ style = bold] - "base-bundle-podman-2_start_0 node4" -> "base:2_start_0 base-bundle-2" [ style = bold] - "base-bundle-podman-2_start_0 node4" [ style=bold color="green" fontcolor="black"] -+"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold] -+"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle_promoted_0" -> "app-bundle_promote_0" [ style = bold] -+"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] - "base-bundle_running_0" -> "app-bundle_start_0" [ style = bold] -+"base-bundle_running_0" -> "base-bundle_promote_0" [ style = bold] - "base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] - "base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] - "base-bundle_start_0" -> "base-bundle-podman-0_start_0 node2" [ style = bold] -@@ -310,9 +337,14 @@ - "base:1_start_0 base-bundle-1" -> "base:1_monitor_16000 base-bundle-1" [ style = bold] - 
"base:1_start_0 base-bundle-1" -> "base:2_start_0 base-bundle-2" [ style = bold] - "base:1_start_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] --"base:2_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"] -+"base:2_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"] -+"base:2_promote_0 base-bundle-2" -> "app:2_promote_0 app-bundle-2" [ style = bold] -+"base:2_promote_0 base-bundle-2" -> "base-bundle-clone_promoted_0" [ style = bold] -+"base:2_promote_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] -+"base:2_promote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] - "base:2_start_0 base-bundle-2" -> "app-bundle-podman-2_start_0 node4" [ style = bold] - "base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] --"base:2_start_0 base-bundle-2" -> "base:2_monitor_16000 base-bundle-2" [ style = bold] -+"base:2_start_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] -+"base:2_start_0 base-bundle-2" -> "base:2_promote_0 base-bundle-2" [ style = bold] - "base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/exp/bundle-interleave-start.exp b/cts/scheduler/exp/bundle-interleave-start.exp -index e676b1bfba9..57e551c487e 100644 ---- a/cts/scheduler/exp/bundle-interleave-start.exp -+++ b/cts/scheduler/exp/bundle-interleave-start.exp -@@ -1,7 +1,7 @@ - - - -- -+ - - - -@@ -11,13 +11,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -30,13 +30,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -46,13 +46,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -65,18 +65,18 @@ - - - -- -+ - - -- -+ - - - - - -- -+ - -- -+ - - - -@@ -84,13 +84,38 @@ - - - -- -+ -+ -+ -+ - - - - - -- -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - -@@ -103,37 +128,64 @@ - - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - -+ -+ -+ -+ -+ -+ -+ -+ -+ - -- -+ - - -- -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - - -- -+ -+ -+ -+ -+ -+ -+ - - - -- -+ - -- -+ - - - -@@ -188,7 +240,7 @@ - - - -- -+ - - - -@@ -201,7 +253,7 @@ - - - -- -+ - - - -@@ -235,7 +287,7 @@ - - - -- -+ - - - -@@ -244,7 +296,7 @@ - - - -- -+ - - - -@@ -253,7 +305,7 @@ - - - -- -+ - - - -@@ -262,7 +314,7 @@ - - - -- -+ - - - -@@ -271,7 +323,7 @@ - - - -- -+ - - - -@@ -280,7 +332,7 @@ - - - -- -+ - - - -@@ -293,7 +345,7 @@ - - - -- -+ - - - -@@ -321,7 +373,7 @@ - - - -- -+ - - - -@@ -334,7 +386,7 @@ - - - -- -+ - - - -@@ -347,7 +399,7 @@ - - - -- -+ - - - -@@ -360,7 +412,7 @@ - - - -- -+ - - - -@@ -373,7 +425,7 @@ - - - -- -+ - - - -@@ -386,7 +438,7 @@ - - - -- -+ - - - -@@ -399,7 +451,7 @@ - - - -- -+ - - - -@@ -433,7 +485,7 @@ - - - -- -+ - - - -@@ -442,7 +494,7 @@ - - - -- -+ - - - -@@ -451,7 +503,7 @@ - - - -- -+ - - - -@@ -460,7 +512,7 @@ - - - -- -+ - - - -@@ -469,7 +521,7 @@ - - - -- -+ - - - -@@ -478,7 +530,7 @@ - - - -- -+ - - - -@@ -491,7 +543,7 @@ - - - -- -+ - - - -@@ -519,7 +571,7 @@ - - - -- -+ - - - -@@ -532,7 +584,7 @@ - - - -- -+ - - - -@@ -545,7 +597,7 @@ - - - -- -+ - - - -@@ -558,7 +610,7 @@ - - - -- -+ - - - -@@ -571,7 +623,7 @@ - - - -- -+ - - - -@@ -584,7 +636,7 @@ - - - -- -+ - - - -@@ -597,7 +649,7 @@ - - - -- -+ - - - -@@ -631,7 +683,7 @@ - - - -- -+ - - - -@@ -640,7 +692,7 @@ - - - -- -+ - - - -@@ -649,7 +701,7 @@ - - - -- -+ - - - -@@ -658,7 +710,7 @@ - - - -- -+ - - - -@@ -667,7 +719,7 @@ - - - -- -+ - - - -@@ -676,7 +728,7 @@ - - - -- -+ - - - -@@ -689,7 +741,7 @@ - - - -- -+ - - - -@@ -717,7 +769,7 @@ - - - -- -+ - - - -@@ -730,7 
+782,7 @@ - - - -- -+ - - - -@@ -743,7 +795,7 @@ - - - -- -+ - - - -@@ -756,7 +808,7 @@ - - - -- -+ - - - -@@ -769,7 +821,7 @@ - - - -- -+ - - - -@@ -782,141 +834,196 @@ - - - -- -+ - -- -+ - - - - - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - - - - -- -+ - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - - - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - - - - -- -+ - - -- -+ - - -- -+ - - -- -+ - - - -- -+ - -- -+ - -- -+ - - - - -- -+ - - -- -+ -+ -+ -+ - - - -- -+ - -- -+ - - - - - - -- -+ -+ -+ -+ - - -- -+ - - -- -+ - - -- -+ - - - -- -+ - -- -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - - - -- -+ - - -- -+ - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - -@@ -967,26 +1074,26 @@ - - - -- -+ - - - -- -+ - -- -+ - - - - - - -- -+ - - - -- -+ - -- -+ - - - -@@ -1014,14 +1121,14 @@ - - - -- -+ - - -- -+ - - - -- -+ - - - -@@ -1030,7 +1137,7 @@ - - - -- -+ - - - -@@ -1039,7 +1146,7 @@ - - - -- -+ - - - -@@ -1048,7 +1155,7 @@ - - - -- -+ - - - -@@ -1057,7 +1164,7 @@ - - - -- -+ - - - -@@ -1066,22 +1173,22 @@ - - - -- -+ - -- -+ - - - - - - -- -+ - - - -- -+ - -- -+ - - - -@@ -1103,11 +1210,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1116,11 +1223,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1129,11 +1236,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1142,11 +1249,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1155,11 +1262,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1168,26 +1275,26 @@ - - - -- -+ - - - -- -+ - -- -+ - - - - - - -- -+ - - - -- -+ - -- -+ - - - -@@ -1215,14 +1322,14 @@ - - - -- -+ - - -- -+ - - - -- -+ - - - -@@ -1231,7 +1338,7 @@ - - - -- -+ - - - -@@ -1240,7 +1347,7 @@ - - - -- -+ - - - -@@ -1249,7 +1356,7 @@ - - - -- -+ - - - -@@ -1258,7 +1365,7 @@ - - - -- -+ - - - -@@ -1267,22 +1374,22 @@ - - - -- -+ - -- -+ - - - - - - -- -+ - - - -- -+ - -- -+ - - - -@@ -1304,11 +1411,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1317,11 +1424,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1330,11 +1437,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1343,11 +1450,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1356,11 +1463,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1369,26 +1476,26 @@ - - - -- -+ - - - -- -+ - -- -+ - - - - - - -- -+ - - - -- -+ - -- -+ - - - -@@ -1416,14 +1523,14 @@ - - - -- -+ - - -- -+ - - - -- -+ - - - -@@ -1432,7 +1539,7 @@ - - - -- -+ - - - -@@ -1441,7 +1548,7 @@ - - - -- -+ - - - -@@ -1450,7 +1557,7 @@ - - - -- -+ - - - -@@ -1459,7 +1566,7 @@ - - - -- -+ - - - -@@ -1468,22 +1575,22 @@ - - - -- -+ - -- -+ - - - - - - -- -+ - - - -- -+ - -- -+ - - - -@@ -1505,11 +1612,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1518,11 +1625,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1531,11 +1638,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1544,11 +1651,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1557,11 +1664,11 @@ - - - -- -+ - - - -- -+ - - - -@@ -1570,34 +1677,61 @@ - - - -- -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - -- -+ - -- -+ - - - - - -- -+ - - -- -+ - -+ -+ -+ -+ -+ -+ -+ -+ -+ - -- -+ - - -- -+ -+ -+ -+ -+ -+ -+ - - - -- -+ - -- -+ - - - -@@ -1607,7 +1741,31 @@ - - - -- -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - -@@ -1624,11 +1782,11 @@ - - - -- -+ - - - -- -+ - - - -diff --git a/cts/scheduler/exp/no-promote-on-unrunnable-guest.exp b/cts/scheduler/exp/no-promote-on-unrunnable-guest.exp -index 351aec11df0..350495f4a6f 100644 ---- a/cts/scheduler/exp/no-promote-on-unrunnable-guest.exp -+++ b/cts/scheduler/exp/no-promote-on-unrunnable-guest.exp -@@ -14,7 +14,7 @@ - - - -- -+ - - - -@@ 
-82,14 +82,14 @@ - - - -- -+ - - - - - - -- -+ - - - -@@ -101,7 +101,7 @@ - - - -- -+ - - - -@@ -250,7 +250,7 @@ - - - -- -+ - - - -@@ -396,7 +396,7 @@ - - - -- -+ - - - -@@ -473,7 +473,7 @@ - - - -- -+ - - - -diff --git a/cts/scheduler/scores/bundle-interleave-start.scores b/cts/scheduler/scores/bundle-interleave-start.scores -index 7f4a370474d..b3aa9b571e8 100644 ---- a/cts/scheduler/scores/bundle-interleave-start.scores -+++ b/cts/scheduler/scores/bundle-interleave-start.scores -@@ -1,10 +1,10 @@ - --app:0 promotion score on app-bundle-0: -1 --app:1 promotion score on app-bundle-1: -1 --app:2 promotion score on app-bundle-2: -1 --base:0 promotion score on base-bundle-0: -1 --base:1 promotion score on base-bundle-1: -1 --base:2 promotion score on base-bundle-2: -1 -+app:0 promotion score on app-bundle-0: 12 -+app:1 promotion score on app-bundle-1: 13 -+app:2 promotion score on app-bundle-2: 14 -+base:0 promotion score on base-bundle-0: 12 -+base:1 promotion score on base-bundle-1: 13 -+base:2 promotion score on base-bundle-2: 14 - pcmk__bundle_allocate: app-bundle allocation score on node1: 0 - pcmk__bundle_allocate: app-bundle allocation score on node2: 0 - pcmk__bundle_allocate: app-bundle allocation score on node3: 0 -diff --git a/cts/scheduler/scores/cancel-behind-moving-remote.scores b/cts/scheduler/scores/cancel-behind-moving-remote.scores -index 0dfd78caa92..0e11b225aea 100644 ---- a/cts/scheduler/scores/cancel-behind-moving-remote.scores -+++ b/cts/scheduler/scores/cancel-behind-moving-remote.scores -@@ -2,7 +2,7 @@ - galera:0 promotion score on galera-bundle-0: 100 - galera:1 promotion score on galera-bundle-1: 100 - galera:2 promotion score on galera-bundle-2: 100 --ovndb_servers:0 promotion score on ovn-dbs-bundle-0: -1 -+ovndb_servers:0 promotion score on ovn-dbs-bundle-0: 5 - ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 5 - ovndb_servers:2 promotion score on ovn-dbs-bundle-2: 5 - pcmk__bundle_allocate: galera-bundle allocation score on compute-0: -INFINITY -diff --git a/cts/scheduler/scores/guest-host-not-fenceable.scores b/cts/scheduler/scores/guest-host-not-fenceable.scores -index e4c7fc2033d..5f43bcb0812 100644 ---- a/cts/scheduler/scores/guest-host-not-fenceable.scores -+++ b/cts/scheduler/scores/guest-host-not-fenceable.scores -@@ -1,6 +1,6 @@ - - galera:0 promotion score on galera-bundle-0: 100 --galera:1 promotion score on galera-bundle-1: 100 -+galera:1 promotion score on galera-bundle-1: -1 - galera:2 promotion score on galera-bundle-2: -1 - pcmk__bundle_allocate: galera-bundle allocation score on node1: 0 - pcmk__bundle_allocate: galera-bundle allocation score on node2: 0 -diff --git a/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores b/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores -index 7923cdc2320..9362dc0e1f2 100644 ---- a/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores -+++ b/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores -@@ -2,7 +2,7 @@ - galera:0 promotion score on galera-bundle-0: 100 - galera:1 promotion score on galera-bundle-1: 100 - galera:2 promotion score on galera-bundle-2: 100 --ovndb_servers:0 promotion score on ovn-dbs-bundle-0: 5 -+ovndb_servers:0 promotion score on ovn-dbs-bundle-0: -1 - ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 5 - ovndb_servers:2 promotion score on ovn-dbs-bundle-2: 5 - pcmk__bundle_allocate: galera-bundle allocation score on controller-0: 0 -diff --git a/cts/scheduler/summary/bundle-interleave-start.summary b/cts/scheduler/summary/bundle-interleave-start.summary 
-index 1648e929bf7..07ff7561968 100644 ---- a/cts/scheduler/summary/bundle-interleave-start.summary -+++ b/cts/scheduler/summary/bundle-interleave-start.summary -@@ -14,24 +14,24 @@ Current cluster status: - * app-bundle-2 (ocf:pacemaker:Stateful): Stopped - - Transition Summary: -- * Start base-bundle-podman-0 ( node2 ) -- * Start base-bundle-0 ( node2 ) -- * Start base:0 ( base-bundle-0 ) -- * Start base-bundle-podman-1 ( node3 ) -- * Start base-bundle-1 ( node3 ) -- * Start base:1 ( base-bundle-1 ) -- * Start base-bundle-podman-2 ( node4 ) -- * Start base-bundle-2 ( node4 ) -- * Start base:2 ( base-bundle-2 ) -- * Start app-bundle-podman-0 ( node2 ) -- * Start app-bundle-0 ( node2 ) -- * Start app:0 ( app-bundle-0 ) -- * Start app-bundle-podman-1 ( node3 ) -- * Start app-bundle-1 ( node3 ) -- * Start app:1 ( app-bundle-1 ) -- * Start app-bundle-podman-2 ( node4 ) -- * Start app-bundle-2 ( node4 ) -- * Start app:2 ( app-bundle-2 ) -+ * Start base-bundle-podman-0 ( node2 ) -+ * Start base-bundle-0 ( node2 ) -+ * Start base:0 ( base-bundle-0 ) -+ * Start base-bundle-podman-1 ( node3 ) -+ * Start base-bundle-1 ( node3 ) -+ * Start base:1 ( base-bundle-1 ) -+ * Start base-bundle-podman-2 ( node4 ) -+ * Start base-bundle-2 ( node4 ) -+ * Promote base:2 ( Stopped -> Promoted base-bundle-2 ) -+ * Start app-bundle-podman-0 ( node2 ) -+ * Start app-bundle-0 ( node2 ) -+ * Start app:0 ( app-bundle-0 ) -+ * Start app-bundle-podman-1 ( node3 ) -+ * Start app-bundle-1 ( node3 ) -+ * Start app:1 ( app-bundle-1 ) -+ * Start app-bundle-podman-2 ( node4 ) -+ * Start app-bundle-2 ( node4 ) -+ * Promote app:2 ( Stopped -> Promoted app-bundle-2 ) - - Executing Cluster Transition: - * Resource action: base-bundle-podman-0 monitor on node5 -@@ -100,8 +100,9 @@ Executing Cluster Transition: - * Pseudo action: base-bundle_running_0 - * Resource action: base:0 monitor=16000 on base-bundle-0 - * Resource action: base:1 monitor=16000 on base-bundle-1 -- * Resource action: base:2 monitor=16000 on base-bundle-2 - * Pseudo action: app-bundle_start_0 -+ * Pseudo action: base-bundle_promote_0 -+ * Pseudo action: base-bundle-clone_promote_0 - * Pseudo action: app-bundle-clone_start_0 - * Resource action: app-bundle-podman-0 start on node2 - * Resource action: app-bundle-0 monitor on node5 -@@ -121,12 +122,16 @@ Executing Cluster Transition: - * Resource action: app-bundle-2 monitor on node3 - * Resource action: app-bundle-2 monitor on node2 - * Resource action: app-bundle-2 monitor on node1 -+ * Resource action: base:2 promote on base-bundle-2 -+ * Pseudo action: base-bundle-clone_promoted_0 - * Resource action: app-bundle-podman-0 monitor=60000 on node2 - * Resource action: app-bundle-0 start on node2 - * Resource action: app-bundle-podman-1 monitor=60000 on node3 - * Resource action: app-bundle-1 start on node3 - * Resource action: app-bundle-podman-2 monitor=60000 on node4 - * Resource action: app-bundle-2 start on node4 -+ * Pseudo action: base-bundle_promoted_0 -+ * Resource action: base:2 monitor=15000 on base-bundle-2 - * Resource action: app:0 start on app-bundle-0 - * Resource action: app:1 start on app-bundle-1 - * Resource action: app:2 start on app-bundle-2 -@@ -137,7 +142,12 @@ Executing Cluster Transition: - * Pseudo action: app-bundle_running_0 - * Resource action: app:0 monitor=16000 on app-bundle-0 - * Resource action: app:1 monitor=16000 on app-bundle-1 -- * Resource action: app:2 monitor=16000 on app-bundle-2 -+ * Pseudo action: app-bundle_promote_0 -+ * Pseudo action: app-bundle-clone_promote_0 -+ * 
Resource action: app:2 promote on app-bundle-2 -+ * Pseudo action: app-bundle-clone_promoted_0 -+ * Pseudo action: app-bundle_promoted_0 -+ * Resource action: app:2 monitor=15000 on app-bundle-2 - - Revised Cluster Status: - * Node List: -@@ -149,8 +159,8 @@ Revised Cluster Status: - * Container bundle set: base-bundle [localhost/pcmktest:base]: - * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node2 - * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node3 -- * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node4 -+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node4 - * Container bundle set: app-bundle [localhost/pcmktest:app]: - * app-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node2 - * app-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node3 -- * app-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node4 -+ * app-bundle-2 (ocf:pacemaker:Stateful): Promoted node4 -diff --git a/cts/scheduler/xml/bundle-interleave-start.xml b/cts/scheduler/xml/bundle-interleave-start.xml -index e8630cdf054..facb181b2a4 100644 ---- a/cts/scheduler/xml/bundle-interleave-start.xml -+++ b/cts/scheduler/xml/bundle-interleave-start.xml -@@ -6,7 +6,8 @@ - and its promoted role is colocated with base's. App's starts and - promotes are ordered after base's. - -- In this test, all are stopped and must be started. -+ In this test, all are stopped and must be started. One replica of each -+ bundle must be promoted. - --> - - - -From 6e5bc0d119c1609a3228763a5116a68829870948 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Mon, 26 Jun 2023 12:42:10 -0700 -Subject: [PATCH 05/19] Refactor: libpacemaker: De-functionize - pcmk__finalize_assignment() - -Move it into pcmk__assign_resource(). Also correct the "was assignment -changed" logic, and allocate rc_stopped only once. - -Signed-off-by: Reid Wahl ---- - lib/pacemaker/libpacemaker_private.h | 4 - - lib/pacemaker/pcmk_sched_primitive.c | 9 +- - lib/pacemaker/pcmk_sched_resource.c | 156 ++++++++++++--------------- - 3 files changed, 74 insertions(+), 95 deletions(-) - -diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h -index 192d5a703ff..614d695f83f 100644 ---- a/lib/pacemaker/libpacemaker_private.h -+++ b/lib/pacemaker/libpacemaker_private.h -@@ -908,10 +908,6 @@ void pcmk__noop_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml); - G_GNUC_INTERNAL - void pcmk__output_resource_actions(pe_resource_t *rsc); - --G_GNUC_INTERNAL --bool pcmk__finalize_assignment(pe_resource_t *rsc, pe_node_t *chosen, -- bool force); -- - G_GNUC_INTERNAL - bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force); - -diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c -index aefbf9aa140..2470b08ed69 100644 ---- a/lib/pacemaker/pcmk_sched_primitive.c -+++ b/lib/pacemaker/pcmk_sched_primitive.c -@@ -152,7 +152,6 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer) - GList *nodes = NULL; - pe_node_t *chosen = NULL; - pe_node_t *best = NULL; -- bool result = false; - const pe_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc); - - if (prefer == NULL) { -@@ -260,9 +259,9 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer) - pe__node_name(chosen), rsc->id, g_list_length(nodes)); - } - -- result = pcmk__finalize_assignment(rsc, chosen, false); -+ pcmk__assign_resource(rsc, chosen, false); - g_list_free(nodes); -- return result; -+ return rsc->allocated_to != NULL; - } - - /*! 
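The two hunks above update assign_best_node(); the next hunk updates pcmk__primitive_assign(), and the pcmk_sched_resource.c changes that follow carry the real work. As a self-contained sketch of the corrected "was the assignment changed" rule those changes implement -- simplified stand-in types, with same_node() in place of pe__same_node() and a NULL-terminated array in place of the GList of children; an illustration, not the actual Pacemaker code:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-ins for pe_node_t and pe_resource_t */
    typedef struct { const char *name; } node_t;

    typedef struct resource_s {
        const char *id;
        node_t *assigned_to;           /* like rsc->allocated_to */
        struct resource_s **children;  /* NULL-terminated; the real code uses a GList */
    } resource_t;

    /* Stand-in for pe__same_node(): true when both name the same node */
    static bool
    same_node(const node_t *a, const node_t *b)
    {
        return (a != NULL) && (b != NULL) && (strcmp(a->name, b->name) == 0);
    }

    /* Mirrors the corrected logic: recurse for collective resources, and for a
     * primitive report whether its node actually changed, including the case
     * where a previously assigned resource is now unassigned (node == NULL).
     */
    static bool
    assign_resource(resource_t *rsc, node_t *node)
    {
        bool changed = false;

        if (rsc->children != NULL) {
            for (resource_t **iter = rsc->children; *iter != NULL; iter++) {
                changed |= assign_resource(*iter, node);
            }
            return changed;
        }

        if (rsc->assigned_to != NULL) {
            changed = !same_node(rsc->assigned_to, node);  /* moved or unassigned */
        } else {
            changed = (node != NULL);                      /* newly placed */
        }
        rsc->assigned_to = node;
        return changed;
    }

    int
    main(void)
    {
        node_t node1 = { "node1" };
        resource_t rsc = { "dummy", NULL, NULL };

        printf("first assignment changed: %d\n", assign_resource(&rsc, &node1));  /* 1 */
        printf("repeat assignment changed: %d\n", assign_resource(&rsc, &node1)); /* 0 */
        return 0;
    }

The net effect of the correction: the return value reports whether any primitive actually moved, was newly placed, or was unassigned -- not merely whether the assignment call succeeded.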
-@@ -475,11 +474,11 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer) - } - pe_rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id, - (assign_to? assign_to->details->uname : "no node"), reason); -- pcmk__finalize_assignment(rsc, assign_to, true); -+ pcmk__assign_resource(rsc, assign_to, true); - - } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_everything)) { - pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources", rsc->id); -- pcmk__finalize_assignment(rsc, NULL, true); -+ pcmk__assign_resource(rsc, NULL, true); - - } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional) - && assign_best_node(rsc, prefer)) { -diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c -index b8554998197..dd9939a42a6 100644 ---- a/lib/pacemaker/pcmk_sched_resource.c -+++ b/lib/pacemaker/pcmk_sched_resource.c -@@ -331,140 +331,124 @@ pcmk__output_resource_actions(pe_resource_t *rsc) - - /*! - * \internal -- * \brief Assign a specified primitive resource to a node -+ * \brief Assign a specified resource (of any variant) to a node - * -- * Assign a specified primitive resource to a specified node, if the node can -- * run the resource (or unconditionally, if \p force is true). Mark the resource -- * as no longer provisional. If the primitive can't be assigned (or \p chosen is -- * NULL), unassign any previous assignment for it, set its next role to stopped, -- * and update any existing actions scheduled for it. This is not done -- * recursively for children, so it should be called only for primitives. -+ * Assign a specified resource and its children (if any) to a specified node, if -+ * the node can run the resource (or unconditionally, if \p force is true). Mark -+ * the resources as no longer provisional. If a resource can't be assigned (or -+ * \p node is \c NULL), unassign any previous assignment, set next role to -+ * stopped, and update any existing actions scheduled for it. - * -- * \param[in,out] rsc Resource to assign -- * \param[in,out] chosen Node to assign \p rsc to -- * \param[in] force If true, assign to \p chosen even if unavailable -+ * \param[in,out] rsc Resource to assign -+ * \param[in,out] node Node to assign \p rsc to -+ * \param[in] force If true, assign to \p node even if unavailable - * -- * \return true if \p rsc could be assigned, otherwise false -+ * \return \c true if the assignment of \p rsc changed, or \c false otherwise - * - * \note Assigning a resource to the NULL node using this function is different - * from calling pcmk__unassign_resource(), in that it will also update any - * actions created for the resource. -+ * \note The \c resource_alloc_functions_t:assign() method is preferred, unless -+ * a resource should be assigned to the \c NULL node or every resource in -+ * a tree should be assigned to the same node. 
- */ - bool --pcmk__finalize_assignment(pe_resource_t *rsc, pe_node_t *chosen, bool force) -+pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force) - { -- pcmk__output_t *out = rsc->cluster->priv; -+ bool changed = false; -+ -+ CRM_ASSERT(rsc != NULL); - -- CRM_ASSERT(rsc->variant == pe_native); -- -- if (!force && (chosen != NULL)) { -- if ((chosen->weight < 0) -- // Allow the graph to assume that guest node connections will come up -- || (!pcmk__node_available(chosen, true, false) -- && !pe__is_guest_node(chosen))) { -- -- crm_debug("All nodes for resource %s are unavailable, unclean or " -- "shutting down (%s can%s run resources, with weight %d)", -- rsc->id, pe__node_name(chosen), -- (pcmk__node_available(chosen, true, false)? "" : "not"), -- chosen->weight); -- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability"); -- chosen = NULL; -+ if (rsc->children != NULL) { -+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { -+ pe_resource_t *child_rsc = iter->data; -+ -+ changed |= pcmk__assign_resource(child_rsc, node, force); - } -+ return changed; - } - -+ // Assigning a primitive -+ -+ if (!force && (node != NULL) -+ && ((node->weight < 0) -+ // Allow graph to assume that guest node connections will come up -+ || (!pcmk__node_available(node, true, false) -+ && !pe__is_guest_node(node)))) { -+ -+ pe_rsc_debug(rsc, -+ "All nodes for resource %s are unavailable, unclean or " -+ "shutting down (%s can%s run resources, with score %s)", -+ rsc->id, pe__node_name(node), -+ (pcmk__node_available(node, true, false)? "" : "not"), -+ pcmk_readable_score(node->weight)); -+ pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability"); -+ node = NULL; -+ } -+ -+ if (rsc->allocated_to != NULL) { -+ changed = !pe__same_node(rsc->allocated_to, node); -+ } else { -+ changed = (node != NULL); -+ } - pcmk__unassign_resource(rsc); - pe__clear_resource_flags(rsc, pe_rsc_provisional); - -- if (chosen == NULL) { -- crm_debug("Could not allocate a node for %s", rsc->id); -- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to allocate"); -+ if (node == NULL) { -+ char *rc_stopped = NULL; -+ -+ pe_rsc_debug(rsc, "Could not assign %s to a node", rsc->id); -+ pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to assign"); - - for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) { - pe_action_t *op = (pe_action_t *) iter->data; - -- crm_debug("Updating %s for allocation failure", op->uuid); -+ pe_rsc_debug(rsc, "Updating %s for %s assignment failure", -+ op->uuid, rsc->id); - - if (pcmk__str_eq(op->task, RSC_STOP, pcmk__str_casei)) { - pe__clear_action_flags(op, pe_action_optional); - - } else if (pcmk__str_eq(op->task, RSC_START, pcmk__str_casei)) { - pe__clear_action_flags(op, pe_action_runnable); -- //pe__set_resource_flags(rsc, pe_rsc_block); - - } else { - // Cancel recurring actions, unless for stopped state - const char *interval_ms_s = NULL; - const char *target_rc_s = NULL; -- char *rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING); - - interval_ms_s = g_hash_table_lookup(op->meta, - XML_LRM_ATTR_INTERVAL_MS); - target_rc_s = g_hash_table_lookup(op->meta, - XML_ATTR_TE_TARGET_RC); -- if ((interval_ms_s != NULL) -- && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_none) -+ if (rc_stopped == NULL) { -+ rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING); -+ } -+ -+ if (!pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches) - && !pcmk__str_eq(rc_stopped, target_rc_s, pcmk__str_none)) { -+ - pe__clear_action_flags(op, pe_action_runnable); - } -- free(rc_stopped); - } - 
} -- return false; -+ free(rc_stopped); -+ return changed; - } - -- crm_debug("Assigning %s to %s", rsc->id, pe__node_name(chosen)); -- rsc->allocated_to = pe__copy_node(chosen); -+ pe_rsc_debug(rsc, "Assigning %s to %s", rsc->id, pe__node_name(node)); -+ rsc->allocated_to = pe__copy_node(node); - -- chosen->details->allocated_rsc = g_list_prepend(chosen->details->allocated_rsc, -- rsc); -- chosen->details->num_resources++; -- chosen->count++; -- pcmk__consume_node_capacity(chosen->details->utilization, rsc); -+ node->details->allocated_rsc = g_list_prepend(node->details->allocated_rsc, -+ rsc); -+ node->details->num_resources++; -+ node->count++; -+ pcmk__consume_node_capacity(node->details->utilization, rsc); - - if (pcmk_is_set(rsc->cluster->flags, pe_flag_show_utilization)) { -- out->message(out, "resource-util", rsc, chosen, __func__); -- } -- return true; --} -- --/*! -- * \internal -- * \brief Assign a specified resource (of any variant) to a node -- * -- * Assign a specified resource and its children (if any) to a specified node, if -- * the node can run the resource (or unconditionally, if \p force is true). Mark -- * the resources as no longer provisional. If the resources can't be assigned -- * (or \p chosen is NULL), unassign any previous assignments, set next role to -- * stopped, and update any existing actions scheduled for them. -- * -- * \param[in,out] rsc Resource to assign -- * \param[in,out] chosen Node to assign \p rsc to -- * \param[in] force If true, assign to \p chosen even if unavailable -- * -- * \return true if \p rsc could be assigned, otherwise false -- * -- * \note Assigning a resource to the NULL node using this function is different -- * from calling pcmk__unassign_resource(), in that it will also update any -- * actions created for the resource. 
-- */ --bool --pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force) --{ -- bool changed = false; -- -- if (rsc->children == NULL) { -- if (rsc->allocated_to != NULL) { -- changed = true; -- } -- pcmk__finalize_assignment(rsc, node, force); -- -- } else { -- for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { -- pe_resource_t *child_rsc = (pe_resource_t *) iter->data; -+ pcmk__output_t *out = rsc->cluster->priv; - -- changed |= pcmk__assign_resource(child_rsc, node, force); -- } -+ out->message(out, "resource-util", rsc, node, __func__); - } - return changed; - } - -From b01ecf9444e856227cd61c53f1c0106936eccd74 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Mon, 10 Jul 2023 02:28:54 -0700 -Subject: [PATCH 06/19] Test: cts-cli: Update tests after defunctionization - -pcmk__finalize_assignment() -> pcmk__assign_resource() - -Signed-off-by: Reid Wahl ---- - cts/cli/regression.tools.exp | 42 ++++++++++++++++++------------------ - 1 file changed, 21 insertions(+), 21 deletions(-) - -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index a8e2236063c..506e9ba01b6 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -5711,26 +5711,26 @@ Original: cluster02 capacity: - Original: httpd-bundle-0 capacity: - Original: httpd-bundle-1 capacity: - Original: httpd-bundle-2 capacity: --pcmk__finalize_assignment: ping:0 utilization on cluster02: --pcmk__finalize_assignment: ping:1 utilization on cluster01: --pcmk__finalize_assignment: Fencing utilization on cluster01: --pcmk__finalize_assignment: dummy utilization on cluster02: --pcmk__finalize_assignment: httpd-bundle-docker-0 utilization on cluster01: --pcmk__finalize_assignment: httpd-bundle-docker-1 utilization on cluster02: --pcmk__finalize_assignment: httpd-bundle-ip-192.168.122.131 utilization on cluster01: --pcmk__finalize_assignment: httpd-bundle-0 utilization on cluster01: --pcmk__finalize_assignment: httpd:0 utilization on httpd-bundle-0: --pcmk__finalize_assignment: httpd-bundle-ip-192.168.122.132 utilization on cluster02: --pcmk__finalize_assignment: httpd-bundle-1 utilization on cluster02: --pcmk__finalize_assignment: httpd:1 utilization on httpd-bundle-1: --pcmk__finalize_assignment: httpd-bundle-2 utilization on cluster01: --pcmk__finalize_assignment: httpd:2 utilization on httpd-bundle-2: --pcmk__finalize_assignment: Public-IP utilization on cluster02: --pcmk__finalize_assignment: Email utilization on cluster02: --pcmk__finalize_assignment: mysql-proxy:0 utilization on cluster02: --pcmk__finalize_assignment: mysql-proxy:1 utilization on cluster01: --pcmk__finalize_assignment: promotable-rsc:0 utilization on cluster02: --pcmk__finalize_assignment: promotable-rsc:1 utilization on cluster01: -+pcmk__assign_resource: ping:0 utilization on cluster02: -+pcmk__assign_resource: ping:1 utilization on cluster01: -+pcmk__assign_resource: Fencing utilization on cluster01: -+pcmk__assign_resource: dummy utilization on cluster02: -+pcmk__assign_resource: httpd-bundle-docker-0 utilization on cluster01: -+pcmk__assign_resource: httpd-bundle-docker-1 utilization on cluster02: -+pcmk__assign_resource: httpd-bundle-ip-192.168.122.131 utilization on cluster01: -+pcmk__assign_resource: httpd-bundle-0 utilization on cluster01: -+pcmk__assign_resource: httpd:0 utilization on httpd-bundle-0: -+pcmk__assign_resource: httpd-bundle-ip-192.168.122.132 utilization on cluster02: -+pcmk__assign_resource: httpd-bundle-1 utilization on cluster02: -+pcmk__assign_resource: httpd:1 
utilization on httpd-bundle-1: -+pcmk__assign_resource: httpd-bundle-2 utilization on cluster01: -+pcmk__assign_resource: httpd:2 utilization on httpd-bundle-2: -+pcmk__assign_resource: Public-IP utilization on cluster02: -+pcmk__assign_resource: Email utilization on cluster02: -+pcmk__assign_resource: mysql-proxy:0 utilization on cluster02: -+pcmk__assign_resource: mysql-proxy:1 utilization on cluster01: -+pcmk__assign_resource: promotable-rsc:0 utilization on cluster02: -+pcmk__assign_resource: promotable-rsc:1 utilization on cluster01: - Remaining: cluster01 capacity: - Remaining: cluster02 capacity: - Remaining: httpd-bundle-0 capacity: -@@ -5961,7 +5961,7 @@ Transition Summary: - * Move Public-IP ( cluster02 -> cluster01 ) - * Move Email ( cluster02 -> cluster01 ) - * Stop mysql-proxy:0 ( cluster02 ) due to node availability -- * Stop promotable-rsc:0 ( Promoted cluster02 ) due to node availability -+ * Stop promotable-rsc:0 ( Promoted cluster02 ) due to node availability - - Executing Cluster Transition: - * Pseudo action: httpd-bundle-1_stop_0 - -From 0ad4a3c8404d57e2026e41a234a9b8a0a237b2bd Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Tue, 20 Jun 2023 23:22:54 -0700 -Subject: [PATCH 07/19] Test: scheduler: Clone instances should not shuffle - unnecessarily - -In some cases, clone instances may be shuffled when a new instance is -scheduled to start or promote. This can cause instances to be stopped -and started unnecessarily. - -Here we add tests for three types of clones: -* "Bare" clones of primitives -* Clones of groups -* Clones of primitives within bundles (clone is bundle child resource) - -For each clone type, we add four tests. In each test, no clone instance -is running on node 1, and a new instance should be started and possibly -promoted there. 
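For orientation before the list of variants just below: the "location preference" and "promotable" variants amount to CIB fragments along these lines (a hand-written sketch with invented IDs, not the literal contents of the cts/scheduler/xml files):

    <clone id="dummy-clone">
      <!-- present only in the promotable variants -->
      <meta_attributes id="dummy-clone-meta">
        <nvpair id="dummy-clone-meta-promotable" name="promotable" value="true"/>
      </meta_attributes>
      <primitive id="dummy" class="ocf" provider="pacemaker" type="Stateful"/>
    </clone>

    <constraints>
      <!-- present only in the "location preference" variants -->
      <rsc_location id="location-dummy-clone-node1" rsc="dummy-clone"
                    node="node1" score="100"/>
    </constraints>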
-* No constraints or stickiness
-* Location constraint preferring node 1
-* Promotable clone where node 1 has the highest promotion score
-* Promotable clone where node 1 does not have the highest promotion
-  score
-
-The following tests are currently incorrect:
-* clone-recover-no-shuffle-4 (shuffling)
-* clone-recover-no-shuffle-5 (all instances of an anonymous clone move
-  to one node)
-* clone-recover-no-shuffle-6 (shuffling)
-* clone-recover-no-shuffle-7 (shuffling)
-
-Ref T489
-
-Signed-off-by: Reid Wahl
----
- cts/cts-scheduler.in | 33 ++
- .../dot/clone-recover-no-shuffle-1.dot | 10 +
- .../dot/clone-recover-no-shuffle-10.dot | 10 +
- .../dot/clone-recover-no-shuffle-11.dot | 21 +
- .../dot/clone-recover-no-shuffle-12.dot | 35 ++
- .../dot/clone-recover-no-shuffle-2.dot | 21 +
- .../dot/clone-recover-no-shuffle-3.dot | 32 ++
- .../dot/clone-recover-no-shuffle-4.dot | 23 +
- .../dot/clone-recover-no-shuffle-5.dot | 80 +++
- .../dot/clone-recover-no-shuffle-6.dot | 97 ++++
- .../dot/clone-recover-no-shuffle-7.dot | 45 ++
- .../dot/clone-recover-no-shuffle-8.dot | 63 +++
- .../dot/clone-recover-no-shuffle-9.dot | 69 +++
- .../exp/clone-recover-no-shuffle-1.exp | 51 ++
- .../exp/clone-recover-no-shuffle-10.exp | 51 ++
- .../exp/clone-recover-no-shuffle-11.exp | 110 ++++
- .../exp/clone-recover-no-shuffle-12.exp | 187 +++++++
- .../exp/clone-recover-no-shuffle-2.exp | 110 ++++
- .../exp/clone-recover-no-shuffle-3.exp | 171 ++++++
- .../exp/clone-recover-no-shuffle-4.exp | 123 +++++
- .../exp/clone-recover-no-shuffle-5.exp | 452 ++++++++++++++++
- .../exp/clone-recover-no-shuffle-6.exp | 507 ++++++++++++++++++
- .../exp/clone-recover-no-shuffle-7.exp | 240 +++++++++
- .../exp/clone-recover-no-shuffle-8.exp | 338 ++++++++++++
- .../exp/clone-recover-no-shuffle-9.exp | 364 +++++++++++++
- .../scores/clone-recover-no-shuffle-1.scores | 25 +
- .../scores/clone-recover-no-shuffle-10.scores | 31 ++
- .../scores/clone-recover-no-shuffle-11.scores | 82 +++
- .../scores/clone-recover-no-shuffle-12.scores | 67 +++
- .../scores/clone-recover-no-shuffle-2.scores | 79 +++
- .../scores/clone-recover-no-shuffle-3.scores | 64 +++
- .../scores/clone-recover-no-shuffle-4.scores | 31 ++
- .../scores/clone-recover-no-shuffle-5.scores | 79 +++
- .../scores/clone-recover-no-shuffle-6.scores | 70 +++
- .../scores/clone-recover-no-shuffle-7.scores | 34 ++
- .../scores/clone-recover-no-shuffle-8.scores | 82 +++
- .../scores/clone-recover-no-shuffle-9.scores | 67 +++
- .../clone-recover-no-shuffle-1.summary | 29 +
- .../clone-recover-no-shuffle-10.summary | 29 +
- .../clone-recover-no-shuffle-11.summary | 34 ++
- .../clone-recover-no-shuffle-12.summary | 43 ++
- .../clone-recover-no-shuffle-2.summary | 32 ++
- .../clone-recover-no-shuffle-3.summary | 42 ++
- .../clone-recover-no-shuffle-4.summary | 35 ++
- .../clone-recover-no-shuffle-5.summary | 59 ++
- .../clone-recover-no-shuffle-6.summary | 68 +++
- .../clone-recover-no-shuffle-7.summary | 44 ++
- .../clone-recover-no-shuffle-8.summary | 52 ++
- .../clone-recover-no-shuffle-9.summary | 56 ++
- .../xml/clone-recover-no-shuffle-1.xml | 113 ++++
- .../xml/clone-recover-no-shuffle-10.xml | 120 +++++
- .../xml/clone-recover-no-shuffle-11.xml | 153 ++++++
- .../xml/clone-recover-no-shuffle-12.xml | 186 +++++++
- .../xml/clone-recover-no-shuffle-2.xml | 141 +++++
- .../xml/clone-recover-no-shuffle-3.xml | 180 +++++++
- .../xml/clone-recover-no-shuffle-4.xml | 120 +++++
- .../xml/clone-recover-no-shuffle-5.xml | 148 +++++
- .../xml/clone-recover-no-shuffle-6.xml | 
187 +++++++ - .../xml/clone-recover-no-shuffle-7.xml | 125 +++++ - .../xml/clone-recover-no-shuffle-8.xml | 153 ++++++ - .../xml/clone-recover-no-shuffle-9.xml | 186 +++++++ - 61 files changed, 6289 insertions(+) - create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-1.dot - create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-10.dot - create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-11.dot - create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-12.dot - create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-2.dot - create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-3.dot - create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-4.dot - create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-5.dot - create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-6.dot - create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-7.dot - create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-8.dot - create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-9.dot - create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-1.exp - create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-10.exp - create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-11.exp - create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-12.exp - create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-2.exp - create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-3.exp - create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-4.exp - create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-5.exp - create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-6.exp - create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-7.exp - create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-8.exp - create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-9.exp - create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-1.scores - create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-10.scores - create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-11.scores - create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-12.scores - create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-2.scores - create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-3.scores - create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-4.scores - create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-5.scores - create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-6.scores - create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-7.scores - create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-8.scores - create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-9.scores - create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-1.summary - create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-10.summary - create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-11.summary - create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-12.summary - create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-2.summary - create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-3.summary - create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-4.summary - create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-5.summary - create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-6.summary - create mode 100644 
cts/scheduler/summary/clone-recover-no-shuffle-7.summary - create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-8.summary - create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-9.summary - create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-1.xml - create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-10.xml - create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-11.xml - create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-12.xml - create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-2.xml - create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-3.xml - create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-4.xml - create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-5.xml - create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-6.xml - create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-7.xml - create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-8.xml - create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-9.xml - -diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in -index ee0cb7b4722..4ff035c23a3 100644 ---- a/cts/cts-scheduler.in -+++ b/cts/cts-scheduler.in -@@ -441,6 +441,39 @@ TESTS = [ - [ "cloned-group", "Make sure only the correct number of cloned groups are started" ], - [ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ], - [ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ], -+ [ "clone-recover-no-shuffle-1", -+ "Don't shuffle instances when starting a new primitive instance" ], -+ [ "clone-recover-no-shuffle-2", -+ "Don't shuffle instances when starting a new group instance" ], -+ [ "clone-recover-no-shuffle-3", -+ "Don't shuffle instances when starting a new bundle instance" ], -+ [ "clone-recover-no-shuffle-4", -+ "Don't shuffle instances when starting a new primitive instance with " -+ "location preference "], -+ [ "clone-recover-no-shuffle-5", -+ "Don't shuffle instances when starting a new group instance with " -+ "location preference" ], -+ [ "clone-recover-no-shuffle-6", -+ "Don't shuffle instances when starting a new bundle instance with " -+ "location preference" ], -+ [ "clone-recover-no-shuffle-7", -+ "Don't shuffle instances when starting a new primitive instance that " -+ "will be promoted" ], -+ [ "clone-recover-no-shuffle-8", -+ "Don't shuffle instances when starting a new group instance that " -+ "will be promoted " ], -+ [ "clone-recover-no-shuffle-9", -+ "Don't shuffle instances when starting a new bundle instance that " -+ "will be promoted " ], -+ [ "clone-recover-no-shuffle-10", -+ "Don't shuffle instances when starting a new primitive instance that " -+ "won't be promoted" ], -+ [ "clone-recover-no-shuffle-11", -+ "Don't shuffle instances when starting a new group instance that " -+ "won't be promoted " ], -+ [ "clone-recover-no-shuffle-12", -+ "Don't shuffle instances when starting a new bundle instance that " -+ "won't be promoted " ], - [ "clone-max-zero", "Orphan processing with clone-max=0" ], - [ "clone-anon-dup", - "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ], -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-1.dot b/cts/scheduler/dot/clone-recover-no-shuffle-1.dot -new file mode 100644 -index 00000000000..287d82d3806 ---- /dev/null -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-1.dot -@@ -0,0 +1,10 @@ -+ digraph "g" { -+"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] 
-+"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] -+"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold] -+"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"dummy:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold] -+"dummy:2_start_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold] -+"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-10.dot b/cts/scheduler/dot/clone-recover-no-shuffle-10.dot -new file mode 100644 -index 00000000000..1e1840966fa ---- /dev/null -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-10.dot -@@ -0,0 +1,10 @@ -+ digraph "g" { -+"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] -+"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold] -+"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"dummy:2_monitor_11000 node1" [ style=bold color="green" fontcolor="black"] -+"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold] -+"dummy:2_start_0 node1" -> "dummy:2_monitor_11000 node1" [ style = bold] -+"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-11.dot b/cts/scheduler/dot/clone-recover-no-shuffle-11.dot -new file mode 100644 -index 00000000000..2b08a594561 ---- /dev/null -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-11.dot -@@ -0,0 +1,21 @@ -+ digraph "g" { -+"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] -+"grp-clone_start_0" -> "grp:2_start_0" [ style = bold] -+"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"grp:2_running_0" -> "grp-clone_running_0" [ style = bold] -+"grp:2_running_0" [ style=bold color="green" fontcolor="orange"] -+"grp:2_start_0" -> "grp:2_running_0" [ style = bold] -+"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] -+"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] -+"grp:2_start_0" [ style=bold color="green" fontcolor="orange"] -+"rsc1:2_monitor_11000 node1" [ style=bold color="green" fontcolor="black"] -+"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] -+"rsc1:2_start_0 node1" -> "rsc1:2_monitor_11000 node1" [ style = bold] -+"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] -+"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2:2_monitor_11000 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] -+"rsc2:2_start_0 node1" -> "rsc2:2_monitor_11000 node1" [ style = bold] -+"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-12.dot b/cts/scheduler/dot/clone-recover-no-shuffle-12.dot -new file mode 100644 -index 00000000000..ebc1dc6a815 ---- /dev/null -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-12.dot -@@ -0,0 +1,35 @@ -+ digraph "g" { -+"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] -+"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold] -+"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_monitor_0 node3" -> 
"base-bundle-2_start_0 node1" [ style = bold] -+"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold] -+"base-bundle-2_start_0 node1" -> "base:2_monitor_16000 base-bundle-2" [ style = bold] -+"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] -+"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] -+"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] -+"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold] -+"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"] -+"base:2_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"] -+"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] -+"base:2_start_0 base-bundle-2" -> "base:2_monitor_16000 base-bundle-2" [ style = bold] -+"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-2.dot b/cts/scheduler/dot/clone-recover-no-shuffle-2.dot -new file mode 100644 -index 00000000000..d3bdf04baa9 ---- /dev/null -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-2.dot -@@ -0,0 +1,21 @@ -+ digraph "g" { -+"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] -+"grp-clone_start_0" -> "grp:2_start_0" [ style = bold] -+"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"grp:2_running_0" -> "grp-clone_running_0" [ style = bold] -+"grp:2_running_0" [ style=bold color="green" fontcolor="orange"] -+"grp:2_start_0" -> "grp:2_running_0" [ style = bold] -+"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] -+"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] -+"grp:2_start_0" [ style=bold color="green" fontcolor="orange"] -+"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] -+"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] -+"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = 
bold] -+"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] -+"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] -+"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-3.dot b/cts/scheduler/dot/clone-recover-no-shuffle-3.dot -new file mode 100644 -index 00000000000..f60fd2cc04e ---- /dev/null -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-3.dot -@@ -0,0 +1,32 @@ -+ digraph "g" { -+"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] -+"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold] -+"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold] -+"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold] -+"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] -+"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] -+"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] -+"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold] -+"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"] -+"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] -+"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-4.dot b/cts/scheduler/dot/clone-recover-no-shuffle-4.dot -new file mode 100644 -index 00000000000..fd002f28fcf ---- /dev/null -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-4.dot -@@ -0,0 +1,23 @@ -+ digraph "g" { -+"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] -+"dummy-clone_start_0" -> 
"dummy:2_start_0 node2" [ style = bold] -+"dummy-clone_start_0" -> "dummy_start_0 node1" [ style = bold] -+"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"dummy-clone_stop_0" -> "dummy-clone_stopped_0" [ style = bold] -+"dummy-clone_stop_0" -> "dummy_stop_0 node2" [ style = bold] -+"dummy-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -+"dummy-clone_stopped_0" -> "dummy-clone_start_0" [ style = bold] -+"dummy-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"dummy:2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] -+"dummy:2_start_0 node2" -> "dummy-clone_running_0" [ style = bold] -+"dummy:2_start_0 node2" -> "dummy:2_monitor_10000 node2" [ style = bold] -+"dummy:2_start_0 node2" [ style=bold color="green" fontcolor="black"] -+"dummy_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"dummy_start_0 node1" -> "dummy-clone_running_0" [ style = bold] -+"dummy_start_0 node1" -> "dummy_monitor_10000 node1" [ style = bold] -+"dummy_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"dummy_stop_0 node2" -> "dummy-clone_stopped_0" [ style = bold] -+"dummy_stop_0 node2" -> "dummy_start_0 node1" [ style = bold] -+"dummy_stop_0 node2" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-5.dot b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot -new file mode 100644 -index 00000000000..7219ee5a6d3 ---- /dev/null -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot -@@ -0,0 +1,80 @@ -+ digraph "g" { -+"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] -+"grp-clone_start_0" -> "grp:0_start_0" [ style = bold] -+"grp-clone_start_0" -> "grp:1_start_0" [ style = bold] -+"grp-clone_start_0" -> "grp:2_start_0" [ style = bold] -+"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"grp-clone_stop_0" -> "grp-clone_stopped_0" [ style = bold] -+"grp-clone_stop_0" -> "grp:0_stop_0" [ style = bold] -+"grp-clone_stop_0" -> "grp:1_stop_0" [ style = bold] -+"grp-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -+"grp-clone_stopped_0" -> "grp-clone_start_0" [ style = bold] -+"grp-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"grp:0_running_0" -> "grp-clone_running_0" [ style = bold] -+"grp:0_running_0" [ style=bold color="green" fontcolor="orange"] -+"grp:0_start_0" -> "grp:0_running_0" [ style = bold] -+"grp:0_start_0" -> "rsc1_start_0 node1" [ style = bold] -+"grp:0_start_0" -> "rsc2_start_0 node1" [ style = bold] -+"grp:0_start_0" [ style=bold color="green" fontcolor="orange"] -+"grp:0_stop_0" -> "grp:0_stopped_0" [ style = bold] -+"grp:0_stop_0" -> "rsc1_stop_0 node2" [ style = bold] -+"grp:0_stop_0" -> "rsc2_stop_0 node2" [ style = bold] -+"grp:0_stop_0" [ style=bold color="green" fontcolor="orange"] -+"grp:0_stopped_0" -> "grp-clone_stopped_0" [ style = bold] -+"grp:0_stopped_0" -> "grp:0_start_0" [ style = bold] -+"grp:0_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"grp:1_running_0" -> "grp-clone_running_0" [ style = bold] -+"grp:1_running_0" [ style=bold color="green" fontcolor="orange"] -+"grp:1_start_0" -> "grp:1_running_0" [ style = bold] -+"grp:1_start_0" -> "rsc1_start_0 node1" [ style = bold] -+"grp:1_start_0" -> "rsc2_start_0 node1" [ style = bold] -+"grp:1_start_0" [ style=bold color="green" fontcolor="orange"] -+"grp:1_stop_0" -> "grp:1_stopped_0" [ style = bold] -+"grp:1_stop_0" -> "rsc1_stop_0 node3" [ 
style = bold] -+"grp:1_stop_0" -> "rsc2_stop_0 node3" [ style = bold] -+"grp:1_stop_0" [ style=bold color="green" fontcolor="orange"] -+"grp:1_stopped_0" -> "grp-clone_stopped_0" [ style = bold] -+"grp:1_stopped_0" -> "grp:1_start_0" [ style = bold] -+"grp:1_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"grp:2_running_0" -> "grp-clone_running_0" [ style = bold] -+"grp:2_running_0" [ style=bold color="green" fontcolor="orange"] -+"grp:2_start_0" -> "grp:2_running_0" [ style = bold] -+"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] -+"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] -+"grp:2_start_0" [ style=bold color="green" fontcolor="orange"] -+"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] -+"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] -+"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] -+"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc1_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"rsc1_start_0 node1" -> "grp:0_running_0" [ style = bold] -+"rsc1_start_0 node1" -> "grp:1_running_0" [ style = bold] -+"rsc1_start_0 node1" -> "rsc1_monitor_10000 node1" [ style = bold] -+"rsc1_start_0 node1" -> "rsc2_start_0 node1" [ style = bold] -+"rsc1_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc1_stop_0 node2" -> "grp:0_stopped_0" [ style = bold] -+"rsc1_stop_0 node2" -> "rsc1_start_0 node1" [ style = bold] -+"rsc1_stop_0 node2" [ style=bold color="green" fontcolor="black"] -+"rsc1_stop_0 node3" -> "grp:1_stopped_0" [ style = bold] -+"rsc1_stop_0 node3" -> "rsc1_start_0 node1" [ style = bold] -+"rsc1_stop_0 node3" [ style=bold color="green" fontcolor="black"] -+"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] -+"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] -+"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2_start_0 node1" -> "grp:0_running_0" [ style = bold] -+"rsc2_start_0 node1" -> "grp:1_running_0" [ style = bold] -+"rsc2_start_0 node1" -> "rsc2_monitor_10000 node1" [ style = bold] -+"rsc2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2_stop_0 node2" -> "grp:0_stopped_0" [ style = bold] -+"rsc2_stop_0 node2" -> "rsc1_stop_0 node2" [ style = bold] -+"rsc2_stop_0 node2" -> "rsc2_start_0 node1" [ style = bold] -+"rsc2_stop_0 node2" [ style=bold color="green" fontcolor="black"] -+"rsc2_stop_0 node3" -> "grp:1_stopped_0" [ style = bold] -+"rsc2_stop_0 node3" -> "rsc1_stop_0 node3" [ style = bold] -+"rsc2_stop_0 node3" -> "rsc2_start_0 node1" [ style = bold] -+"rsc2_stop_0 node3" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-6.dot b/cts/scheduler/dot/clone-recover-no-shuffle-6.dot -new file mode 100644 -index 00000000000..f8cfe9252d2 ---- /dev/null -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-6.dot -@@ -0,0 +1,97 @@ -+ digraph "g" { -+"base-bundle-0_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-0_start_0 node1" -> "base-bundle-0_monitor_30000 node1" [ style = bold] -+"base-bundle-0_start_0 node1" -> "base_start_0 base-bundle-0" [ style = bold] -+"base-bundle-0_start_0 node1" [ style=bold color="green" fontcolor="black"] 
-+"base-bundle-0_stop_0 node3" -> "base-bundle-0_start_0 node1" [ style = bold] -+"base-bundle-0_stop_0 node3" -> "base-bundle-podman-0_stop_0 node3" [ style = bold] -+"base-bundle-0_stop_0 node3" [ style=bold color="green" fontcolor="black"] -+"base-bundle-1_monitor_30000 node3" [ style=bold color="green" fontcolor="black"] -+"base-bundle-1_start_0 node3" -> "base-bundle-1_monitor_30000 node3" [ style = bold] -+"base-bundle-1_start_0 node3" -> "base_start_0 base-bundle-1" [ style = bold] -+"base-bundle-1_start_0 node3" [ style=bold color="green" fontcolor="black"] -+"base-bundle-1_stop_0 node2" -> "base-bundle-1_start_0 node3" [ style = bold] -+"base-bundle-1_stop_0 node2" -> "base-bundle-podman-1_stop_0 node2" [ style = bold] -+"base-bundle-1_stop_0 node2" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node2" [ style = bold] -+"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node2" [ style = bold] -+"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node2" [ style = bold] -+"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_monitor_30000 node2" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_start_0 node2" -> "base-bundle-2_monitor_30000 node2" [ style = bold] -+"base-bundle-2_start_0 node2" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base-bundle-2_start_0 node2" [ style=bold color="green" fontcolor="black"] -+"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] -+"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] -+"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base-bundle-clone_start_0" -> "base_start_0 base-bundle-0" [ style = bold] -+"base-bundle-clone_start_0" -> "base_start_0 base-bundle-1" [ style = bold] -+"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-clone_stop_0" -> "base-bundle-clone_stopped_0" [ style = bold] -+"base-bundle-clone_stop_0" -> "base_stop_0 base-bundle-0" [ style = bold] -+"base-bundle-clone_stop_0" -> "base_stop_0 base-bundle-1" [ style = bold] -+"base-bundle-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-clone_stopped_0" -> "base-bundle-clone_start_0" [ style = bold] -+"base-bundle-clone_stopped_0" -> "base-bundle_stopped_0" [ style = bold] -+"base-bundle-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-podman-0_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-podman-0_start_0 node1" -> "base-bundle-0_start_0 node1" [ style = bold] -+"base-bundle-podman-0_start_0 node1" -> "base-bundle-podman-0_monitor_60000 node1" [ style = bold] -+"base-bundle-podman-0_start_0 node1" -> "base-bundle_running_0" [ style = bold] -+"base-bundle-podman-0_start_0 node1" -> "base_start_0 base-bundle-0" [ style = bold] -+"base-bundle-podman-0_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-podman-0_stop_0 node3" -> "base-bundle-podman-0_start_0 node1" [ style = bold] -+"base-bundle-podman-0_stop_0 node3" -> "base-bundle_stopped_0" [ style = bold] -+"base-bundle-podman-0_stop_0 node3" [ style=bold color="green" fontcolor="black"] -+"base-bundle-podman-1_monitor_60000 
node3" [ style=bold color="green" fontcolor="black"] -+"base-bundle-podman-1_start_0 node3" -> "base-bundle-1_start_0 node3" [ style = bold] -+"base-bundle-podman-1_start_0 node3" -> "base-bundle-podman-1_monitor_60000 node3" [ style = bold] -+"base-bundle-podman-1_start_0 node3" -> "base-bundle_running_0" [ style = bold] -+"base-bundle-podman-1_start_0 node3" -> "base_start_0 base-bundle-1" [ style = bold] -+"base-bundle-podman-1_start_0 node3" [ style=bold color="green" fontcolor="black"] -+"base-bundle-podman-1_stop_0 node2" -> "base-bundle-podman-1_start_0 node3" [ style = bold] -+"base-bundle-podman-1_stop_0 node2" -> "base-bundle_stopped_0" [ style = bold] -+"base-bundle-podman-1_stop_0 node2" [ style=bold color="green" fontcolor="black"] -+"base-bundle-podman-2_monitor_60000 node2" [ style=bold color="green" fontcolor="black"] -+"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_monitor_0 node1" [ style = bold] -+"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_monitor_0 node2" [ style = bold] -+"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_monitor_0 node3" [ style = bold] -+"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_start_0 node2" [ style = bold] -+"base-bundle-podman-2_start_0 node2" -> "base-bundle-podman-2_monitor_60000 node2" [ style = bold] -+"base-bundle-podman-2_start_0 node2" -> "base-bundle_running_0" [ style = bold] -+"base-bundle-podman-2_start_0 node2" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base-bundle-podman-2_start_0 node2" [ style=bold color="green" fontcolor="black"] -+"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] -+"base-bundle_start_0" -> "base-bundle-podman-0_start_0 node1" [ style = bold] -+"base-bundle_start_0" -> "base-bundle-podman-1_start_0 node3" [ style = bold] -+"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node2" [ style = bold] -+"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle_stop_0" -> "base-bundle-clone_stop_0" [ style = bold] -+"base-bundle_stop_0" -> "base-bundle-podman-0_stop_0 node3" [ style = bold] -+"base-bundle_stop_0" -> "base-bundle-podman-1_stop_0 node2" [ style = bold] -+"base-bundle_stop_0" -> "base_stop_0 base-bundle-0" [ style = bold] -+"base-bundle_stop_0" -> "base_stop_0 base-bundle-1" [ style = bold] -+"base-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] -+"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] -+"base_start_0 base-bundle-0" -> "base-bundle-clone_running_0" [ style = bold] -+"base_start_0 base-bundle-0" -> "base_start_0 base-bundle-1" [ style = bold] -+"base_start_0 base-bundle-0" [ style=bold color="green" fontcolor="black"] -+"base_start_0 base-bundle-1" -> "base-bundle-clone_running_0" [ style = bold] -+"base_start_0 base-bundle-1" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base_start_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] -+"base_stop_0 base-bundle-0" -> "base-bundle-0_stop_0 node3" [ style = bold] -+"base_stop_0 base-bundle-0" -> "base-bundle-clone_stopped_0" [ style = bold] -+"base_stop_0 base-bundle-0" -> "base_start_0 base-bundle-0" [ style = bold] -+"base_stop_0 base-bundle-0" [ style=bold color="green" fontcolor="black"] -+"base_stop_0 base-bundle-1" -> "base-bundle-1_stop_0 node2" [ style = bold] 
-+"base_stop_0 base-bundle-1" -> "base-bundle-clone_stopped_0" [ style = bold] -+"base_stop_0 base-bundle-1" -> "base_start_0 base-bundle-1" [ style = bold] -+"base_stop_0 base-bundle-1" -> "base_stop_0 base-bundle-0" [ style = bold] -+"base_stop_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-7.dot b/cts/scheduler/dot/clone-recover-no-shuffle-7.dot -new file mode 100644 -index 00000000000..8bff7da01db ---- /dev/null -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-7.dot -@@ -0,0 +1,45 @@ -+ digraph "g" { -+"Cancel dummy_monitor_10000 node2" -> "dummy_demote_0 node2" [ style = bold] -+"Cancel dummy_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] -+"dummy-clone_demote_0" -> "dummy-clone_demoted_0" [ style = bold] -+"dummy-clone_demote_0" -> "dummy_demote_0 node2" [ style = bold] -+"dummy-clone_demote_0" [ style=bold color="green" fontcolor="orange"] -+"dummy-clone_demoted_0" -> "dummy-clone_promote_0" [ style = bold] -+"dummy-clone_demoted_0" -> "dummy-clone_start_0" [ style = bold] -+"dummy-clone_demoted_0" -> "dummy-clone_stop_0" [ style = bold] -+"dummy-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] -+"dummy-clone_promote_0" -> "dummy_promote_0 node1" [ style = bold] -+"dummy-clone_promote_0" [ style=bold color="green" fontcolor="orange"] -+"dummy-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] -+"dummy-clone_running_0" -> "dummy-clone_promote_0" [ style = bold] -+"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] -+"dummy-clone_start_0" -> "dummy:2_start_0 node3" [ style = bold] -+"dummy-clone_start_0" -> "dummy_start_0 node1" [ style = bold] -+"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"dummy-clone_stop_0" -> "dummy-clone_stopped_0" [ style = bold] -+"dummy-clone_stop_0" -> "dummy_stop_0 node3" [ style = bold] -+"dummy-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -+"dummy-clone_stopped_0" -> "dummy-clone_promote_0" [ style = bold] -+"dummy-clone_stopped_0" -> "dummy-clone_start_0" [ style = bold] -+"dummy-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"dummy:2_monitor_11000 node3" [ style=bold color="green" fontcolor="black"] -+"dummy:2_start_0 node3" -> "dummy-clone_running_0" [ style = bold] -+"dummy:2_start_0 node3" -> "dummy:2_monitor_11000 node3" [ style = bold] -+"dummy:2_start_0 node3" [ style=bold color="green" fontcolor="black"] -+"dummy_demote_0 node2" -> "dummy-clone_demoted_0" [ style = bold] -+"dummy_demote_0 node2" -> "dummy_monitor_11000 node2" [ style = bold] -+"dummy_demote_0 node2" [ style=bold color="green" fontcolor="black"] -+"dummy_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"dummy_monitor_11000 node2" [ style=bold color="green" fontcolor="black"] -+"dummy_promote_0 node1" -> "dummy-clone_promoted_0" [ style = bold] -+"dummy_promote_0 node1" -> "dummy_monitor_10000 node1" [ style = bold] -+"dummy_promote_0 node1" [ style=bold color="green" fontcolor="black"] -+"dummy_start_0 node1" -> "dummy-clone_running_0" [ style = bold] -+"dummy_start_0 node1" -> "dummy_monitor_10000 node1" [ style = bold] -+"dummy_start_0 node1" -> "dummy_promote_0 node1" [ style = bold] -+"dummy_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"dummy_stop_0 node3" -> "dummy-clone_stopped_0" [ style = bold] -+"dummy_stop_0 node3" -> "dummy_start_0 node1" [ style = bold] 
-+"dummy_stop_0 node3" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-8.dot b/cts/scheduler/dot/clone-recover-no-shuffle-8.dot -new file mode 100644 -index 00000000000..d9c311a67cb ---- /dev/null -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-8.dot -@@ -0,0 +1,63 @@ -+ digraph "g" { -+"Cancel rsc1_monitor_10000 node2" -> "rsc1_demote_0 node2" [ style = bold] -+"Cancel rsc1_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] -+"Cancel rsc2_monitor_10000 node2" -> "rsc2_demote_0 node2" [ style = bold] -+"Cancel rsc2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] -+"grp-clone_demote_0" -> "grp-clone_demoted_0" [ style = bold] -+"grp-clone_demote_0" -> "grp:1_demote_0" [ style = bold] -+"grp-clone_demote_0" [ style=bold color="green" fontcolor="orange"] -+"grp-clone_demoted_0" -> "grp-clone_promote_0" [ style = bold] -+"grp-clone_demoted_0" -> "grp-clone_start_0" [ style = bold] -+"grp-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] -+"grp-clone_promote_0" -> "grp:2_promote_0" [ style = bold] -+"grp-clone_promote_0" [ style=bold color="green" fontcolor="orange"] -+"grp-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] -+"grp-clone_running_0" -> "grp-clone_promote_0" [ style = bold] -+"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] -+"grp-clone_start_0" -> "grp:2_start_0" [ style = bold] -+"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"grp:1_demote_0" -> "rsc1_demote_0 node2" [ style = bold] -+"grp:1_demote_0" -> "rsc2_demote_0 node2" [ style = bold] -+"grp:1_demote_0" [ style=bold color="green" fontcolor="orange"] -+"grp:1_demoted_0" -> "grp-clone_demoted_0" [ style = bold] -+"grp:1_demoted_0" [ style=bold color="green" fontcolor="orange"] -+"grp:2_promote_0" -> "rsc1:2_promote_0 node1" [ style = bold] -+"grp:2_promote_0" -> "rsc2:2_promote_0 node1" [ style = bold] -+"grp:2_promote_0" [ style=bold color="green" fontcolor="orange"] -+"grp:2_promoted_0" -> "grp-clone_promoted_0" [ style = bold] -+"grp:2_promoted_0" [ style=bold color="green" fontcolor="orange"] -+"grp:2_running_0" -> "grp-clone_running_0" [ style = bold] -+"grp:2_running_0" [ style=bold color="green" fontcolor="orange"] -+"grp:2_start_0" -> "grp:2_running_0" [ style = bold] -+"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] -+"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] -+"grp:2_start_0" [ style=bold color="green" fontcolor="orange"] -+"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"rsc1:2_promote_0 node1" -> "grp:2_promoted_0" [ style = bold] -+"rsc1:2_promote_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] -+"rsc1:2_promote_0 node1" -> "rsc2:2_promote_0 node1" [ style = bold] -+"rsc1:2_promote_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] -+"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] -+"rsc1:2_start_0 node1" -> "rsc1:2_promote_0 node1" [ style = bold] -+"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] -+"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc1_demote_0 node2" -> "grp:1_demoted_0" [ style = bold] -+"rsc1_demote_0 node2" -> "rsc1_monitor_11000 node2" [ style = bold] -+"rsc1_demote_0 node2" [ style=bold color="green" fontcolor="black"] -+"rsc1_monitor_11000 node2" [ style=bold color="green" 
fontcolor="black"] -+"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2:2_promote_0 node1" -> "grp:2_promoted_0" [ style = bold] -+"rsc2:2_promote_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] -+"rsc2:2_promote_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] -+"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] -+"rsc2:2_start_0 node1" -> "rsc2:2_promote_0 node1" [ style = bold] -+"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2_demote_0 node2" -> "grp:1_demoted_0" [ style = bold] -+"rsc2_demote_0 node2" -> "rsc1_demote_0 node2" [ style = bold] -+"rsc2_demote_0 node2" -> "rsc2_monitor_11000 node2" [ style = bold] -+"rsc2_demote_0 node2" [ style=bold color="green" fontcolor="black"] -+"rsc2_monitor_11000 node2" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-9.dot b/cts/scheduler/dot/clone-recover-no-shuffle-9.dot -new file mode 100644 -index 00000000000..45dbac47e2b ---- /dev/null -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-9.dot -@@ -0,0 +1,69 @@ -+ digraph "g" { -+"Cancel base_monitor_15000 base-bundle-1" -> "base_demote_0 base-bundle-1" [ style = bold] -+"Cancel base_monitor_15000 base-bundle-1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] -+"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold] -+"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold] -+"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold] -+"base-bundle-2_start_0 node1" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] -+"base-bundle-2_start_0 node1" -> "base:2_promote_0 base-bundle-2" [ style = bold] -+"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-clone_demote_0" -> "base-bundle-clone_demoted_0" [ style = bold] -+"base-bundle-clone_demote_0" -> "base_demote_0 base-bundle-1" [ style = bold] -+"base-bundle-clone_demote_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-clone_demoted_0" -> "base-bundle-clone_promote_0" [ style = bold] -+"base-bundle-clone_demoted_0" -> "base-bundle-clone_start_0" [ style = bold] -+"base-bundle-clone_demoted_0" -> "base-bundle_demoted_0" [ style = bold] -+"base-bundle-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-clone_promote_0" -> "base:2_promote_0 base-bundle-2" [ style = bold] -+"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold] -+"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-clone_running_0" -> "base-bundle-clone_promote_0" [ style = bold] -+"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] -+"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ 
style = bold] -+"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base:2_promote_0 base-bundle-2" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle_demote_0" -> "base-bundle-clone_demote_0" [ style = bold] -+"base-bundle_demote_0" -> "base-bundle_demoted_0" [ style = bold] -+"base-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle_demoted_0" -> "base-bundle_promote_0" [ style = bold] -+"base-bundle_demoted_0" -> "base-bundle_start_0" [ style = bold] -+"base-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold] -+"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle_running_0" -> "base-bundle_promote_0" [ style = bold] -+"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] -+"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] -+"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold] -+"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"] -+"base:2_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"] -+"base:2_promote_0 base-bundle-2" -> "base-bundle-clone_promoted_0" [ style = bold] -+"base:2_promote_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] -+"base:2_promote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] -+"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] -+"base:2_start_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] -+"base:2_start_0 base-bundle-2" -> "base:2_promote_0 base-bundle-2" [ style = bold] -+"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] -+"base_demote_0 base-bundle-1" -> "base-bundle-clone_demoted_0" [ style = bold] -+"base_demote_0 base-bundle-1" -> "base_monitor_16000 base-bundle-1" [ style = bold] -+"base_demote_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] -+"base_monitor_16000 base-bundle-1" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-1.exp b/cts/scheduler/exp/clone-recover-no-shuffle-1.exp -new file mode 100644 -index 00000000000..670a823dac9 ---- /dev/null -+++ b/cts/scheduler/exp/clone-recover-no-shuffle-1.exp -@@ -0,0 +1,51 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git 
a/cts/scheduler/exp/clone-recover-no-shuffle-10.exp b/cts/scheduler/exp/clone-recover-no-shuffle-10.exp -new file mode 100644 -index 00000000000..27b8b7037c3 ---- /dev/null -+++ b/cts/scheduler/exp/clone-recover-no-shuffle-10.exp -@@ -0,0 +1,51 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-11.exp b/cts/scheduler/exp/clone-recover-no-shuffle-11.exp -new file mode 100644 -index 00000000000..40cf1f69c11 ---- /dev/null -+++ b/cts/scheduler/exp/clone-recover-no-shuffle-11.exp -@@ -0,0 +1,110 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-12.exp b/cts/scheduler/exp/clone-recover-no-shuffle-12.exp -new file mode 100644 -index 00000000000..919e6b291c0 ---- /dev/null -+++ b/cts/scheduler/exp/clone-recover-no-shuffle-12.exp -@@ -0,0 +1,187 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-2.exp b/cts/scheduler/exp/clone-recover-no-shuffle-2.exp -new file mode 100644 -index 00000000000..84b1e1bc98c ---- /dev/null -+++ b/cts/scheduler/exp/clone-recover-no-shuffle-2.exp -@@ -0,0 +1,110 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-3.exp b/cts/scheduler/exp/clone-recover-no-shuffle-3.exp -new file mode 100644 -index 00000000000..6b6ed075f57 ---- /dev/null -+++ b/cts/scheduler/exp/clone-recover-no-shuffle-3.exp -@@ -0,0 +1,171 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-4.exp b/cts/scheduler/exp/clone-recover-no-shuffle-4.exp -new file mode 100644 -index 00000000000..4596c685d0a ---- /dev/null -+++ b/cts/scheduler/exp/clone-recover-no-shuffle-4.exp -@@ -0,0 +1,123 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ 
-+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-5.exp b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp -new file mode 100644 -index 00000000000..8a8e799793e ---- /dev/null -+++ b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp -@@ -0,0 +1,452 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-6.exp b/cts/scheduler/exp/clone-recover-no-shuffle-6.exp -new file mode 100644 -index 00000000000..e6704c9e254 ---- /dev/null -+++ b/cts/scheduler/exp/clone-recover-no-shuffle-6.exp -@@ -0,0 +1,507 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ 
-+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-7.exp b/cts/scheduler/exp/clone-recover-no-shuffle-7.exp -new file mode 100644 -index 00000000000..950de9e0312 ---- /dev/null -+++ b/cts/scheduler/exp/clone-recover-no-shuffle-7.exp -@@ -0,0 +1,240 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-8.exp b/cts/scheduler/exp/clone-recover-no-shuffle-8.exp -new file mode 100644 -index 00000000000..763a2f02fb0 ---- /dev/null -+++ b/cts/scheduler/exp/clone-recover-no-shuffle-8.exp -@@ -0,0 +1,338 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-9.exp b/cts/scheduler/exp/clone-recover-no-shuffle-9.exp -new file mode 100644 -index 00000000000..7bfe3c47281 ---- /dev/null -+++ b/cts/scheduler/exp/clone-recover-no-shuffle-9.exp -@@ -0,0 +1,364 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ 
-+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-1.scores b/cts/scheduler/scores/clone-recover-no-shuffle-1.scores -new file mode 100644 -index 00000000000..c1d60b2f39a ---- /dev/null -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-1.scores -@@ -0,0 +1,25 @@ -+ -+pcmk__clone_assign: dummy-clone allocation score on node1: 0 -+pcmk__clone_assign: dummy-clone allocation score on node2: 0 -+pcmk__clone_assign: dummy-clone allocation score on node3: 0 -+pcmk__clone_assign: dummy:0 allocation score on node1: 0 -+pcmk__clone_assign: dummy:0 allocation score on node2: 1 -+pcmk__clone_assign: dummy:0 allocation score on node3: 0 -+pcmk__clone_assign: dummy:1 allocation score on node1: 0 -+pcmk__clone_assign: dummy:1 allocation score on node2: 0 -+pcmk__clone_assign: dummy:1 allocation score on node3: 1 -+pcmk__clone_assign: dummy:2 allocation score on node1: 0 -+pcmk__clone_assign: dummy:2 allocation score on node2: 0 -+pcmk__clone_assign: dummy:2 allocation score on node3: 0 -+pcmk__primitive_assign: Fencing allocation score on node1: 0 -+pcmk__primitive_assign: Fencing allocation score on node2: 0 -+pcmk__primitive_assign: Fencing allocation score on node3: 0 -+pcmk__primitive_assign: dummy:0 allocation score on node1: 0 -+pcmk__primitive_assign: dummy:0 allocation score on node2: 1 -+pcmk__primitive_assign: dummy:0 allocation score on node3: 0 -+pcmk__primitive_assign: dummy:1 allocation score on node1: 0 -+pcmk__primitive_assign: dummy:1 allocation score on node2: -INFINITY -+pcmk__primitive_assign: dummy:1 allocation score on node3: 1 -+pcmk__primitive_assign: dummy:2 allocation score on node1: 0 -+pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY -+pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-10.scores b/cts/scheduler/scores/clone-recover-no-shuffle-10.scores -new file mode 100644 -index 00000000000..4ac63e37058 ---- /dev/null -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-10.scores -@@ -0,0 +1,31 @@ -+ -+dummy:0 promotion score on node3: 5 -+dummy:1 promotion score on node2: 15 -+dummy:2 promotion score on node1: 10 -+pcmk__clone_assign: dummy-clone allocation score on node1: 0 -+pcmk__clone_assign: dummy-clone allocation score on node2: 0 -+pcmk__clone_assign: dummy-clone allocation score on node3: 0 -+pcmk__clone_assign: dummy:0 allocation score on node1: 10 -+pcmk__clone_assign: dummy:0 allocation score on node2: 0 -+pcmk__clone_assign: dummy:0 allocation score on node3: 6 -+pcmk__clone_assign: dummy:1 allocation score on node1: 10 -+pcmk__clone_assign: dummy:1 allocation score on node2: 16 -+pcmk__clone_assign: dummy:1 allocation score on node3: 0 -+pcmk__clone_assign: dummy:2 allocation score on node1: 10 -+pcmk__clone_assign: dummy:2 allocation score on node2: 15 -+pcmk__clone_assign: dummy:2 allocation score on node3: 5 -+pcmk__primitive_assign: Fencing allocation score on node1: 0 -+pcmk__primitive_assign: Fencing allocation score on node2: 0 -+pcmk__primitive_assign: Fencing allocation score on node3: 0 -+pcmk__primitive_assign: dummy:0 allocation score on node1: -INFINITY -+pcmk__primitive_assign: dummy:0 allocation score on node1: 10 -+pcmk__primitive_assign: dummy:0 allocation score on node2: -INFINITY -+pcmk__primitive_assign: dummy:0 allocation score on node2: -INFINITY -+pcmk__primitive_assign: dummy:0 allocation score on 
node3: 6 -+pcmk__primitive_assign: dummy:0 allocation score on node3: 6 -+pcmk__primitive_assign: dummy:1 allocation score on node1: 10 -+pcmk__primitive_assign: dummy:1 allocation score on node2: 16 -+pcmk__primitive_assign: dummy:1 allocation score on node3: 0 -+pcmk__primitive_assign: dummy:2 allocation score on node1: 10 -+pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY -+pcmk__primitive_assign: dummy:2 allocation score on node3: 5 -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-11.scores b/cts/scheduler/scores/clone-recover-no-shuffle-11.scores -new file mode 100644 -index 00000000000..1216dba711a ---- /dev/null -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-11.scores -@@ -0,0 +1,82 @@ -+ -+grp:0 promotion score on node3: 10 -+grp:1 promotion score on node2: 30 -+grp:2 promotion score on node1: 20 -+pcmk__clone_assign: grp-clone allocation score on node1: 0 -+pcmk__clone_assign: grp-clone allocation score on node2: 0 -+pcmk__clone_assign: grp-clone allocation score on node3: 0 -+pcmk__clone_assign: grp:0 allocation score on node1: 20 -+pcmk__clone_assign: grp:0 allocation score on node2: 0 -+pcmk__clone_assign: grp:0 allocation score on node3: 10 -+pcmk__clone_assign: grp:1 allocation score on node1: 20 -+pcmk__clone_assign: grp:1 allocation score on node2: 30 -+pcmk__clone_assign: grp:1 allocation score on node3: 0 -+pcmk__clone_assign: grp:2 allocation score on node1: 20 -+pcmk__clone_assign: grp:2 allocation score on node2: 30 -+pcmk__clone_assign: grp:2 allocation score on node3: 10 -+pcmk__clone_assign: rsc1:0 allocation score on node1: 0 -+pcmk__clone_assign: rsc1:0 allocation score on node2: 0 -+pcmk__clone_assign: rsc1:0 allocation score on node3: 1 -+pcmk__clone_assign: rsc1:1 allocation score on node1: 0 -+pcmk__clone_assign: rsc1:1 allocation score on node2: 1 -+pcmk__clone_assign: rsc1:1 allocation score on node3: 0 -+pcmk__clone_assign: rsc1:2 allocation score on node1: 0 -+pcmk__clone_assign: rsc1:2 allocation score on node2: 0 -+pcmk__clone_assign: rsc1:2 allocation score on node3: 0 -+pcmk__clone_assign: rsc2:0 allocation score on node1: 0 -+pcmk__clone_assign: rsc2:0 allocation score on node2: 0 -+pcmk__clone_assign: rsc2:0 allocation score on node3: 1 -+pcmk__clone_assign: rsc2:1 allocation score on node1: 0 -+pcmk__clone_assign: rsc2:1 allocation score on node2: 1 -+pcmk__clone_assign: rsc2:1 allocation score on node3: 0 -+pcmk__clone_assign: rsc2:2 allocation score on node1: 0 -+pcmk__clone_assign: rsc2:2 allocation score on node2: 0 -+pcmk__clone_assign: rsc2:2 allocation score on node3: 0 -+pcmk__group_assign: grp:0 allocation score on node1: 20 -+pcmk__group_assign: grp:0 allocation score on node2: -INFINITY -+pcmk__group_assign: grp:0 allocation score on node3: 10 -+pcmk__group_assign: grp:1 allocation score on node1: 20 -+pcmk__group_assign: grp:1 allocation score on node2: 30 -+pcmk__group_assign: grp:1 allocation score on node3: 0 -+pcmk__group_assign: grp:2 allocation score on node1: 20 -+pcmk__group_assign: grp:2 allocation score on node2: -INFINITY -+pcmk__group_assign: grp:2 allocation score on node3: -INFINITY -+pcmk__group_assign: rsc1:0 allocation score on node1: 0 -+pcmk__group_assign: rsc1:0 allocation score on node2: -INFINITY -+pcmk__group_assign: rsc1:0 allocation score on node3: 1 -+pcmk__group_assign: rsc1:1 allocation score on node1: 0 -+pcmk__group_assign: rsc1:1 allocation score on node2: 1 -+pcmk__group_assign: rsc1:1 allocation score on node3: 0 -+pcmk__group_assign: rsc1:2 allocation score on node1: 
0 -+pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY -+pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY -+pcmk__group_assign: rsc2:0 allocation score on node1: 0 -+pcmk__group_assign: rsc2:0 allocation score on node2: -INFINITY -+pcmk__group_assign: rsc2:0 allocation score on node3: 1 -+pcmk__group_assign: rsc2:1 allocation score on node1: 0 -+pcmk__group_assign: rsc2:1 allocation score on node2: 1 -+pcmk__group_assign: rsc2:1 allocation score on node3: 0 -+pcmk__group_assign: rsc2:2 allocation score on node1: 0 -+pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY -+pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY -+pcmk__primitive_assign: Fencing allocation score on node1: 0 -+pcmk__primitive_assign: Fencing allocation score on node2: 0 -+pcmk__primitive_assign: Fencing allocation score on node3: 0 -+pcmk__primitive_assign: rsc1:0 allocation score on node1: 0 -+pcmk__primitive_assign: rsc1:0 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc1:0 allocation score on node3: 2 -+pcmk__primitive_assign: rsc1:1 allocation score on node1: 0 -+pcmk__primitive_assign: rsc1:1 allocation score on node2: 2 -+pcmk__primitive_assign: rsc1:1 allocation score on node3: 0 -+pcmk__primitive_assign: rsc1:2 allocation score on node1: 0 -+pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY -+pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY -+pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc2:0 allocation score on node3: 1 -+pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY -+pcmk__primitive_assign: rsc2:1 allocation score on node2: 1 -+pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY -+pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 -+pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-12.scores b/cts/scheduler/scores/clone-recover-no-shuffle-12.scores -new file mode 100644 -index 00000000000..24cf3148c4c ---- /dev/null -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-12.scores -@@ -0,0 +1,67 @@ -+ -+base:0 promotion score on base-bundle-0: 5 -+base:1 promotion score on base-bundle-1: 15 -+base:2 promotion score on base-bundle-2: 10 -+pcmk__bundle_allocate: base-bundle allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-0 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-0 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-0 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-1 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-1 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-1 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-2 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-2 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-2 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-0: -INFINITY -+pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-1: -INFINITY -+pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-2: -INFINITY 
-+pcmk__bundle_allocate: base-bundle-clone allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-clone allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-clone allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node3: 0 -+pcmk__bundle_allocate: base:0 allocation score on base-bundle-0: 501 -+pcmk__bundle_allocate: base:1 allocation score on base-bundle-1: 501 -+pcmk__bundle_allocate: base:2 allocation score on base-bundle-2: 500 -+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 -+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 -+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 -+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY -+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY -+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY -+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY -+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY -+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY -+pcmk__primitive_assign: Fencing allocation score on node1: 0 -+pcmk__primitive_assign: Fencing allocation score on node2: 0 -+pcmk__primitive_assign: Fencing allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 -+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 -+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 -+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY -+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY -+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY -+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY -+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY -+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY -diff 
--git a/cts/scheduler/scores/clone-recover-no-shuffle-2.scores b/cts/scheduler/scores/clone-recover-no-shuffle-2.scores -new file mode 100644 -index 00000000000..cfbd5bf5337 ---- /dev/null -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-2.scores -@@ -0,0 +1,79 @@ -+ -+pcmk__clone_assign: grp-clone allocation score on node1: 0 -+pcmk__clone_assign: grp-clone allocation score on node2: 0 -+pcmk__clone_assign: grp-clone allocation score on node3: 0 -+pcmk__clone_assign: grp:0 allocation score on node1: 0 -+pcmk__clone_assign: grp:0 allocation score on node2: 0 -+pcmk__clone_assign: grp:0 allocation score on node3: 0 -+pcmk__clone_assign: grp:1 allocation score on node1: 0 -+pcmk__clone_assign: grp:1 allocation score on node2: 0 -+pcmk__clone_assign: grp:1 allocation score on node3: 0 -+pcmk__clone_assign: grp:2 allocation score on node1: 0 -+pcmk__clone_assign: grp:2 allocation score on node2: 0 -+pcmk__clone_assign: grp:2 allocation score on node3: 0 -+pcmk__clone_assign: rsc1:0 allocation score on node1: 0 -+pcmk__clone_assign: rsc1:0 allocation score on node2: 1 -+pcmk__clone_assign: rsc1:0 allocation score on node3: 0 -+pcmk__clone_assign: rsc1:1 allocation score on node1: 0 -+pcmk__clone_assign: rsc1:1 allocation score on node2: 0 -+pcmk__clone_assign: rsc1:1 allocation score on node3: 1 -+pcmk__clone_assign: rsc1:2 allocation score on node1: 0 -+pcmk__clone_assign: rsc1:2 allocation score on node2: 0 -+pcmk__clone_assign: rsc1:2 allocation score on node3: 0 -+pcmk__clone_assign: rsc2:0 allocation score on node1: 0 -+pcmk__clone_assign: rsc2:0 allocation score on node2: 1 -+pcmk__clone_assign: rsc2:0 allocation score on node3: 0 -+pcmk__clone_assign: rsc2:1 allocation score on node1: 0 -+pcmk__clone_assign: rsc2:1 allocation score on node2: 0 -+pcmk__clone_assign: rsc2:1 allocation score on node3: 1 -+pcmk__clone_assign: rsc2:2 allocation score on node1: 0 -+pcmk__clone_assign: rsc2:2 allocation score on node2: 0 -+pcmk__clone_assign: rsc2:2 allocation score on node3: 0 -+pcmk__group_assign: grp:0 allocation score on node1: 0 -+pcmk__group_assign: grp:0 allocation score on node2: 0 -+pcmk__group_assign: grp:0 allocation score on node3: 0 -+pcmk__group_assign: grp:1 allocation score on node1: 0 -+pcmk__group_assign: grp:1 allocation score on node2: -INFINITY -+pcmk__group_assign: grp:1 allocation score on node3: 0 -+pcmk__group_assign: grp:2 allocation score on node1: 0 -+pcmk__group_assign: grp:2 allocation score on node2: -INFINITY -+pcmk__group_assign: grp:2 allocation score on node3: -INFINITY -+pcmk__group_assign: rsc1:0 allocation score on node1: 0 -+pcmk__group_assign: rsc1:0 allocation score on node2: 1 -+pcmk__group_assign: rsc1:0 allocation score on node3: 0 -+pcmk__group_assign: rsc1:1 allocation score on node1: 0 -+pcmk__group_assign: rsc1:1 allocation score on node2: -INFINITY -+pcmk__group_assign: rsc1:1 allocation score on node3: 1 -+pcmk__group_assign: rsc1:2 allocation score on node1: 0 -+pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY -+pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY -+pcmk__group_assign: rsc2:0 allocation score on node1: 0 -+pcmk__group_assign: rsc2:0 allocation score on node2: 1 -+pcmk__group_assign: rsc2:0 allocation score on node3: 0 -+pcmk__group_assign: rsc2:1 allocation score on node1: 0 -+pcmk__group_assign: rsc2:1 allocation score on node2: -INFINITY -+pcmk__group_assign: rsc2:1 allocation score on node3: 1 -+pcmk__group_assign: rsc2:2 allocation score on node1: 0 -+pcmk__group_assign: rsc2:2 allocation score 
on node2: -INFINITY -+pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY -+pcmk__primitive_assign: Fencing allocation score on node1: 0 -+pcmk__primitive_assign: Fencing allocation score on node2: 0 -+pcmk__primitive_assign: Fencing allocation score on node3: 0 -+pcmk__primitive_assign: rsc1:0 allocation score on node1: 0 -+pcmk__primitive_assign: rsc1:0 allocation score on node2: 2 -+pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 -+pcmk__primitive_assign: rsc1:1 allocation score on node1: 0 -+pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 -+pcmk__primitive_assign: rsc1:2 allocation score on node1: 0 -+pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY -+pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY -+pcmk__primitive_assign: rsc2:0 allocation score on node2: 1 -+pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY -+pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY -+pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc2:1 allocation score on node3: 1 -+pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 -+pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-3.scores b/cts/scheduler/scores/clone-recover-no-shuffle-3.scores -new file mode 100644 -index 00000000000..461c11633b1 ---- /dev/null -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-3.scores -@@ -0,0 +1,64 @@ -+ -+pcmk__bundle_allocate: base-bundle allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-0 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-0 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-0 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-1 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-1 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-1 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-2 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-2 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-2 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-0: -INFINITY -+pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-1: -INFINITY -+pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-2: -INFINITY -+pcmk__bundle_allocate: base-bundle-clone allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-clone allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-clone allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node1: 
0 -+pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node3: 0 -+pcmk__bundle_allocate: base:0 allocation score on base-bundle-0: 501 -+pcmk__bundle_allocate: base:1 allocation score on base-bundle-1: 501 -+pcmk__bundle_allocate: base:2 allocation score on base-bundle-2: 500 -+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 -+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 -+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 -+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY -+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY -+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY -+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY -+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY -+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY -+pcmk__primitive_assign: Fencing allocation score on node1: 0 -+pcmk__primitive_assign: Fencing allocation score on node2: 0 -+pcmk__primitive_assign: Fencing allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 -+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 -+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 -+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY -+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY -+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY -+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY -+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY -+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-4.scores b/cts/scheduler/scores/clone-recover-no-shuffle-4.scores -new file mode 100644 -index 00000000000..492dad1baa4 ---- /dev/null -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-4.scores -@@ -0,0 +1,31 @@ -+ -+pcmk__clone_assign: dummy-clone allocation score on node1: 100 -+pcmk__clone_assign: dummy-clone allocation score on node2: 0 -+pcmk__clone_assign: dummy-clone allocation score on node3: 0 -+pcmk__clone_assign: dummy:0 allocation score on node1: 100 -+pcmk__clone_assign: dummy:0 allocation score on node2: 1 -+pcmk__clone_assign: dummy:0 allocation score on node3: 0 -+pcmk__clone_assign: dummy:1 allocation score on node1: 100 -+pcmk__clone_assign: dummy:1 
allocation score on node2: 0 -+pcmk__clone_assign: dummy:1 allocation score on node3: 1 -+pcmk__clone_assign: dummy:2 allocation score on node1: 100 -+pcmk__clone_assign: dummy:2 allocation score on node2: 0 -+pcmk__clone_assign: dummy:2 allocation score on node3: 0 -+pcmk__primitive_assign: Fencing allocation score on node1: 0 -+pcmk__primitive_assign: Fencing allocation score on node2: 0 -+pcmk__primitive_assign: Fencing allocation score on node3: 0 -+pcmk__primitive_assign: dummy:0 allocation score on node1: 100 -+pcmk__primitive_assign: dummy:0 allocation score on node1: 100 -+pcmk__primitive_assign: dummy:0 allocation score on node2: 1 -+pcmk__primitive_assign: dummy:0 allocation score on node2: 1 -+pcmk__primitive_assign: dummy:0 allocation score on node3: 0 -+pcmk__primitive_assign: dummy:0 allocation score on node3: 0 -+pcmk__primitive_assign: dummy:1 allocation score on node1: -INFINITY -+pcmk__primitive_assign: dummy:1 allocation score on node1: 100 -+pcmk__primitive_assign: dummy:1 allocation score on node2: 0 -+pcmk__primitive_assign: dummy:1 allocation score on node2: 0 -+pcmk__primitive_assign: dummy:1 allocation score on node3: 1 -+pcmk__primitive_assign: dummy:1 allocation score on node3: 1 -+pcmk__primitive_assign: dummy:2 allocation score on node1: -INFINITY -+pcmk__primitive_assign: dummy:2 allocation score on node2: 0 -+pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-5.scores b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores -new file mode 100644 -index 00000000000..eecba43fae0 ---- /dev/null -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores -@@ -0,0 +1,79 @@ -+ -+pcmk__clone_assign: grp-clone allocation score on node1: 100 -+pcmk__clone_assign: grp-clone allocation score on node2: 0 -+pcmk__clone_assign: grp-clone allocation score on node3: 0 -+pcmk__clone_assign: grp:0 allocation score on node1: 100 -+pcmk__clone_assign: grp:0 allocation score on node2: 0 -+pcmk__clone_assign: grp:0 allocation score on node3: 0 -+pcmk__clone_assign: grp:1 allocation score on node1: 100 -+pcmk__clone_assign: grp:1 allocation score on node2: 0 -+pcmk__clone_assign: grp:1 allocation score on node3: 0 -+pcmk__clone_assign: grp:2 allocation score on node1: 100 -+pcmk__clone_assign: grp:2 allocation score on node2: 0 -+pcmk__clone_assign: grp:2 allocation score on node3: 0 -+pcmk__clone_assign: rsc1:0 allocation score on node1: 100 -+pcmk__clone_assign: rsc1:0 allocation score on node2: 1 -+pcmk__clone_assign: rsc1:0 allocation score on node3: 0 -+pcmk__clone_assign: rsc1:1 allocation score on node1: 100 -+pcmk__clone_assign: rsc1:1 allocation score on node2: 0 -+pcmk__clone_assign: rsc1:1 allocation score on node3: 1 -+pcmk__clone_assign: rsc1:2 allocation score on node1: 100 -+pcmk__clone_assign: rsc1:2 allocation score on node2: 0 -+pcmk__clone_assign: rsc1:2 allocation score on node3: 0 -+pcmk__clone_assign: rsc2:0 allocation score on node1: 0 -+pcmk__clone_assign: rsc2:0 allocation score on node2: 1 -+pcmk__clone_assign: rsc2:0 allocation score on node3: 0 -+pcmk__clone_assign: rsc2:1 allocation score on node1: 0 -+pcmk__clone_assign: rsc2:1 allocation score on node2: 0 -+pcmk__clone_assign: rsc2:1 allocation score on node3: 1 -+pcmk__clone_assign: rsc2:2 allocation score on node1: 0 -+pcmk__clone_assign: rsc2:2 allocation score on node2: 0 -+pcmk__clone_assign: rsc2:2 allocation score on node3: 0 -+pcmk__group_assign: grp:0 allocation score on node1: 100 -+pcmk__group_assign: grp:0 
allocation score on node2: 0 -+pcmk__group_assign: grp:0 allocation score on node3: 0 -+pcmk__group_assign: grp:1 allocation score on node1: 100 -+pcmk__group_assign: grp:1 allocation score on node2: 0 -+pcmk__group_assign: grp:1 allocation score on node3: 0 -+pcmk__group_assign: grp:2 allocation score on node1: 100 -+pcmk__group_assign: grp:2 allocation score on node2: 0 -+pcmk__group_assign: grp:2 allocation score on node3: 0 -+pcmk__group_assign: rsc1:0 allocation score on node1: 100 -+pcmk__group_assign: rsc1:0 allocation score on node2: 1 -+pcmk__group_assign: rsc1:0 allocation score on node3: 0 -+pcmk__group_assign: rsc1:1 allocation score on node1: 100 -+pcmk__group_assign: rsc1:1 allocation score on node2: 0 -+pcmk__group_assign: rsc1:1 allocation score on node3: 1 -+pcmk__group_assign: rsc1:2 allocation score on node1: 100 -+pcmk__group_assign: rsc1:2 allocation score on node2: 0 -+pcmk__group_assign: rsc1:2 allocation score on node3: 0 -+pcmk__group_assign: rsc2:0 allocation score on node1: 0 -+pcmk__group_assign: rsc2:0 allocation score on node2: 1 -+pcmk__group_assign: rsc2:0 allocation score on node3: 0 -+pcmk__group_assign: rsc2:1 allocation score on node1: 0 -+pcmk__group_assign: rsc2:1 allocation score on node2: 0 -+pcmk__group_assign: rsc2:1 allocation score on node3: 1 -+pcmk__group_assign: rsc2:2 allocation score on node1: 0 -+pcmk__group_assign: rsc2:2 allocation score on node2: 0 -+pcmk__group_assign: rsc2:2 allocation score on node3: 0 -+pcmk__primitive_assign: Fencing allocation score on node1: 0 -+pcmk__primitive_assign: Fencing allocation score on node2: 0 -+pcmk__primitive_assign: Fencing allocation score on node3: 0 -+pcmk__primitive_assign: rsc1:0 allocation score on node1: 100 -+pcmk__primitive_assign: rsc1:0 allocation score on node2: 2 -+pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 -+pcmk__primitive_assign: rsc1:1 allocation score on node1: 100 -+pcmk__primitive_assign: rsc1:1 allocation score on node2: 0 -+pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 -+pcmk__primitive_assign: rsc1:2 allocation score on node1: 100 -+pcmk__primitive_assign: rsc1:2 allocation score on node2: 0 -+pcmk__primitive_assign: rsc1:2 allocation score on node3: 0 -+pcmk__primitive_assign: rsc2:0 allocation score on node1: 0 -+pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY -+pcmk__primitive_assign: rsc2:1 allocation score on node1: 0 -+pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY -+pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 -+pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-6.scores b/cts/scheduler/scores/clone-recover-no-shuffle-6.scores -new file mode 100644 -index 00000000000..643e30f9d18 ---- /dev/null -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-6.scores -@@ -0,0 +1,70 @@ -+ -+pcmk__bundle_allocate: base-bundle allocation score on node1: 100 -+pcmk__bundle_allocate: base-bundle allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-0 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-0 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-0 allocation score on node3: 0 -+pcmk__bundle_allocate: 
base-bundle-1 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-1 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-1 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-2 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-2 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-2 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-0: -INFINITY -+pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-1: -INFINITY -+pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-2: -INFINITY -+pcmk__bundle_allocate: base-bundle-clone allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-clone allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-clone allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node1: 100 -+pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node1: 100 -+pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node1: 100 -+pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node3: 0 -+pcmk__bundle_allocate: base:0 allocation score on base-bundle-0: 501 -+pcmk__bundle_allocate: base:1 allocation score on base-bundle-1: 501 -+pcmk__bundle_allocate: base:2 allocation score on base-bundle-2: 500 -+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 -+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 -+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 -+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY -+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY -+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY -+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY -+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY -+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY -+pcmk__primitive_assign: Fencing allocation score on node1: 0 -+pcmk__primitive_assign: Fencing allocation score on node2: 0 -+pcmk__primitive_assign: Fencing allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000 -+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 10000 -+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 10000 -+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 100 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 100 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-podman-0 allocation 
score on node2: 0 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 100 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY -+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY -+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY -+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY -+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-7.scores b/cts/scheduler/scores/clone-recover-no-shuffle-7.scores -new file mode 100644 -index 00000000000..fc45bf740fd ---- /dev/null -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-7.scores -@@ -0,0 +1,34 @@ -+ -+dummy:0 promotion score on node1: 15 -+dummy:1 promotion score on node2: 10 -+dummy:2 promotion score on node3: 5 -+pcmk__clone_assign: dummy-clone allocation score on node1: 0 -+pcmk__clone_assign: dummy-clone allocation score on node2: 0 -+pcmk__clone_assign: dummy-clone allocation score on node3: 0 -+pcmk__clone_assign: dummy:0 allocation score on node1: 15 -+pcmk__clone_assign: dummy:0 allocation score on node2: 0 -+pcmk__clone_assign: dummy:0 allocation score on node3: 6 -+pcmk__clone_assign: dummy:1 allocation score on node1: 15 -+pcmk__clone_assign: dummy:1 allocation score on node2: 11 -+pcmk__clone_assign: dummy:1 allocation score on node3: 0 -+pcmk__clone_assign: dummy:2 allocation score on node1: 15 -+pcmk__clone_assign: dummy:2 allocation score on node2: 10 -+pcmk__clone_assign: dummy:2 allocation score on node3: 5 -+pcmk__primitive_assign: Fencing allocation score on node1: 0 -+pcmk__primitive_assign: Fencing allocation score on node2: 0 -+pcmk__primitive_assign: Fencing allocation score on node3: 0 -+pcmk__primitive_assign: dummy:0 allocation score on node1: 15 -+pcmk__primitive_assign: dummy:0 allocation score on node1: 15 -+pcmk__primitive_assign: dummy:0 allocation score on node2: 0 -+pcmk__primitive_assign: dummy:0 allocation score on node2: 0 -+pcmk__primitive_assign: dummy:0 allocation score on node3: 6 -+pcmk__primitive_assign: dummy:0 allocation score on node3: 6 -+pcmk__primitive_assign: dummy:1 allocation score on node1: -INFINITY -+pcmk__primitive_assign: dummy:1 allocation score on node1: 15 -+pcmk__primitive_assign: dummy:1 allocation score on node2: 11 -+pcmk__primitive_assign: dummy:1 allocation score on node2: 11 -+pcmk__primitive_assign: dummy:1 allocation score on node3: 0 -+pcmk__primitive_assign: dummy:1 allocation score on node3: 0 -+pcmk__primitive_assign: dummy:2 allocation score on node1: -INFINITY -+pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY -+pcmk__primitive_assign: dummy:2 allocation score on node3: 5 -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-8.scores b/cts/scheduler/scores/clone-recover-no-shuffle-8.scores -new file mode 100644 -index 
00000000000..56d4cc8395a ---- /dev/null -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-8.scores -@@ -0,0 +1,82 @@ -+ -+grp:0 promotion score on node3: 10 -+grp:1 promotion score on node2: 20 -+grp:2 promotion score on node1: 30 -+pcmk__clone_assign: grp-clone allocation score on node1: 0 -+pcmk__clone_assign: grp-clone allocation score on node2: 0 -+pcmk__clone_assign: grp-clone allocation score on node3: 0 -+pcmk__clone_assign: grp:0 allocation score on node1: 30 -+pcmk__clone_assign: grp:0 allocation score on node2: 0 -+pcmk__clone_assign: grp:0 allocation score on node3: 10 -+pcmk__clone_assign: grp:1 allocation score on node1: 30 -+pcmk__clone_assign: grp:1 allocation score on node2: 20 -+pcmk__clone_assign: grp:1 allocation score on node3: 0 -+pcmk__clone_assign: grp:2 allocation score on node1: 30 -+pcmk__clone_assign: grp:2 allocation score on node2: 20 -+pcmk__clone_assign: grp:2 allocation score on node3: 10 -+pcmk__clone_assign: rsc1:0 allocation score on node1: 0 -+pcmk__clone_assign: rsc1:0 allocation score on node2: 0 -+pcmk__clone_assign: rsc1:0 allocation score on node3: 1 -+pcmk__clone_assign: rsc1:1 allocation score on node1: 0 -+pcmk__clone_assign: rsc1:1 allocation score on node2: 1 -+pcmk__clone_assign: rsc1:1 allocation score on node3: 0 -+pcmk__clone_assign: rsc1:2 allocation score on node1: 0 -+pcmk__clone_assign: rsc1:2 allocation score on node2: 0 -+pcmk__clone_assign: rsc1:2 allocation score on node3: 0 -+pcmk__clone_assign: rsc2:0 allocation score on node1: 0 -+pcmk__clone_assign: rsc2:0 allocation score on node2: 0 -+pcmk__clone_assign: rsc2:0 allocation score on node3: 1 -+pcmk__clone_assign: rsc2:1 allocation score on node1: 0 -+pcmk__clone_assign: rsc2:1 allocation score on node2: 1 -+pcmk__clone_assign: rsc2:1 allocation score on node3: 0 -+pcmk__clone_assign: rsc2:2 allocation score on node1: 0 -+pcmk__clone_assign: rsc2:2 allocation score on node2: 0 -+pcmk__clone_assign: rsc2:2 allocation score on node3: 0 -+pcmk__group_assign: grp:0 allocation score on node1: 30 -+pcmk__group_assign: grp:0 allocation score on node2: 0 -+pcmk__group_assign: grp:0 allocation score on node3: 10 -+pcmk__group_assign: grp:1 allocation score on node1: 30 -+pcmk__group_assign: grp:1 allocation score on node2: 20 -+pcmk__group_assign: grp:1 allocation score on node3: -INFINITY -+pcmk__group_assign: grp:2 allocation score on node1: 30 -+pcmk__group_assign: grp:2 allocation score on node2: -INFINITY -+pcmk__group_assign: grp:2 allocation score on node3: -INFINITY -+pcmk__group_assign: rsc1:0 allocation score on node1: 0 -+pcmk__group_assign: rsc1:0 allocation score on node2: 0 -+pcmk__group_assign: rsc1:0 allocation score on node3: 1 -+pcmk__group_assign: rsc1:1 allocation score on node1: 0 -+pcmk__group_assign: rsc1:1 allocation score on node2: 1 -+pcmk__group_assign: rsc1:1 allocation score on node3: -INFINITY -+pcmk__group_assign: rsc1:2 allocation score on node1: 0 -+pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY -+pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY -+pcmk__group_assign: rsc2:0 allocation score on node1: 0 -+pcmk__group_assign: rsc2:0 allocation score on node2: 0 -+pcmk__group_assign: rsc2:0 allocation score on node3: 1 -+pcmk__group_assign: rsc2:1 allocation score on node1: 0 -+pcmk__group_assign: rsc2:1 allocation score on node2: 1 -+pcmk__group_assign: rsc2:1 allocation score on node3: -INFINITY -+pcmk__group_assign: rsc2:2 allocation score on node1: 0 -+pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY 
-+pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY -+pcmk__primitive_assign: Fencing allocation score on node1: 0 -+pcmk__primitive_assign: Fencing allocation score on node2: 0 -+pcmk__primitive_assign: Fencing allocation score on node3: 0 -+pcmk__primitive_assign: rsc1:0 allocation score on node1: 0 -+pcmk__primitive_assign: rsc1:0 allocation score on node2: 0 -+pcmk__primitive_assign: rsc1:0 allocation score on node3: 2 -+pcmk__primitive_assign: rsc1:1 allocation score on node1: 0 -+pcmk__primitive_assign: rsc1:1 allocation score on node2: 2 -+pcmk__primitive_assign: rsc1:1 allocation score on node3: -INFINITY -+pcmk__primitive_assign: rsc1:2 allocation score on node1: 0 -+pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY -+pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY -+pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc2:0 allocation score on node3: 1 -+pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY -+pcmk__primitive_assign: rsc2:1 allocation score on node2: 1 -+pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY -+pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 -+pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-9.scores b/cts/scheduler/scores/clone-recover-no-shuffle-9.scores -new file mode 100644 -index 00000000000..947c86b262c ---- /dev/null -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-9.scores -@@ -0,0 +1,67 @@ -+ -+base:0 promotion score on base-bundle-0: 5 -+base:1 promotion score on base-bundle-1: 10 -+base:2 promotion score on base-bundle-2: 15 -+pcmk__bundle_allocate: base-bundle allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-0 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-0 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-0 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-1 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-1 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-1 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-2 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-2 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-2 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-0: -INFINITY -+pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-1: -INFINITY -+pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-2: -INFINITY -+pcmk__bundle_allocate: base-bundle-clone allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-clone allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-clone allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node2: 0 -+pcmk__bundle_allocate: 
base-bundle-podman-1 allocation score on node3: 0 -+pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node1: 0 -+pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node2: 0 -+pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node3: 0 -+pcmk__bundle_allocate: base:0 allocation score on base-bundle-0: 501 -+pcmk__bundle_allocate: base:1 allocation score on base-bundle-1: 501 -+pcmk__bundle_allocate: base:2 allocation score on base-bundle-2: 500 -+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 -+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 -+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 -+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY -+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY -+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY -+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY -+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY -+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY -+pcmk__primitive_assign: Fencing allocation score on node1: 0 -+pcmk__primitive_assign: Fencing allocation score on node2: 0 -+pcmk__primitive_assign: Fencing allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 -+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 -+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 -+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY -+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 -+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY -+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY -+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY -+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY -+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-1.summary b/cts/scheduler/summary/clone-recover-no-shuffle-1.summary -new file mode 100644 -index 00000000000..0b6866ec16c ---- /dev/null -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-1.summary -@@ -0,0 +1,29 @@ -+Using the original execution date of: 2023-06-21 00:59:59Z -+Current cluster status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: dummy-clone [dummy]: -+ * Started: [ node2 node3 ] -+ * Stopped: [ node1 ] -+ -+Transition Summary: -+ * 
Start dummy:2 ( node1 ) -+ -+Executing Cluster Transition: -+ * Pseudo action: dummy-clone_start_0 -+ * Resource action: dummy start on node1 -+ * Pseudo action: dummy-clone_running_0 -+ * Resource action: dummy monitor=10000 on node1 -+Using the original execution date of: 2023-06-21 00:59:59Z -+ -+Revised Cluster Status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: dummy-clone [dummy]: -+ * Started: [ node1 node2 node3 ] -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-10.summary b/cts/scheduler/summary/clone-recover-no-shuffle-10.summary -new file mode 100644 -index 00000000000..5b0f9b6d685 ---- /dev/null -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-10.summary -@@ -0,0 +1,29 @@ -+Current cluster status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: dummy-clone [dummy] (promotable): -+ * Promoted: [ node2 ] -+ * Unpromoted: [ node3 ] -+ * Stopped: [ node1 ] -+ -+Transition Summary: -+ * Start dummy:2 ( node1 ) -+ -+Executing Cluster Transition: -+ * Pseudo action: dummy-clone_start_0 -+ * Resource action: dummy start on node1 -+ * Pseudo action: dummy-clone_running_0 -+ * Resource action: dummy monitor=11000 on node1 -+ -+Revised Cluster Status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: dummy-clone [dummy] (promotable): -+ * Promoted: [ node2 ] -+ * Unpromoted: [ node1 node3 ] -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-11.summary b/cts/scheduler/summary/clone-recover-no-shuffle-11.summary -new file mode 100644 -index 00000000000..e0bdb61d605 ---- /dev/null -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-11.summary -@@ -0,0 +1,34 @@ -+Current cluster status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: grp-clone [grp] (promotable): -+ * Promoted: [ node2 ] -+ * Unpromoted: [ node3 ] -+ * Stopped: [ node1 ] -+ -+Transition Summary: -+ * Start rsc1:2 ( node1 ) -+ * Start rsc2:2 ( node1 ) -+ -+Executing Cluster Transition: -+ * Pseudo action: grp-clone_start_0 -+ * Pseudo action: grp:2_start_0 -+ * Resource action: rsc1 start on node1 -+ * Resource action: rsc2 start on node1 -+ * Pseudo action: grp:2_running_0 -+ * Resource action: rsc1 monitor=11000 on node1 -+ * Resource action: rsc2 monitor=11000 on node1 -+ * Pseudo action: grp-clone_running_0 -+ -+Revised Cluster Status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: grp-clone [grp] (promotable): -+ * Promoted: [ node2 ] -+ * Unpromoted: [ node1 node3 ] -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-12.summary b/cts/scheduler/summary/clone-recover-no-shuffle-12.summary -new file mode 100644 -index 00000000000..6e55a0b7f2f ---- /dev/null -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-12.summary -@@ -0,0 +1,43 @@ -+Current cluster status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ * GuestOnline: [ base-bundle-0 base-bundle-1 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Container bundle set: base-bundle [localhost/pcmktest]: -+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3 -+ * base-bundle-1 
(ocf:pacemaker:Stateful): Promoted node2 -+ * base-bundle-2 (ocf:pacemaker:Stateful): Stopped -+ -+Transition Summary: -+ * Start base-bundle-podman-2 ( node1 ) -+ * Start base-bundle-2 ( node1 ) -+ * Start base:2 ( base-bundle-2 ) -+ -+Executing Cluster Transition: -+ * Pseudo action: base-bundle_start_0 -+ * Pseudo action: base-bundle-clone_start_0 -+ * Resource action: base-bundle-podman-2 start on node1 -+ * Resource action: base-bundle-2 monitor on node3 -+ * Resource action: base-bundle-2 monitor on node2 -+ * Resource action: base-bundle-2 monitor on node1 -+ * Resource action: base-bundle-podman-2 monitor=60000 on node1 -+ * Resource action: base-bundle-2 start on node1 -+ * Resource action: base start on base-bundle-2 -+ * Pseudo action: base-bundle-clone_running_0 -+ * Resource action: base-bundle-2 monitor=30000 on node1 -+ * Pseudo action: base-bundle_running_0 -+ * Resource action: base monitor=16000 on base-bundle-2 -+ -+Revised Cluster Status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Container bundle set: base-bundle [localhost/pcmktest]: -+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3 -+ * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2 -+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-2.summary b/cts/scheduler/summary/clone-recover-no-shuffle-2.summary -new file mode 100644 -index 00000000000..8b18120ad8d ---- /dev/null -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-2.summary -@@ -0,0 +1,32 @@ -+Current cluster status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: grp-clone [grp]: -+ * Started: [ node2 node3 ] -+ * Stopped: [ node1 ] -+ -+Transition Summary: -+ * Start rsc1:2 ( node1 ) -+ * Start rsc2:2 ( node1 ) -+ -+Executing Cluster Transition: -+ * Pseudo action: grp-clone_start_0 -+ * Pseudo action: grp:2_start_0 -+ * Resource action: rsc1 start on node1 -+ * Resource action: rsc2 start on node1 -+ * Pseudo action: grp:2_running_0 -+ * Resource action: rsc1 monitor=10000 on node1 -+ * Resource action: rsc2 monitor=10000 on node1 -+ * Pseudo action: grp-clone_running_0 -+ -+Revised Cluster Status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: grp-clone [grp]: -+ * Started: [ node1 node2 node3 ] -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-3.summary b/cts/scheduler/summary/clone-recover-no-shuffle-3.summary -new file mode 100644 -index 00000000000..5702177e33d ---- /dev/null -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-3.summary -@@ -0,0 +1,42 @@ -+Current cluster status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ * GuestOnline: [ base-bundle-0 base-bundle-1 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Container bundle set: base-bundle [localhost/pcmktest]: -+ * base-bundle-0 (ocf:pacemaker:Stateful): Started node3 -+ * base-bundle-1 (ocf:pacemaker:Stateful): Started node2 -+ * base-bundle-2 (ocf:pacemaker:Stateful): Stopped -+ -+Transition Summary: -+ * Start base-bundle-podman-2 ( node1 ) -+ * Start base-bundle-2 ( node1 ) -+ * Start base:2 ( base-bundle-2 ) -+ -+Executing Cluster Transition: -+ * Pseudo action: base-bundle_start_0 -+ * 
Pseudo action: base-bundle-clone_start_0 -+ * Resource action: base-bundle-podman-2 start on node1 -+ * Resource action: base-bundle-2 monitor on node3 -+ * Resource action: base-bundle-2 monitor on node2 -+ * Resource action: base-bundle-2 monitor on node1 -+ * Resource action: base-bundle-podman-2 monitor=60000 on node1 -+ * Resource action: base-bundle-2 start on node1 -+ * Resource action: base start on base-bundle-2 -+ * Pseudo action: base-bundle-clone_running_0 -+ * Resource action: base-bundle-2 monitor=30000 on node1 -+ * Pseudo action: base-bundle_running_0 -+ -+Revised Cluster Status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Container bundle set: base-bundle [localhost/pcmktest]: -+ * base-bundle-0 (ocf:pacemaker:Stateful): Started node3 -+ * base-bundle-1 (ocf:pacemaker:Stateful): Started node2 -+ * base-bundle-2 (ocf:pacemaker:Stateful): Started node1 -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-4.summary b/cts/scheduler/summary/clone-recover-no-shuffle-4.summary -new file mode 100644 -index 00000000000..944bcb834b3 ---- /dev/null -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-4.summary -@@ -0,0 +1,35 @@ -+Using the original execution date of: 2023-06-21 00:59:59Z -+Current cluster status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: dummy-clone [dummy]: -+ * Started: [ node2 node3 ] -+ * Stopped: [ node1 ] -+ -+Transition Summary: -+ * Move dummy:0 ( node2 -> node1 ) -+ * Start dummy:2 ( node2 ) -+ -+Executing Cluster Transition: -+ * Pseudo action: dummy-clone_stop_0 -+ * Resource action: dummy stop on node2 -+ * Pseudo action: dummy-clone_stopped_0 -+ * Pseudo action: dummy-clone_start_0 -+ * Resource action: dummy start on node1 -+ * Resource action: dummy start on node2 -+ * Pseudo action: dummy-clone_running_0 -+ * Resource action: dummy monitor=10000 on node1 -+ * Resource action: dummy monitor=10000 on node2 -+Using the original execution date of: 2023-06-21 00:59:59Z -+ -+Revised Cluster Status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: dummy-clone [dummy]: -+ * Started: [ node1 node2 node3 ] -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary -new file mode 100644 -index 00000000000..e84d0a574de ---- /dev/null -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary -@@ -0,0 +1,59 @@ -+Current cluster status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: grp-clone [grp]: -+ * Started: [ node2 node3 ] -+ * Stopped: [ node1 ] -+ -+Transition Summary: -+ * Move rsc1:0 ( node2 -> node1 ) -+ * Move rsc2:0 ( node2 -> node1 ) -+ * Move rsc1:1 ( node3 -> node1 ) -+ * Move rsc2:1 ( node3 -> node1 ) -+ * Start rsc1:2 ( node1 ) -+ * Start rsc2:2 ( node1 ) -+ -+Executing Cluster Transition: -+ * Pseudo action: grp-clone_stop_0 -+ * Pseudo action: grp:0_stop_0 -+ * Resource action: rsc2 stop on node2 -+ * Pseudo action: grp:1_stop_0 -+ * Resource action: rsc2 stop on node3 -+ * Resource action: rsc1 stop on node2 -+ * Resource action: rsc1 stop on node3 -+ * Pseudo action: grp:0_stopped_0 -+ * Pseudo action: 
grp:1_stopped_0 -+ * Pseudo action: grp-clone_stopped_0 -+ * Pseudo action: grp-clone_start_0 -+ * Pseudo action: grp:0_start_0 -+ * Resource action: rsc1 start on node1 -+ * Resource action: rsc2 start on node1 -+ * Pseudo action: grp:1_start_0 -+ * Resource action: rsc1 start on node1 -+ * Resource action: rsc2 start on node1 -+ * Pseudo action: grp:2_start_0 -+ * Resource action: rsc1 start on node1 -+ * Resource action: rsc2 start on node1 -+ * Pseudo action: grp:0_running_0 -+ * Resource action: rsc1 monitor=10000 on node1 -+ * Resource action: rsc2 monitor=10000 on node1 -+ * Pseudo action: grp:1_running_0 -+ * Resource action: rsc1 monitor=10000 on node1 -+ * Resource action: rsc2 monitor=10000 on node1 -+ * Pseudo action: grp:2_running_0 -+ * Resource action: rsc1 monitor=10000 on node1 -+ * Resource action: rsc2 monitor=10000 on node1 -+ * Pseudo action: grp-clone_running_0 -+ -+Revised Cluster Status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: grp-clone [grp]: -+ * Started: [ node1 ] -+ * Stopped: [ node2 node3 ] -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-6.summary b/cts/scheduler/summary/clone-recover-no-shuffle-6.summary -new file mode 100644 -index 00000000000..19a957e15fb ---- /dev/null -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-6.summary -@@ -0,0 +1,68 @@ -+Current cluster status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ * GuestOnline: [ base-bundle-0 base-bundle-1 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Container bundle set: base-bundle [localhost/pcmktest]: -+ * base-bundle-0 (ocf:pacemaker:Stateful): Started node3 -+ * base-bundle-1 (ocf:pacemaker:Stateful): Started node2 -+ * base-bundle-2 (ocf:pacemaker:Stateful): Stopped -+ -+Transition Summary: -+ * Move base-bundle-podman-0 ( node3 -> node1 ) -+ * Move base-bundle-0 ( node3 -> node1 ) -+ * Restart base:0 ( base-bundle-0 ) due to required base-bundle-podman-0 start -+ * Move base-bundle-podman-1 ( node2 -> node3 ) -+ * Move base-bundle-1 ( node2 -> node3 ) -+ * Restart base:1 ( base-bundle-1 ) due to required base-bundle-podman-1 start -+ * Start base-bundle-podman-2 ( node2 ) -+ * Start base-bundle-2 ( node2 ) -+ * Start base:2 ( base-bundle-2 ) -+ -+Executing Cluster Transition: -+ * Pseudo action: base-bundle_stop_0 -+ * Pseudo action: base-bundle_start_0 -+ * Pseudo action: base-bundle-clone_stop_0 -+ * Resource action: base-bundle-podman-2 start on node2 -+ * Resource action: base-bundle-2 monitor on node3 -+ * Resource action: base-bundle-2 monitor on node2 -+ * Resource action: base-bundle-2 monitor on node1 -+ * Resource action: base stop on base-bundle-1 -+ * Resource action: base-bundle-1 stop on node2 -+ * Resource action: base-bundle-podman-2 monitor=60000 on node2 -+ * Resource action: base-bundle-2 start on node2 -+ * Resource action: base stop on base-bundle-0 -+ * Pseudo action: base-bundle-clone_stopped_0 -+ * Pseudo action: base-bundle-clone_start_0 -+ * Resource action: base-bundle-0 stop on node3 -+ * Resource action: base-bundle-podman-1 stop on node2 -+ * Resource action: base-bundle-2 monitor=30000 on node2 -+ * Resource action: base-bundle-podman-0 stop on node3 -+ * Resource action: base-bundle-podman-1 start on node3 -+ * Resource action: base-bundle-1 start on node3 -+ * Pseudo action: base-bundle_stopped_0 -+ * Resource action: base-bundle-podman-0 start on node1 -+ * Resource action: base-bundle-0 
start on node1 -+ * Resource action: base-bundle-podman-1 monitor=60000 on node3 -+ * Resource action: base-bundle-1 monitor=30000 on node3 -+ * Resource action: base start on base-bundle-0 -+ * Resource action: base start on base-bundle-1 -+ * Resource action: base start on base-bundle-2 -+ * Pseudo action: base-bundle-clone_running_0 -+ * Resource action: base-bundle-podman-0 monitor=60000 on node1 -+ * Resource action: base-bundle-0 monitor=30000 on node1 -+ * Pseudo action: base-bundle_running_0 -+ -+Revised Cluster Status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Container bundle set: base-bundle [localhost/pcmktest]: -+ * base-bundle-0 (ocf:pacemaker:Stateful): Started node1 -+ * base-bundle-1 (ocf:pacemaker:Stateful): Started node3 -+ * base-bundle-2 (ocf:pacemaker:Stateful): Started node2 -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-7.summary b/cts/scheduler/summary/clone-recover-no-shuffle-7.summary -new file mode 100644 -index 00000000000..e6c9baed0db ---- /dev/null -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-7.summary -@@ -0,0 +1,44 @@ -+Current cluster status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: dummy-clone [dummy] (promotable): -+ * Promoted: [ node2 ] -+ * Unpromoted: [ node3 ] -+ * Stopped: [ node1 ] -+ -+Transition Summary: -+ * Move dummy:0 ( Unpromoted node3 -> Promoted node1 ) -+ * Demote dummy:1 ( Promoted -> Unpromoted node2 ) -+ * Start dummy:2 ( node3 ) -+ -+Executing Cluster Transition: -+ * Resource action: dummy cancel=10000 on node2 -+ * Pseudo action: dummy-clone_demote_0 -+ * Resource action: dummy demote on node2 -+ * Pseudo action: dummy-clone_demoted_0 -+ * Pseudo action: dummy-clone_stop_0 -+ * Resource action: dummy stop on node3 -+ * Resource action: dummy monitor=11000 on node2 -+ * Pseudo action: dummy-clone_stopped_0 -+ * Pseudo action: dummy-clone_start_0 -+ * Resource action: dummy start on node1 -+ * Resource action: dummy start on node3 -+ * Pseudo action: dummy-clone_running_0 -+ * Resource action: dummy monitor=11000 on node3 -+ * Pseudo action: dummy-clone_promote_0 -+ * Resource action: dummy promote on node1 -+ * Pseudo action: dummy-clone_promoted_0 -+ * Resource action: dummy monitor=10000 on node1 -+ -+Revised Cluster Status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: dummy-clone [dummy] (promotable): -+ * Promoted: [ node1 ] -+ * Unpromoted: [ node2 node3 ] -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-8.summary b/cts/scheduler/summary/clone-recover-no-shuffle-8.summary -new file mode 100644 -index 00000000000..878f24801dd ---- /dev/null -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-8.summary -@@ -0,0 +1,52 @@ -+Current cluster status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: grp-clone [grp] (promotable): -+ * Promoted: [ node2 ] -+ * Unpromoted: [ node3 ] -+ * Stopped: [ node1 ] -+ -+Transition Summary: -+ * Demote rsc1:1 ( Promoted -> Unpromoted node2 ) -+ * Demote rsc2:1 ( Promoted -> Unpromoted node2 ) -+ * Promote rsc1:2 ( Stopped -> Promoted node1 ) -+ * Promote rsc2:2 ( Stopped -> Promoted node1 ) -+ -+Executing 
Cluster Transition: -+ * Resource action: rsc1 cancel=10000 on node2 -+ * Resource action: rsc2 cancel=10000 on node2 -+ * Pseudo action: grp-clone_demote_0 -+ * Pseudo action: grp:1_demote_0 -+ * Resource action: rsc2 demote on node2 -+ * Resource action: rsc1 demote on node2 -+ * Resource action: rsc2 monitor=11000 on node2 -+ * Pseudo action: grp:1_demoted_0 -+ * Resource action: rsc1 monitor=11000 on node2 -+ * Pseudo action: grp-clone_demoted_0 -+ * Pseudo action: grp-clone_start_0 -+ * Pseudo action: grp:2_start_0 -+ * Resource action: rsc1 start on node1 -+ * Resource action: rsc2 start on node1 -+ * Pseudo action: grp:2_running_0 -+ * Pseudo action: grp-clone_running_0 -+ * Pseudo action: grp-clone_promote_0 -+ * Pseudo action: grp:2_promote_0 -+ * Resource action: rsc1 promote on node1 -+ * Resource action: rsc2 promote on node1 -+ * Pseudo action: grp:2_promoted_0 -+ * Resource action: rsc1 monitor=10000 on node1 -+ * Resource action: rsc2 monitor=10000 on node1 -+ * Pseudo action: grp-clone_promoted_0 -+ -+Revised Cluster Status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Clone Set: grp-clone [grp] (promotable): -+ * Promoted: [ node1 ] -+ * Unpromoted: [ node2 node3 ] -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-9.summary b/cts/scheduler/summary/clone-recover-no-shuffle-9.summary -new file mode 100644 -index 00000000000..7ede39a6e58 ---- /dev/null -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-9.summary -@@ -0,0 +1,56 @@ -+Current cluster status: -+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ * GuestOnline: [ base-bundle-0 base-bundle-1 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Container bundle set: base-bundle [localhost/pcmktest]: -+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3 -+ * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2 -+ * base-bundle-2 (ocf:pacemaker:Stateful): Stopped -+ -+Transition Summary: -+ * Demote base:1 ( Promoted -> Unpromoted base-bundle-1 ) -+ * Start base-bundle-podman-2 ( node1 ) -+ * Start base-bundle-2 ( node1 ) -+ * Promote base:2 ( Stopped -> Promoted base-bundle-2 ) -+ -+Executing Cluster Transition: -+ * Resource action: base cancel=15000 on base-bundle-1 -+ * Pseudo action: base-bundle_demote_0 -+ * Pseudo action: base-bundle-clone_demote_0 -+ * Resource action: base demote on base-bundle-1 -+ * Pseudo action: base-bundle-clone_demoted_0 -+ * Pseudo action: base-bundle_demoted_0 -+ * Pseudo action: base-bundle_start_0 -+ * Resource action: base monitor=16000 on base-bundle-1 -+ * Pseudo action: base-bundle-clone_start_0 -+ * Resource action: base-bundle-podman-2 start on node1 -+ * Resource action: base-bundle-2 monitor on node3 -+ * Resource action: base-bundle-2 monitor on node2 -+ * Resource action: base-bundle-2 monitor on node1 -+ * Resource action: base-bundle-podman-2 monitor=60000 on node1 -+ * Resource action: base-bundle-2 start on node1 -+ * Resource action: base start on base-bundle-2 -+ * Pseudo action: base-bundle-clone_running_0 -+ * Resource action: base-bundle-2 monitor=30000 on node1 -+ * Pseudo action: base-bundle_running_0 -+ * Pseudo action: base-bundle_promote_0 -+ * Pseudo action: base-bundle-clone_promote_0 -+ * Resource action: base promote on base-bundle-2 -+ * Pseudo action: base-bundle-clone_promoted_0 -+ * Pseudo action: base-bundle_promoted_0 -+ * Resource action: base monitor=15000 on base-bundle-2 -+ -+Revised Cluster Status: 
-+ * Node List: -+ * Online: [ node1 node2 node3 ] -+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] -+ -+ * Full List of Resources: -+ * Fencing (stonith:fence_xvm): Started node2 -+ * Container bundle set: base-bundle [localhost/pcmktest]: -+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3 -+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 -+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node1
-diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-1.xml b/cts/scheduler/xml/clone-recover-no-shuffle-1.xml -new file mode 100644 -index 00000000000..a634ff352cd ---- /dev/null -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-1.xml -@@ -0,0 +1,113 @@ -+ [XML content lost in extraction]
-diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-10.xml b/cts/scheduler/xml/clone-recover-no-shuffle-10.xml -new file mode 100644 -index 00000000000..faa202a0ae0 ---- /dev/null -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-10.xml -@@ -0,0 +1,120 @@ -+ [XML content lost in extraction]
-diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-11.xml b/cts/scheduler/xml/clone-recover-no-shuffle-11.xml -new file mode 100644 -index 00000000000..43d6d749525 ---- /dev/null -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-11.xml -@@ -0,0 +1,153 @@ -+ [XML content lost in extraction]
-diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-12.xml b/cts/scheduler/xml/clone-recover-no-shuffle-12.xml -new file mode 100644 -index 00000000000..e3026903533 ---- /dev/null -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-12.xml -@@ -0,0 +1,186 @@ -+ [XML content lost in extraction]
-diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-2.xml b/cts/scheduler/xml/clone-recover-no-shuffle-2.xml -new file mode 100644 -index 00000000000..486666c1f26 ---- /dev/null -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-2.xml -@@ -0,0 +1,141 @@ -+ [XML content lost in extraction]
-diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-3.xml b/cts/scheduler/xml/clone-recover-no-shuffle-3.xml -new file mode 100644 -index 00000000000..ddafb741dce ---- /dev/null -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-3.xml -@@ -0,0 +1,180 @@ -+ [XML content lost in extraction]
-diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-4.xml b/cts/scheduler/xml/clone-recover-no-shuffle-4.xml -new file mode 100644 -index 00000000000..40e6520c6d0 ---- /dev/null -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-4.xml -@@ -0,0 +1,120 @@ -+ [XML content lost in extraction]
-diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-5.xml b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml -new file mode 100644 -index 00000000000..67176dc1a03 ---- /dev/null -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml -@@ -0,0 +1,148 @@ -+ [XML content lost in extraction]
-diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-6.xml b/cts/scheduler/xml/clone-recover-no-shuffle-6.xml -new file mode 100644 -index 00000000000..3de42f581d4 ---- /dev/null -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-6.xml -@@ -0,0 +1,187 @@ -+ [XML content lost in extraction]
-diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-7.xml b/cts/scheduler/xml/clone-recover-no-shuffle-7.xml -new file mode 100644 -index 00000000000..6e9dad50db4 ---- /dev/null -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-7.xml -@@ -0,0 +1,125 @@ -+ [XML content lost in extraction]
-diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-8.xml
b/cts/scheduler/xml/clone-recover-no-shuffle-8.xml -new file mode 100644 -index 00000000000..6f882b80785 ---- /dev/null -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-8.xml -@@ -0,0 +1,153 @@ -+ [XML content lost in extraction]
-diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-9.xml b/cts/scheduler/xml/clone-recover-no-shuffle-9.xml -new file mode 100644 -index 00000000000..104331d6c00 ---- /dev/null -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-9.xml -@@ -0,0 +1,186 @@ -+ [XML content lost in extraction] -
-From 44dfe36a316bddc562c07f7e1adbbaa57b9adf77 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Wed, 28 Jun 2023 02:04:45 -0700 -Subject: [PATCH 08/19] Refactor: libpacemaker: Recursively copy and restore - allowed node tables - -Given a resource, these two new functions create copies of the allowed -nodes tables of its entire tree of descendants, or restore from such a -backup copy. - -Ref T678 - -Signed-off-by: Reid Wahl ---- - lib/pacemaker/libpacemaker_private.h | 6 +++ - lib/pacemaker/pcmk_sched_nodes.c | 76 ++++++++++++++++++++++++++++ - 2 files changed, 82 insertions(+) -
-diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h -index 614d695f83f..8cdd13f7304 100644 ---- a/lib/pacemaker/libpacemaker_private.h -+++ b/lib/pacemaker/libpacemaker_private.h -@@ -874,6 +874,12 @@ bool pcmk__any_node_available(GHashTable *nodes); - G_GNUC_INTERNAL - GHashTable *pcmk__copy_node_table(GHashTable *nodes); - -+G_GNUC_INTERNAL -+void pcmk__copy_node_tables(const pe_resource_t *rsc, GHashTable **copy); -+ -+G_GNUC_INTERNAL -+void pcmk__restore_node_tables(pe_resource_t *rsc, GHashTable *backup); -+ - G_GNUC_INTERNAL - GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node); -
-diff --git a/lib/pacemaker/pcmk_sched_nodes.c b/lib/pacemaker/pcmk_sched_nodes.c -index d7d5ba46169..eb0b2a41e39 100644 ---- a/lib/pacemaker/pcmk_sched_nodes.c -+++ b/lib/pacemaker/pcmk_sched_nodes.c -@@ -82,6 +82,82 @@ pcmk__copy_node_table(GHashTable *nodes) - return new_table; - } - -+/*! -+ * \internal -+ * \brief Free a table of node tables -+ * -+ * \param[in,out] data Table to free -+ * -+ * \note This is a \c GDestroyNotify wrapper for \c g_hash_table_destroy(). -+ */ -+static void -+destroy_node_tables(gpointer data) -+{ -+ g_hash_table_destroy((GHashTable *) data); -+} -+ -+/*! -+ * \internal -+ * \brief Recursively copy the node tables of a resource -+ * -+ * Build a hash table containing copies of the allowed nodes tables of \p rsc -+ * and its entire tree of descendants. The key is the resource ID, and the value -+ * is a copy of the resource's node table.
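-+ * (Editor's note, not in the upstream comment: the keys are the resources' own -+ * id strings rather than copies, so the backup table must not outlive the -+ * resources it was built from.)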
-+ * -+ * \param[in] rsc Resource whose node table to copy -+ * \param[in,out] copy Where to store the copied node tables -+ * -+ * \note \p *copy should be \c NULL for the top-level call. -+ * \note The caller is responsible for freeing \p copy using -+ * \c g_hash_table_destroy(). -+ */ -+void -+pcmk__copy_node_tables(const pe_resource_t *rsc, GHashTable **copy) -+{ -+ CRM_ASSERT((rsc != NULL) && (copy != NULL)); -+ -+ if (*copy == NULL) { -+ *copy = pcmk__strkey_table(NULL, destroy_node_tables); -+ } -+ -+ g_hash_table_insert(*copy, rsc->id, -+ pcmk__copy_node_table(rsc->allowed_nodes)); -+ -+ for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) { -+ pcmk__copy_node_tables((const pe_resource_t *) iter->data, copy); -+ } -+} -+ -+/*! -+ * \internal -+ * \brief Recursively restore the node tables of a resource from backup -+ * -+ * Given a hash table containing backup copies of the allowed nodes tables of -+ * \p rsc and its entire tree of descendants, replace the resources' current -+ * node tables with the backed-up copies. -+ * -+ * \param[in,out] rsc Resource whose node tables to restore -+ * \param[in] backup Table of backup node tables (created by -+ * \c pcmk__copy_node_tables()) -+ * -+ * \note This function frees the resources' current node tables. -+ */ -+void -+pcmk__restore_node_tables(pe_resource_t *rsc, GHashTable *backup) -+{ -+ CRM_ASSERT((rsc != NULL) && (backup != NULL)); -+ -+ g_hash_table_destroy(rsc->allowed_nodes); -+ -+ // Copy to avoid danger with multiple restores -+ rsc->allowed_nodes = g_hash_table_lookup(backup, rsc->id); -+ rsc->allowed_nodes = pcmk__copy_node_table(rsc->allowed_nodes); -+ -+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { -+ pcmk__restore_node_tables((pe_resource_t *) iter->data, backup); -+ } -+} -+ - /*! - * \internal - * \brief Copy a list of node objects - -From a3c120c4c0aeb48efd55bac6de68423be099831d Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Wed, 28 Jun 2023 02:09:28 -0700 -Subject: [PATCH 09/19] Refactor: libpacemaker: Restore node tables if cloned - group assign fails - -Currently, when assigning an instance of a cloned group (that is, one of -the groups), we make a copy only of the group's allowed nodes table. We -restore only that table if an early assignment attempt fails. - -Here, we make a recursive copy containing the allowed nodes tables of -the group itself and of all the resources in the group. Then we restore -all of them from backup if the assignment fails. - -This doesn't visibly fix anything yet, but it's a necessary part of the -fix for T678. 
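[Editor's illustration, not part of the upstream series: a minimal sketch of the copy/try/restore pattern this patch adopts. It assumes instance, prefer, and chosen play the same roles as in assign_instance() below, and that comparing pe_node_t details pointers is a valid same-node check.]

    GHashTable *backup = NULL;
    pe_node_t *chosen = NULL;

    /* Snapshot the allowed node tables of instance and all its descendants */
    pcmk__copy_node_tables(instance, &backup);
    chosen = instance->cmds->assign(instance, prefer);

    /* If a node other than the preferred one was chosen, roll back every
     * table in the tree and undo the tentative assignment
     */
    if ((chosen != NULL) && (prefer != NULL)
        && (chosen->details != prefer->details)) {
        pcmk__restore_node_tables(instance, backup);
        pcmk__unassign_resource(instance);
        chosen = NULL;
    }
    g_hash_table_destroy(backup);   /* also frees each per-resource table copy */

Destroying the backup unconditionally is safe in both paths because pcmk__restore_node_tables() copies entries out of the backup table rather than taking ownership of them.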
And it was obviously wrong before :) - -Ref T678 - -Signed-off-by: Reid Wahl ---- - lib/pacemaker/pcmk_sched_instances.c | 9 ++++----- - 1 file changed, 4 insertions(+), 5 deletions(-) -
-diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c -index c880196f70f..783820bbf69 100644 ---- a/lib/pacemaker/pcmk_sched_instances.c -+++ b/lib/pacemaker/pcmk_sched_instances.c -@@ -600,8 +600,9 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, - chosen = instance->cmds->assign(instance, NULL); - - } else { // Possible early assignment to preferred node -- GHashTable *backup = pcmk__copy_node_table(instance->allowed_nodes); -+ GHashTable *backup = NULL; - -+ pcmk__copy_node_tables(instance, &backup); - chosen = instance->cmds->assign(instance, prefer); - - // Revert nodes if preferred node won't be assigned -@@ -609,13 +610,11 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, - crm_info("Not assigning %s to preferred node %s: %s is better", - instance->id, pe__node_name(prefer), - pe__node_name(chosen)); -- g_hash_table_destroy(instance->allowed_nodes); -- instance->allowed_nodes = backup; -+ pcmk__restore_node_tables(instance, backup); - pcmk__unassign_resource(instance); - chosen = NULL; -- } else if (backup != NULL) { -- g_hash_table_destroy(backup); - } -+ g_hash_table_destroy(backup); - } - - // The parent tracks how many instances have been assigned to each node -
-From a5a5c76333365be87f5d3d62f354b45376894506 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Wed, 28 Jun 2023 02:08:44 -0700 -Subject: [PATCH 10/19] Fix: libpacemaker: Respect clone-node-max for cloned - groups - -Currently, cloned groups may have more than clone-node-max instances -assigned to a given node. This can happen when a location constraint -exists for the clone. - -For example, consider the case of the clone-recover-no-shuffle-5 test. -The cloned group prefers node1 with a score of 100. The location score -is applied only to a group's first member. - -So in the early assignment attempt (within pcmk__assign_instances()), we -try to assign each instance (group) to its current node. However, the -first member prefers a different node (node1) and gets assigned there -instead. The second member has to follow the first due to the group's -internal colocation. - -However, node1 wasn't the preferred node. So assign_instance() tries to -revert the assignment by calling pcmk__unassign_resource() on the -instance (the group). But this leaves the group members assigned, -because pcmk__unassign_resource() doesn't act recursively. - -With this commit, pcmk__unassign_resource() acts recursively, unassigning a -resource and all of its children. - -Fixes T678 - -Signed-off-by: Reid Wahl ---- - lib/pacemaker/pcmk_sched_resource.c | 43 +++++++++++++++++++---------- - 1 file changed, 28 insertions(+), 15 deletions(-) -
-diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c -index dd9939a42a6..8f703789b20 100644 ---- a/lib/pacemaker/pcmk_sched_resource.c -+++ b/lib/pacemaker/pcmk_sched_resource.c -@@ -455,13 +455,14 @@ pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force) - - /*! - * \internal -- * \brief Remove any assignment of a specified resource to a node -+ * \brief Remove any node assignment from a specified resource and its children - * - * If a specified resource has been assigned to a node, remove that assignment -- * and mark the resource as provisional again.
This is not done recursively for -- * children, so it should be called only for primitives. -+ * and mark the resource as provisional again. - * - * \param[in,out] rsc Resource to unassign -+ * -+ * \note This function is called recursively on \p rsc and its children. - */ - void - pcmk__unassign_resource(pe_resource_t *rsc) -@@ -469,21 +470,33 @@ pcmk__unassign_resource(pe_resource_t *rsc) - pe_node_t *old = rsc->allocated_to; - - if (old == NULL) { -- return; -+ crm_info("Unassigning %s", rsc->id); -+ } else { -+ crm_info("Unassigning %s from %s", rsc->id, pe__node_name(old)); - } - -- crm_info("Unassigning %s from %s", rsc->id, pe__node_name(old)); - pe__set_resource_flags(rsc, pe_rsc_provisional); -- rsc->allocated_to = NULL; -- -- /* We're going to free the pe_node_t, but its details member is shared and -- * will remain, so update that appropriately first. -- */ -- old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc, -- rsc); -- old->details->num_resources--; -- pcmk__release_node_capacity(old->details->utilization, rsc); -- free(old); -+ -+ if (rsc->children == NULL) { -+ if (old == NULL) { -+ return; -+ } -+ rsc->allocated_to = NULL; -+ -+ /* We're going to free the pe_node_t, but its details member is shared -+ * and will remain, so update that appropriately first. -+ */ -+ old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc, -+ rsc); -+ old->details->num_resources--; -+ pcmk__release_node_capacity(old->details->utilization, rsc); -+ free(old); -+ return; -+ } -+ -+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { -+ pcmk__unassign_resource((pe_resource_t *) iter->data); -+ } - } - - /*! - -From edd9b4ef2094e776530ff540047848aa6d2a1b42 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Wed, 28 Jun 2023 02:39:39 -0700 -Subject: [PATCH 11/19] Test: scheduler: Update tests for cloned group - clone-node-max fix - -Ref T678 - -Signed-off-by: Reid Wahl ---- - .../dot/clone-recover-no-shuffle-5.dot | 46 +--- - .../exp/clone-recover-no-shuffle-5.exp | 231 +++--------------- - .../scores/clone-recover-no-shuffle-5.scores | 50 +++- - .../clone-recover-no-shuffle-5.summary | 27 +- - .../xml/clone-recover-no-shuffle-5.xml | 6 +- - 5 files changed, 97 insertions(+), 263 deletions(-) - -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-5.dot b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot -index 7219ee5a6d3..a2356f2280b 100644 ---- a/cts/scheduler/dot/clone-recover-no-shuffle-5.dot -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot -@@ -2,12 +2,10 @@ - "grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] - "grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] - "grp-clone_start_0" -> "grp:0_start_0" [ style = bold] --"grp-clone_start_0" -> "grp:1_start_0" [ style = bold] - "grp-clone_start_0" -> "grp:2_start_0" [ style = bold] - "grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] - "grp-clone_stop_0" -> "grp-clone_stopped_0" [ style = bold] - "grp-clone_stop_0" -> "grp:0_stop_0" [ style = bold] --"grp-clone_stop_0" -> "grp:1_stop_0" [ style = bold] - "grp-clone_stop_0" [ style=bold color="green" fontcolor="orange"] - "grp-clone_stopped_0" -> "grp-clone_start_0" [ style = bold] - "grp-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -@@ -24,57 +22,35 @@ - "grp:0_stopped_0" -> "grp-clone_stopped_0" [ style = bold] - "grp:0_stopped_0" -> "grp:0_start_0" [ style = bold] - "grp:0_stopped_0" [ style=bold color="green" fontcolor="orange"] --"grp:1_running_0" -> 
"grp-clone_running_0" [ style = bold] --"grp:1_running_0" [ style=bold color="green" fontcolor="orange"] --"grp:1_start_0" -> "grp:1_running_0" [ style = bold] --"grp:1_start_0" -> "rsc1_start_0 node1" [ style = bold] --"grp:1_start_0" -> "rsc2_start_0 node1" [ style = bold] --"grp:1_start_0" [ style=bold color="green" fontcolor="orange"] --"grp:1_stop_0" -> "grp:1_stopped_0" [ style = bold] --"grp:1_stop_0" -> "rsc1_stop_0 node3" [ style = bold] --"grp:1_stop_0" -> "rsc2_stop_0 node3" [ style = bold] --"grp:1_stop_0" [ style=bold color="green" fontcolor="orange"] --"grp:1_stopped_0" -> "grp-clone_stopped_0" [ style = bold] --"grp:1_stopped_0" -> "grp:1_start_0" [ style = bold] --"grp:1_stopped_0" [ style=bold color="green" fontcolor="orange"] - "grp:2_running_0" -> "grp-clone_running_0" [ style = bold] - "grp:2_running_0" [ style=bold color="green" fontcolor="orange"] - "grp:2_start_0" -> "grp:2_running_0" [ style = bold] --"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] --"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] -+"grp:2_start_0" -> "rsc1:2_start_0 node2" [ style = bold] -+"grp:2_start_0" -> "rsc2:2_start_0 node2" [ style = bold] - "grp:2_start_0" [ style=bold color="green" fontcolor="orange"] --"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] --"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] --"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] --"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] --"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc1:2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] -+"rsc1:2_start_0 node2" -> "grp:2_running_0" [ style = bold] -+"rsc1:2_start_0 node2" -> "rsc1:2_monitor_10000 node2" [ style = bold] -+"rsc1:2_start_0 node2" -> "rsc2:2_start_0 node2" [ style = bold] -+"rsc1:2_start_0 node2" [ style=bold color="green" fontcolor="black"] - "rsc1_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] - "rsc1_start_0 node1" -> "grp:0_running_0" [ style = bold] --"rsc1_start_0 node1" -> "grp:1_running_0" [ style = bold] - "rsc1_start_0 node1" -> "rsc1_monitor_10000 node1" [ style = bold] - "rsc1_start_0 node1" -> "rsc2_start_0 node1" [ style = bold] - "rsc1_start_0 node1" [ style=bold color="green" fontcolor="black"] - "rsc1_stop_0 node2" -> "grp:0_stopped_0" [ style = bold] - "rsc1_stop_0 node2" -> "rsc1_start_0 node1" [ style = bold] - "rsc1_stop_0 node2" [ style=bold color="green" fontcolor="black"] --"rsc1_stop_0 node3" -> "grp:1_stopped_0" [ style = bold] --"rsc1_stop_0 node3" -> "rsc1_start_0 node1" [ style = bold] --"rsc1_stop_0 node3" [ style=bold color="green" fontcolor="black"] --"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] --"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] --"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] --"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2:2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] -+"rsc2:2_start_0 node2" -> "grp:2_running_0" [ style = bold] -+"rsc2:2_start_0 node2" -> "rsc2:2_monitor_10000 node2" [ style = bold] -+"rsc2:2_start_0 node2" [ style=bold color="green" fontcolor="black"] - "rsc2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] - "rsc2_start_0 node1" -> "grp:0_running_0" [ style = bold] --"rsc2_start_0 node1" -> "grp:1_running_0" [ style = bold] - "rsc2_start_0 node1" -> "rsc2_monitor_10000 node1" [ style = bold] - 
"rsc2_start_0 node1" [ style=bold color="green" fontcolor="black"] - "rsc2_stop_0 node2" -> "grp:0_stopped_0" [ style = bold] - "rsc2_stop_0 node2" -> "rsc1_stop_0 node2" [ style = bold] - "rsc2_stop_0 node2" -> "rsc2_start_0 node1" [ style = bold] - "rsc2_stop_0 node2" [ style=bold color="green" fontcolor="black"] --"rsc2_stop_0 node3" -> "grp:1_stopped_0" [ style = bold] --"rsc2_stop_0 node3" -> "rsc1_stop_0 node3" [ style = bold] --"rsc2_stop_0 node3" -> "rsc2_start_0 node1" [ style = bold] --"rsc2_stop_0 node3" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-5.exp b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp -index 8a8e799793e..c1cee43b12f 100644 ---- a/cts/scheduler/exp/clone-recover-no-shuffle-5.exp -+++ b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp -@@ -25,7 +25,7 @@ - - - -- -+ - - - -@@ -58,7 +58,7 @@ - - - -- -+ - - - -@@ -154,245 +154,92 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -+ - -- -+ - - - - -- -+ - - - -- -+ - -- -+ - -- -+ - - - - -- -+ - - - -- -+ - -- -+ - -- -+ - - - - -- -+ - - - -- -+ - -- -+ - -- -+ - - - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - -@@ -401,24 +248,21 @@ - - - -- -- -- -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - -- -+ - - - -@@ -427,25 +271,22 @@ - - - -- -+ - - -- -- -- -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - - - -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-5.scores b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores -index eecba43fae0..0dd9728830c 100644 ---- a/cts/scheduler/scores/clone-recover-no-shuffle-5.scores -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores -@@ -30,50 +30,80 @@ pcmk__clone_assign: rsc2:2 allocation score on node1: 0 - pcmk__clone_assign: rsc2:2 allocation score on node2: 0 - pcmk__clone_assign: rsc2:2 allocation score on node3: 0 - pcmk__group_assign: grp:0 allocation score on node1: 100 -+pcmk__group_assign: grp:0 allocation score on node1: 100 -+pcmk__group_assign: grp:0 allocation score on node2: 0 - pcmk__group_assign: grp:0 allocation score on node2: 0 - pcmk__group_assign: grp:0 allocation score on node3: 0 -+pcmk__group_assign: grp:0 allocation score on node3: 0 -+pcmk__group_assign: grp:1 allocation score on node1: -INFINITY - pcmk__group_assign: grp:1 allocation score on node1: 100 - pcmk__group_assign: grp:1 allocation score on node2: 0 -+pcmk__group_assign: grp:1 allocation score on node2: 0 -+pcmk__group_assign: grp:1 allocation score on node3: 0 - pcmk__group_assign: grp:1 allocation score on node3: 0 --pcmk__group_assign: grp:2 allocation score on node1: 100 -+pcmk__group_assign: grp:2 allocation score on node1: -INFINITY - pcmk__group_assign: grp:2 allocation score on node2: 0 --pcmk__group_assign: grp:2 allocation score on node3: 0 -+pcmk__group_assign: grp:2 allocation score on node3: -INFINITY -+pcmk__group_assign: rsc1:0 allocation score on node1: 100 - pcmk__group_assign: rsc1:0 allocation score on node1: 100 - pcmk__group_assign: rsc1:0 allocation score on node2: 1 -+pcmk__group_assign: rsc1:0 
allocation score on node2: 1 -+pcmk__group_assign: rsc1:0 allocation score on node3: 0 - pcmk__group_assign: rsc1:0 allocation score on node3: 0 -+pcmk__group_assign: rsc1:1 allocation score on node1: -INFINITY - pcmk__group_assign: rsc1:1 allocation score on node1: 100 - pcmk__group_assign: rsc1:1 allocation score on node2: 0 -+pcmk__group_assign: rsc1:1 allocation score on node2: 0 - pcmk__group_assign: rsc1:1 allocation score on node3: 1 --pcmk__group_assign: rsc1:2 allocation score on node1: 100 -+pcmk__group_assign: rsc1:1 allocation score on node3: 1 -+pcmk__group_assign: rsc1:2 allocation score on node1: -INFINITY - pcmk__group_assign: rsc1:2 allocation score on node2: 0 --pcmk__group_assign: rsc1:2 allocation score on node3: 0 -+pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY -+pcmk__group_assign: rsc2:0 allocation score on node1: 0 - pcmk__group_assign: rsc2:0 allocation score on node1: 0 - pcmk__group_assign: rsc2:0 allocation score on node2: 1 -+pcmk__group_assign: rsc2:0 allocation score on node2: 1 - pcmk__group_assign: rsc2:0 allocation score on node3: 0 -+pcmk__group_assign: rsc2:0 allocation score on node3: 0 -+pcmk__group_assign: rsc2:1 allocation score on node1: -INFINITY - pcmk__group_assign: rsc2:1 allocation score on node1: 0 - pcmk__group_assign: rsc2:1 allocation score on node2: 0 -+pcmk__group_assign: rsc2:1 allocation score on node2: 0 -+pcmk__group_assign: rsc2:1 allocation score on node3: 1 - pcmk__group_assign: rsc2:1 allocation score on node3: 1 --pcmk__group_assign: rsc2:2 allocation score on node1: 0 -+pcmk__group_assign: rsc2:2 allocation score on node1: -INFINITY - pcmk__group_assign: rsc2:2 allocation score on node2: 0 --pcmk__group_assign: rsc2:2 allocation score on node3: 0 -+pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY - pcmk__primitive_assign: Fencing allocation score on node1: 0 - pcmk__primitive_assign: Fencing allocation score on node2: 0 - pcmk__primitive_assign: Fencing allocation score on node3: 0 - pcmk__primitive_assign: rsc1:0 allocation score on node1: 100 -+pcmk__primitive_assign: rsc1:0 allocation score on node1: 100 -+pcmk__primitive_assign: rsc1:0 allocation score on node2: 2 - pcmk__primitive_assign: rsc1:0 allocation score on node2: 2 - pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 -+pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 -+pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY - pcmk__primitive_assign: rsc1:1 allocation score on node1: 100 - pcmk__primitive_assign: rsc1:1 allocation score on node2: 0 -+pcmk__primitive_assign: rsc1:1 allocation score on node2: 0 -+pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 - pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 --pcmk__primitive_assign: rsc1:2 allocation score on node1: 100 -+pcmk__primitive_assign: rsc1:2 allocation score on node1: -INFINITY - pcmk__primitive_assign: rsc1:2 allocation score on node2: 0 --pcmk__primitive_assign: rsc1:2 allocation score on node3: 0 -+pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY -+pcmk__primitive_assign: rsc2:0 allocation score on node1: 0 - pcmk__primitive_assign: rsc2:0 allocation score on node1: 0 - pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY - pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY -+pcmk__primitive_assign: rsc2:1 allocation score on 
node1: -INFINITY - pcmk__primitive_assign: rsc2:1 allocation score on node1: 0 - pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY - pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY --pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 --pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc2:1 allocation score on node3: 1 -+pcmk__primitive_assign: rsc2:2 allocation score on node1: -INFINITY -+pcmk__primitive_assign: rsc2:2 allocation score on node2: 0 - pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary -index e84d0a574de..121214c42ab 100644 ---- a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary -@@ -11,41 +11,29 @@ Current cluster status: - Transition Summary: - * Move rsc1:0 ( node2 -> node1 ) - * Move rsc2:0 ( node2 -> node1 ) -- * Move rsc1:1 ( node3 -> node1 ) -- * Move rsc2:1 ( node3 -> node1 ) -- * Start rsc1:2 ( node1 ) -- * Start rsc2:2 ( node1 ) -+ * Start rsc1:2 ( node2 ) -+ * Start rsc2:2 ( node2 ) - - Executing Cluster Transition: - * Pseudo action: grp-clone_stop_0 - * Pseudo action: grp:0_stop_0 - * Resource action: rsc2 stop on node2 -- * Pseudo action: grp:1_stop_0 -- * Resource action: rsc2 stop on node3 - * Resource action: rsc1 stop on node2 -- * Resource action: rsc1 stop on node3 - * Pseudo action: grp:0_stopped_0 -- * Pseudo action: grp:1_stopped_0 - * Pseudo action: grp-clone_stopped_0 - * Pseudo action: grp-clone_start_0 - * Pseudo action: grp:0_start_0 - * Resource action: rsc1 start on node1 - * Resource action: rsc2 start on node1 -- * Pseudo action: grp:1_start_0 -- * Resource action: rsc1 start on node1 -- * Resource action: rsc2 start on node1 - * Pseudo action: grp:2_start_0 -- * Resource action: rsc1 start on node1 -- * Resource action: rsc2 start on node1 -+ * Resource action: rsc1 start on node2 -+ * Resource action: rsc2 start on node2 - * Pseudo action: grp:0_running_0 - * Resource action: rsc1 monitor=10000 on node1 - * Resource action: rsc2 monitor=10000 on node1 -- * Pseudo action: grp:1_running_0 -- * Resource action: rsc1 monitor=10000 on node1 -- * Resource action: rsc2 monitor=10000 on node1 - * Pseudo action: grp:2_running_0 -- * Resource action: rsc1 monitor=10000 on node1 -- * Resource action: rsc2 monitor=10000 on node1 -+ * Resource action: rsc1 monitor=10000 on node2 -+ * Resource action: rsc2 monitor=10000 on node2 - * Pseudo action: grp-clone_running_0 - - Revised Cluster Status: -@@ -55,5 +43,4 @@ Revised Cluster Status: - * Full List of Resources: - * Fencing (stonith:fence_xvm): Started node2 - * Clone Set: grp-clone [grp]: -- * Started: [ node1 ] -- * Stopped: [ node2 node3 ] -+ * Started: [ node1 node2 node3 ] -diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-5.xml b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml -index 67176dc1a03..45f3b5a9f3a 100644 ---- a/cts/scheduler/xml/clone-recover-no-shuffle-5.xml -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml -@@ -14,9 +14,9 @@ - * Instance grp:2 should start on node1 - - This test output is incorrect: -- * Instance grp:0 moves from node2 to node1 -- * Instance grp:1 moves from node3 to node1 -- * Instance grp:2 starts on node1 (correct) -+ * Instance grp:0 moves to node1 -+ * Instance grp:1 remains 
started on node3 (correct) -+ * Instance grp:2 starts on node2 - --> - - - -From ff60c47e89c6434819dbe5e5e9a87d01122e165e Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Thu, 6 Jul 2023 13:52:59 -0700 -Subject: [PATCH 12/19] Refactor: libpacemaker: Move instance provisional check - to loop body - -Avoid calling preferred_node() this way. Since assign_instance() is -static and has only two callers, we don't have to worry about a sanity -provisional check inside the function. - -Signed-off-by: Reid Wahl ---- - lib/pacemaker/pcmk_sched_instances.c | 9 ++++----- - 1 file changed, 4 insertions(+), 5 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c -index 783820bbf69..58fad741729 100644 ---- a/lib/pacemaker/pcmk_sched_instances.c -+++ b/lib/pacemaker/pcmk_sched_instances.c -@@ -568,11 +568,6 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, - pe_rsc_trace(instance, "Assigning %s (preferring %s)", instance->id, - ((prefer == NULL)? "no node" : prefer->details->uname)); - -- if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) { -- // Instance is already assigned -- return instance->fns->location(instance, NULL, FALSE) != NULL; -- } -- - if (pcmk_is_set(instance->flags, pe_rsc_allocating)) { - pe_rsc_debug(instance, - "Assignment loop detected involving %s colocations", -@@ -745,6 +740,10 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances, - iter = iter->next) { - instance = (pe_resource_t *) iter->data; - -+ if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) { -+ continue; // Already assigned -+ } -+ - current = preferred_node(collective, instance, optimal_per_node); - if ((current != NULL) - && assign_instance(instance, current, max_per_node)) { - -From 0f9e84238a4778da71488ff67ea9f1772e797d80 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Fri, 23 Jun 2023 15:16:57 -0700 -Subject: [PATCH 13/19] Refactor: libpacemaker: Functionize updating parent - allowed node count - -...in pcmk_sched_instances.c:assign_instance(). We'll use this elsewhere -in an upcoming commit. - -Ref T489 - -Signed-off-by: Reid Wahl ---- - lib/pacemaker/pcmk_sched_instances.c | 54 ++++++++++++++++++---------- - 1 file changed, 36 insertions(+), 18 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c -index 58fad741729..1b051cb2ed9 100644 ---- a/lib/pacemaker/pcmk_sched_instances.c -+++ b/lib/pacemaker/pcmk_sched_instances.c -@@ -545,6 +545,39 @@ pcmk__cmp_instance(gconstpointer a, gconstpointer b) - return rc; - } - -+/*! -+ * \internal -+ * \brief Increment the parent's instance count after assigning an instance -+ * -+ * An instance's parent tracks how many instances have been assigned to each -+ * node via its pe_node_t:count member. After assigning an instance to a node, -+ * find the corresponding node in the parent's allowed table and increment it. -+ * -+ * \param[in,out] instance Instance whose parent to update -+ * \param[in] assigned_to Node to which the instance was assigned -+ */ -+static void -+increment_parent_count(pe_resource_t *instance, const pe_node_t *assigned_to) -+{ -+ pe_node_t *allowed = NULL; -+ -+ if (assigned_to == NULL) { -+ return; -+ } -+ allowed = pcmk__top_allowed_node(instance, assigned_to); -+ -+ if (allowed == NULL) { -+ /* The instance is allowed on the node, but its parent isn't. This -+ * shouldn't be possible if the resource is managed, and we won't be -+ * able to limit the number of instances assigned to the node. 
-+ */ -+ CRM_LOG_ASSERT(!pcmk_is_set(instance->flags, pe_rsc_managed)); -+ -+ } else { -+ allowed->count++; -+ } -+} -+ - /*! - * \internal - * \brief Choose a node for an instance -@@ -562,9 +595,7 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, - int max_per_node) - { - pe_node_t *chosen = NULL; -- pe_node_t *allowed = NULL; - -- CRM_ASSERT(instance != NULL); - pe_rsc_trace(instance, "Assigning %s (preferring %s)", instance->id, - ((prefer == NULL)? "no node" : prefer->details->uname)); - -@@ -578,8 +609,8 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, - if (prefer != NULL) { // Possible early assignment to preferred node - - // Get preferred node with instance's scores -- allowed = g_hash_table_lookup(instance->allowed_nodes, -- prefer->details->id); -+ pe_node_t *allowed = g_hash_table_lookup(instance->allowed_nodes, -+ prefer->details->id); - - if ((allowed == NULL) || (allowed->weight < 0)) { - pe_rsc_trace(instance, -@@ -612,20 +643,7 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, - g_hash_table_destroy(backup); - } - -- // The parent tracks how many instances have been assigned to each node -- if (chosen != NULL) { -- allowed = pcmk__top_allowed_node(instance, chosen); -- if (allowed == NULL) { -- /* The instance is allowed on the node, but its parent isn't. This -- * shouldn't be possible if the resource is managed, and we won't be -- * able to limit the number of instances assigned to the node. -- */ -- CRM_LOG_ASSERT(!pcmk_is_set(instance->flags, pe_rsc_managed)); -- -- } else { -- allowed->count++; -- } -- } -+ increment_parent_count(instance, chosen); - return chosen != NULL; - } - - -From 6cddfe269531661112537eb3ef7c90975feb73ea Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Thu, 22 Jun 2023 13:49:42 -0700 -Subject: [PATCH 14/19] Refactor: libpe_status: Copy count in pe__copy_node() - -pe__copy_node() is supposed to make a shallow copy of a pe_node_t -object. That should include the count member. The caller is free to -reset it to 0 if desired. - -Signed-off-by: Reid Wahl ---- - lib/pengine/utils.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c -index ef0a092dc16..199ce87e61f 100644 ---- a/lib/pengine/utils.c -+++ b/lib/pengine/utils.c -@@ -98,6 +98,7 @@ pe__copy_node(const pe_node_t *this_node) - new_node->rsc_discover_mode = this_node->rsc_discover_mode; - new_node->weight = this_node->weight; - new_node->fixed = this_node->fixed; // @COMPAT deprecated and unused -+ new_node->count = this_node->count; - new_node->details = this_node->details; - - return new_node; - -From 30385bedeb5177b703b3b68d9579d55356187f26 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Fri, 23 Jun 2023 15:29:17 -0700 -Subject: [PATCH 15/19] Refactor: libpacemaker: Return chosen node from - assign_instance() - -The return type was changed to bool by commit 97f67da8. However, an -upcoming commit will need the assigned-to node. - -Ref T489 - -Signed-off-by: Reid Wahl ---- - lib/pacemaker/pcmk_sched_instances.c | 28 ++++++++++++++++------------ - 1 file changed, 16 insertions(+), 12 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c -index 1b051cb2ed9..64c027b20b1 100644 ---- a/lib/pacemaker/pcmk_sched_instances.c -+++ b/lib/pacemaker/pcmk_sched_instances.c -@@ -580,7 +580,7 @@ increment_parent_count(pe_resource_t *instance, const pe_node_t *assigned_to) - - /*! 
- * \internal -- * \brief Choose a node for an instance -+ * \brief Assign an instance to a node - * - * \param[in,out] instance Clone instance or bundle replica container - * \param[in] prefer If not NULL, attempt early assignment to this -@@ -588,9 +588,9 @@ increment_parent_count(pe_resource_t *instance, const pe_node_t *assigned_to) - * perform final assignment - * \param[in] max_per_node Assign at most this many instances to one node - * -- * \return true if \p instance could be assigned to a node, otherwise false -+ * \return Node to which \p instance is assigned - */ --static bool -+static const pe_node_t * - assign_instance(pe_resource_t *instance, const pe_node_t *prefer, - int max_per_node) - { -@@ -603,7 +603,7 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, - pe_rsc_debug(instance, - "Assignment loop detected involving %s colocations", - instance->id); -- return false; -+ return NULL; - } - - if (prefer != NULL) { // Possible early assignment to preferred node -@@ -616,7 +616,7 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, - pe_rsc_trace(instance, - "Not assigning %s to preferred node %s: unavailable", - instance->id, pe__node_name(prefer)); -- return false; -+ return NULL; - } - } - -@@ -644,7 +644,7 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, - } - - increment_parent_count(instance, chosen); -- return chosen != NULL; -+ return chosen; - } - - /*! -@@ -763,11 +763,15 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances, - } - - current = preferred_node(collective, instance, optimal_per_node); -- if ((current != NULL) -- && assign_instance(instance, current, max_per_node)) { -- pe_rsc_trace(collective, "Assigned %s to current node %s", -- instance->id, pe__node_name(current)); -- assigned++; -+ if (current != NULL) { -+ const pe_node_t *chosen = assign_instance(instance, current, -+ max_per_node); -+ -+ if (pe__same_node(chosen, current)) { -+ pe_rsc_trace(collective, "Assigned %s to current node %s", -+ instance->id, pe__node_name(current)); -+ assigned++; -+ } - } - } - -@@ -802,7 +806,7 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances, - resource_location(instance, NULL, -INFINITY, - "collective_limit_reached", collective->cluster); - -- } else if (assign_instance(instance, NULL, max_per_node)) { -+ } else if (assign_instance(instance, NULL, max_per_node) != NULL) { - assigned++; - } - } - -From 010649ef135ee0d4aca916d2d61c79bcba446951 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Fri, 23 Jun 2023 21:30:47 -0700 -Subject: [PATCH 16/19] Refactor: libpacemaker: New stop_if_fail argument for - assign() method - -...of resource_alloc_functions_t. This will allow us to do a fully -reversible assignment. - -Currently pcmk__unassign_resource() undoes everything assignment-related -but can't undo changes to roles and actions. - -Now, if stop_if_fail is true, the assign() method and -pcmk__assign_resource() behave as before. - -If stop_if_fail is false and assignment succeeds, we can safely either -consider the assignment final or revert it via -pcmk__unassign_resource(). If assignment fails, the effect is as if we -had called pcmk__unassign_resource(); there are no side effects on next -role or actions. 
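-In sketch form, the two calling modes (condensed from the assign_instance()
-hunk below; "backup" is the recursive node-table backup from the earlier
-commits in this series):
-
-    // Final assignment: on failure, set next role to stopped and update
-    // existing actions
-    chosen = instance->cmds->assign(instance, NULL, true);
-
-    // Tentative assignment: fully reversible, no side effects on failure
-    pcmk__copy_node_tables(instance, &backup);
-    chosen = instance->cmds->assign(instance, prefer, false);
-
-    if (!pe__same_node(chosen, prefer)) {
-        pcmk__restore_node_tables(instance, backup);
-        pcmk__unassign_resource(instance);
-        chosen = NULL;
-    }
-    g_hash_table_destroy(backup);
-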
- -Ref T489 - -Signed-off-by: Reid Wahl ---- - include/pcmki/pcmki_sched_allocate.h | 3 +- - lib/pacemaker/libpacemaker_private.h | 30 ++++++++++++---- - lib/pacemaker/pcmk_sched_bundle.c | 30 +++++++++++----- - lib/pacemaker/pcmk_sched_clone.c | 22 +++++++++--- - lib/pacemaker/pcmk_sched_group.c | 18 +++++++--- - lib/pacemaker/pcmk_sched_instances.c | 24 +++++++------ - lib/pacemaker/pcmk_sched_primitive.c | 52 ++++++++++++++++++++-------- - lib/pacemaker/pcmk_sched_resource.c | 41 ++++++++++++++++------ - lib/pacemaker/pcmk_scheduler.c | 4 +-- - 9 files changed, 163 insertions(+), 61 deletions(-) - -diff --git a/include/pcmki/pcmki_sched_allocate.h b/include/pcmki/pcmki_sched_allocate.h -index 32044ea96d4..f027d1211f0 100644 ---- a/include/pcmki/pcmki_sched_allocate.h -+++ b/include/pcmki/pcmki_sched_allocate.h -@@ -19,7 +19,8 @@ - # include - # include - --pe_node_t *pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer); -+pe_node_t *pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer, -+ bool stop_if_fail); - void pcmk__bundle_create_actions(pe_resource_t *rsc); - bool pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node); - void pcmk__bundle_internal_constraints(pe_resource_t *rsc); -diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h -index 8cdd13f7304..642176aafcd 100644 ---- a/lib/pacemaker/libpacemaker_private.h -+++ b/lib/pacemaker/libpacemaker_private.h -@@ -58,12 +58,24 @@ struct resource_alloc_functions_s { - * \internal - * \brief Assign a resource to a node - * -- * \param[in,out] rsc Resource to assign to a node -- * \param[in] prefer Node to prefer, if all else is equal -+ * \param[in,out] rsc Resource to assign to a node -+ * \param[in] prefer Node to prefer, if all else is equal -+ * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a -+ * node, set next role to stopped and update -+ * existing actions (if \p rsc is not a -+ * primitive, this applies to its primitive -+ * descendants instead) - * - * \return Node that \p rsc is assigned to, if assigned entirely to one node -+ * -+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() -+ * can completely undo the assignment. A successful assignment can be -+ * either undone or left alone as final. A failed assignment has the -+ * same effect as calling pcmk__unassign_resource(); there are no side -+ * effects on roles or actions. - */ -- pe_node_t *(*assign)(pe_resource_t *rsc, const pe_node_t *prefer); -+ pe_node_t *(*assign)(pe_resource_t *rsc, const pe_node_t *prefer, -+ bool stop_if_fail); - - /*! 
- * \internal -@@ -649,7 +661,8 @@ void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pe_action_t *action); - // Primitives (pcmk_sched_primitive.c) - - G_GNUC_INTERNAL --pe_node_t *pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer); -+pe_node_t *pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer, -+ bool stop_if_fail); - - G_GNUC_INTERNAL - void pcmk__primitive_create_actions(pe_resource_t *rsc); -@@ -696,7 +709,8 @@ void pcmk__primitive_shutdown_lock(pe_resource_t *rsc); - // Groups (pcmk_sched_group.c) - - G_GNUC_INTERNAL --pe_node_t *pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer); -+pe_node_t *pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer, -+ bool stop_if_fail); - - G_GNUC_INTERNAL - void pcmk__group_create_actions(pe_resource_t *rsc); -@@ -756,7 +770,8 @@ void pcmk__group_shutdown_lock(pe_resource_t *rsc); - // Clones (pcmk_sched_clone.c) - - G_GNUC_INTERNAL --pe_node_t *pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer); -+pe_node_t *pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer, -+ bool stop_if_fail); - - G_GNUC_INTERNAL - void pcmk__clone_apply_coloc_score(pe_resource_t *dependent, -@@ -915,7 +930,8 @@ G_GNUC_INTERNAL - void pcmk__output_resource_actions(pe_resource_t *rsc); - - G_GNUC_INTERNAL --bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force); -+bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force, -+ bool stop_if_fail); - - G_GNUC_INTERNAL - void pcmk__unassign_resource(pe_resource_t *rsc); -diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c -index 5682744395a..05a8626c889 100644 ---- a/lib/pacemaker/pcmk_sched_bundle.c -+++ b/lib/pacemaker/pcmk_sched_bundle.c -@@ -36,13 +36,24 @@ is_bundle_node(pe__bundle_variant_data_t *data, pe_node_t *node) - * \internal - * \brief Assign a bundle resource to a node - * -- * \param[in,out] rsc Resource to assign to a node -- * \param[in] prefer Node to prefer, if all else is equal -+ * \param[in,out] rsc Resource to assign to a node -+ * \param[in] prefer Node to prefer, if all else is equal -+ * \param[in] stop_if_fail If \c true and a primitive descendant of \p rsc -+ * can't be assigned to a node, set the -+ * descendant's next role to stopped and update -+ * existing actions - * - * \return Node that \p rsc is assigned to, if assigned entirely to one node -+ * -+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can -+ * completely undo the assignment. A successful assignment can be either -+ * undone or left alone as final. A failed assignment has the same effect -+ * as calling pcmk__unassign_resource(); there are no side effects on -+ * roles or actions. 
- */ - pe_node_t * --pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer) -+pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer, -+ bool stop_if_fail) - { - GList *containers = NULL; - pe__bundle_variant_data_t *bundle_data = NULL; -@@ -71,7 +82,7 @@ pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer) - if (replica->ip) { - pe_rsc_trace(rsc, "Allocating bundle %s IP %s", - rsc->id, replica->ip->id); -- replica->ip->cmds->assign(replica->ip, prefer); -+ replica->ip->cmds->assign(replica->ip, prefer, stop_if_fail); - } - - container_host = replica->container->allocated_to; -@@ -89,7 +100,8 @@ pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer) - if (replica->remote) { - pe_rsc_trace(rsc, "Allocating bundle %s connection %s", - rsc->id, replica->remote->id); -- replica->remote->cmds->assign(replica->remote, prefer); -+ replica->remote->cmds->assign(replica->remote, prefer, -+ stop_if_fail); - } - - // Explicitly allocate replicas' children before bundle child -@@ -110,7 +122,8 @@ pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer) - pe__set_resource_flags(replica->child->parent, pe_rsc_allocating); - pe_rsc_trace(rsc, "Allocating bundle %s replica child %s", - rsc->id, replica->child->id); -- replica->child->cmds->assign(replica->child, replica->node); -+ replica->child->cmds->assign(replica->child, replica->node, -+ stop_if_fail); - pe__clear_resource_flags(replica->child->parent, - pe_rsc_allocating); - } -@@ -129,7 +142,8 @@ pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer) - } - pe_rsc_trace(rsc, "Allocating bundle %s child %s", - rsc->id, bundle_data->child->id); -- bundle_data->child->cmds->assign(bundle_data->child, prefer); -+ bundle_data->child->cmds->assign(bundle_data->child, prefer, -+ stop_if_fail); - } - - pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional); -@@ -457,7 +471,7 @@ pcmk__bundle_apply_coloc_score(pe_resource_t *dependent, - } else if (colocation->score >= INFINITY) { - crm_notice("Cannot pair %s with instance of %s", - dependent->id, primary->id); -- pcmk__assign_resource(dependent, NULL, true); -+ pcmk__assign_resource(dependent, NULL, true, true); - - } else { - pe_rsc_debug(primary, "Cannot pair %s with instance of %s", -diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c -index 934f512d549..229257fd2be 100644 ---- a/lib/pacemaker/pcmk_sched_clone.c -+++ b/lib/pacemaker/pcmk_sched_clone.c -@@ -18,13 +18,24 @@ - * \internal - * \brief Assign a clone resource's instances to nodes - * -- * \param[in,out] rsc Clone resource to assign -- * \param[in] prefer Node to prefer, if all else is equal -+ * \param[in,out] rsc Clone resource to assign -+ * \param[in] prefer Node to prefer, if all else is equal -+ * \param[in] stop_if_fail If \c true and a primitive descendant of \p rsc -+ * can't be assigned to a node, set the -+ * descendant's next role to stopped and update -+ * existing actions - * - * \return NULL (clones are not assigned to a single node) -+ * -+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can -+ * completely undo the assignment. A successful assignment can be either -+ * undone or left alone as final. A failed assignment has the same effect -+ * as calling pcmk__unassign_resource(); there are no side effects on -+ * roles or actions. 
- */ - pe_node_t * --pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer) -+pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer, -+ bool stop_if_fail) - { - CRM_ASSERT(pe_rsc_is_clone(rsc)); - -@@ -53,7 +64,8 @@ pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer) - - pe_rsc_trace(rsc, "%s: Assigning colocation %s primary %s first", - rsc->id, constraint->id, constraint->primary->id); -- constraint->primary->cmds->assign(constraint->primary, prefer); -+ constraint->primary->cmds->assign(constraint->primary, prefer, -+ stop_if_fail); - } - - /* If any resources are colocated with this one, consider their preferences. -@@ -305,7 +317,7 @@ pcmk__clone_apply_coloc_score(pe_resource_t *dependent, - } else if (colocation->score >= INFINITY) { - crm_notice("Cannot pair %s with instance of %s", - dependent->id, primary->id); -- pcmk__assign_resource(dependent, NULL, true); -+ pcmk__assign_resource(dependent, NULL, true, true); - - } else { - pe_rsc_debug(primary, "Cannot pair %s with instance of %s", -diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c -index cb139f7ddf9..55d890a5c4f 100644 ---- a/lib/pacemaker/pcmk_sched_group.c -+++ b/lib/pacemaker/pcmk_sched_group.c -@@ -20,13 +20,23 @@ - * \internal - * \brief Assign a group resource to a node - * -- * \param[in,out] rsc Group resource to assign to a node -- * \param[in] prefer Node to prefer, if all else is equal -+ * \param[in,out] rsc Group resource to assign to a node -+ * \param[in] prefer Node to prefer, if all else is equal -+ * \param[in] stop_if_fail If \c true and a child of \p rsc can't be -+ * assigned to a node, set the child's next role to -+ * stopped and update existing actions - * - * \return Node that \p rsc is assigned to, if assigned entirely to one node -+ * -+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can -+ * completely undo the assignment. A successful assignment can be either -+ * undone or left alone as final. A failed assignment has the same effect -+ * as calling pcmk__unassign_resource(); there are no side effects on -+ * roles or actions. 
- */ - pe_node_t * --pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer) -+pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer, -+ bool stop_if_fail) - { - pe_node_t *first_assigned_node = NULL; - pe_resource_t *first_member = NULL; -@@ -61,7 +71,7 @@ pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer) - - pe_rsc_trace(rsc, "Assigning group %s member %s", - rsc->id, member->id); -- node = member->cmds->assign(member, prefer); -+ node = member->cmds->assign(member, prefer, stop_if_fail); - if (first_assigned_node == NULL) { - first_assigned_node = node; - } -diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c -index 64c027b20b1..b551f3bee61 100644 ---- a/lib/pacemaker/pcmk_sched_instances.c -+++ b/lib/pacemaker/pcmk_sched_instances.c -@@ -623,22 +623,26 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer, - ban_unavailable_allowed_nodes(instance, max_per_node); - - if (prefer == NULL) { // Final assignment -- chosen = instance->cmds->assign(instance, NULL); -+ chosen = instance->cmds->assign(instance, NULL, true); - - } else { // Possible early assignment to preferred node - GHashTable *backup = NULL; - - pcmk__copy_node_tables(instance, &backup); -- chosen = instance->cmds->assign(instance, prefer); -- -- // Revert nodes if preferred node won't be assigned -- if ((chosen != NULL) && (chosen->details != prefer->details)) { -- crm_info("Not assigning %s to preferred node %s: %s is better", -- instance->id, pe__node_name(prefer), -- pe__node_name(chosen)); -+ chosen = instance->cmds->assign(instance, prefer, false); -+ -+ if (!pe__same_node(chosen, prefer)) { -+ // Revert nodes if preferred node won't be assigned -+ if (chosen != NULL) { -+ pe_rsc_info(instance, -+ "Not assigning %s to preferred node %s: " -+ "%s is better", -+ instance->id, pe__node_name(prefer), -+ pe__node_name(chosen)); -+ chosen = NULL; -+ } - pcmk__restore_node_tables(instance, backup); - pcmk__unassign_resource(instance); -- chosen = NULL; - } - g_hash_table_destroy(backup); - } -@@ -1181,7 +1185,7 @@ unassign_if_mandatory(const pe_action_t *first, const pe_action_t *then, - "Inhibiting %s from being active " - "because there is no %s instance to interleave", - then_instance->id, first->rsc->id); -- return pcmk__assign_resource(then_instance, NULL, true); -+ return pcmk__assign_resource(then_instance, NULL, true, true); - } - return false; - } -diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c -index 2470b08ed69..50f11138f23 100644 ---- a/lib/pacemaker/pcmk_sched_primitive.c -+++ b/lib/pacemaker/pcmk_sched_primitive.c -@@ -141,13 +141,23 @@ sorted_allowed_nodes(const pe_resource_t *rsc) - * \internal - * \brief Assign a resource to its best allowed node, if possible - * -- * \param[in,out] rsc Resource to choose a node for -- * \param[in] prefer If not NULL, prefer this node when all else equal -+ * \param[in,out] rsc Resource to choose a node for -+ * \param[in] prefer If not \c NULL, prefer this node when all else -+ * equal -+ * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a -+ * node, set next role to stopped and update -+ * existing actions - * - * \return true if \p rsc could be assigned to a node, otherwise false -+ * -+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can -+ * completely undo the assignment. A successful assignment can be either -+ * undone or left alone as final. 
A failed assignment has the same effect -+ * as calling pcmk__unassign_resource(); there are no side effects on -+ * roles or actions. - */ - static bool --assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer) -+assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer, bool stop_if_fail) - { - GList *nodes = NULL; - pe_node_t *chosen = NULL; -@@ -259,7 +269,7 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer) - pe__node_name(chosen), rsc->id, g_list_length(nodes)); - } - -- pcmk__assign_resource(rsc, chosen, false); -+ pcmk__assign_resource(rsc, chosen, false, stop_if_fail); - g_list_free(nodes); - return rsc->allocated_to != NULL; - } -@@ -292,7 +302,7 @@ apply_this_with(gpointer data, gpointer user_data) - "(score=%d role=%s)", - rsc->id, colocation->id, other->id, - colocation->score, role2text(colocation->dependent_role)); -- other->cmds->assign(other, NULL); -+ other->cmds->assign(other, NULL, true); - } - - // Apply the colocation score to this resource's allowed node scores -@@ -351,13 +361,23 @@ remote_connection_assigned(const pe_resource_t *connection) - * \internal - * \brief Assign a primitive resource to a node - * -- * \param[in,out] rsc Resource to assign to a node -- * \param[in] prefer Node to prefer, if all else is equal -+ * \param[in,out] rsc Resource to assign to a node -+ * \param[in] prefer Node to prefer, if all else is equal -+ * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a -+ * node, set next role to stopped and update -+ * existing actions - * - * \return Node that \p rsc is assigned to, if assigned entirely to one node -+ * -+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can -+ * completely undo the assignment. A successful assignment can be either -+ * undone or left alone as final. A failed assignment has the same effect -+ * as calling pcmk__unassign_resource(); there are no side effects on -+ * roles or actions. - */ - pe_node_t * --pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer) -+pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer, -+ bool stop_if_fail) - { - GList *this_with_colocations = NULL; - GList *with_this_colocations = NULL; -@@ -371,7 +391,7 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer) - && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) { - pe_rsc_debug(rsc, "%s: Assigning parent %s first", - rsc->id, rsc->parent->id); -- rsc->parent->cmds->assign(rsc->parent, prefer); -+ rsc->parent->cmds->assign(rsc->parent, prefer, stop_if_fail); - } - - if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { -@@ -474,20 +494,24 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer) - } - pe_rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id, - (assign_to? 
assign_to->details->uname : "no node"), reason); -- pcmk__assign_resource(rsc, assign_to, true); -+ pcmk__assign_resource(rsc, assign_to, true, stop_if_fail); - - } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_everything)) { -- pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources", rsc->id); -- pcmk__assign_resource(rsc, NULL, true); -+ // Must stop at some point, but be consistent with stop_if_fail -+ if (stop_if_fail) { -+ pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources", -+ rsc->id); -+ } -+ pcmk__assign_resource(rsc, NULL, true, stop_if_fail); - - } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional) -- && assign_best_node(rsc, prefer)) { -+ && assign_best_node(rsc, prefer, stop_if_fail)) { - // Assignment successful - - } else if (rsc->allocated_to == NULL) { - if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) { - pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id); -- } else if (rsc->running_on != NULL) { -+ } else if ((rsc->running_on != NULL) && stop_if_fail) { - pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id); - } - -diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c -index 8f703789b20..36f49dc49b9 100644 ---- a/lib/pacemaker/pcmk_sched_resource.c -+++ b/lib/pacemaker/pcmk_sched_resource.c -@@ -335,25 +335,38 @@ pcmk__output_resource_actions(pe_resource_t *rsc) - * - * Assign a specified resource and its children (if any) to a specified node, if - * the node can run the resource (or unconditionally, if \p force is true). Mark -- * the resources as no longer provisional. If a resource can't be assigned (or -- * \p node is \c NULL), unassign any previous assignment, set next role to -- * stopped, and update any existing actions scheduled for it. -+ * the resources as no longer provisional. - * -- * \param[in,out] rsc Resource to assign -- * \param[in,out] node Node to assign \p rsc to -- * \param[in] force If true, assign to \p node even if unavailable -+ * If a resource can't be assigned (or \p node is \c NULL), unassign any -+ * previous assignment. If \p stop_if_fail is \c true, set next role to stopped -+ * and update any existing actions scheduled for the resource. -+ * -+ * \param[in,out] rsc Resource to assign -+ * \param[in,out] node Node to assign \p rsc to -+ * \param[in] force If true, assign to \p node even if unavailable -+ * \param[in] stop_if_fail If \c true and either \p rsc can't be assigned -+ * or \p chosen is \c NULL, set next role to -+ * stopped and update existing actions (if \p rsc -+ * is not a primitive, this applies to its -+ * primitive descendants instead) - * - * \return \c true if the assignment of \p rsc changed, or \c false otherwise - * - * \note Assigning a resource to the NULL node using this function is different -- * from calling pcmk__unassign_resource(), in that it will also update any -+ * from calling pcmk__unassign_resource(), in that it may also update any - * actions created for the resource. - * \note The \c resource_alloc_functions_t:assign() method is preferred, unless - * a resource should be assigned to the \c NULL node or every resource in - * a tree should be assigned to the same node. -+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can -+ * completely undo the assignment. A successful assignment can be either -+ * undone or left alone as final. A failed assignment has the same effect -+ * as calling pcmk__unassign_resource(); there are no side effects on -+ * roles or actions. 
- */ - bool --pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force) -+pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force, -+ bool stop_if_fail) - { - bool changed = false; - -@@ -363,7 +376,8 @@ pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force) - for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { - pe_resource_t *child_rsc = iter->data; - -- changed |= pcmk__assign_resource(child_rsc, node, force); -+ changed |= pcmk__assign_resource(child_rsc, node, force, -+ stop_if_fail); - } - return changed; - } -@@ -382,7 +396,10 @@ pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force) - rsc->id, pe__node_name(node), - (pcmk__node_available(node, true, false)? "" : "not"), - pcmk_readable_score(node->weight)); -- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability"); -+ -+ if (stop_if_fail) { -+ pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability"); -+ } - node = NULL; - } - -@@ -398,6 +415,10 @@ pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force) - char *rc_stopped = NULL; - - pe_rsc_debug(rsc, "Could not assign %s to a node", rsc->id); -+ -+ if (!stop_if_fail) { -+ return changed; -+ } - pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to assign"); - - for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) { -diff --git a/lib/pacemaker/pcmk_scheduler.c b/lib/pacemaker/pcmk_scheduler.c -index b4e670d865c..508cd5721c4 100644 ---- a/lib/pacemaker/pcmk_scheduler.c -+++ b/lib/pacemaker/pcmk_scheduler.c -@@ -318,7 +318,7 @@ allocate_resources(pe_working_set_t *data_set) - if (rsc->is_remote_node) { - pe_rsc_trace(rsc, "Allocating remote connection resource '%s'", - rsc->id); -- rsc->cmds->assign(rsc, rsc->partial_migration_target); -+ rsc->cmds->assign(rsc, rsc->partial_migration_target, true); - } - } - } -@@ -330,7 +330,7 @@ allocate_resources(pe_working_set_t *data_set) - if (!rsc->is_remote_node) { - pe_rsc_trace(rsc, "Allocating %s resource '%s'", - crm_element_name(rsc->xml), rsc->id); -- rsc->cmds->assign(rsc, NULL); -+ rsc->cmds->assign(rsc, NULL, true); - } - } - - -From a698dd1e17f184977f87c4ef44c2eb5b9bd933f6 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Mon, 10 Jul 2023 02:44:46 -0700 -Subject: [PATCH 17/19] Test: scheduler: Update tests after new stop_if_fail - argument - -Some scores are repeated since we're able to back out of a failed early -assignment now. - -Only one test changes otherwise. bug-1822 has a score change from --INFINITY to 49. However, the partially active group is still not -allowed to promote, which is the purpose of the test. 
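-The repetition is just the tentative-then-final assignment sequence made
-possible by the new stop_if_fail argument; roughly:
-
-    // The tentative attempt logs one set of allocation scores...
-    chosen = instance->cmds->assign(instance, prefer, false);
-
-    // ...and once it is backed out, the final attempt logs them again
-    pcmk__restore_node_tables(instance, backup);
-    pcmk__unassign_resource(instance);
-    chosen = instance->cmds->assign(instance, NULL, true);
-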
- -Ref T489 - -Signed-off-by: Reid Wahl ---- - cts/scheduler/scores/594.scores | 3 +++ - cts/scheduler/scores/bug-1822.scores | 2 +- - .../bug-5014-CLONE-A-stop-B-started.scores | 1 + - cts/scheduler/scores/bug-lf-2171.scores | 4 ++++ - cts/scheduler/scores/bug-lf-2422.scores | 16 ++++++++++++++++ - cts/scheduler/scores/bug-lf-2453.scores | 4 ++++ - cts/scheduler/scores/bug-lf-2574.scores | 3 +++ - .../scores/bundle-order-stop-clone.scores | 4 ++++ - cts/scheduler/scores/clone-max-zero.scores | 8 ++++++++ - cts/scheduler/scores/cloned-group-stop.scores | 4 ++++ - cts/scheduler/scores/complex_enforce_colo.scores | 9 +++++++++ - cts/scheduler/scores/enforce-colo1.scores | 9 +++++++++ - .../scores/promoted-asymmetrical-order.scores | 4 ++++ - .../scores/promoted-failed-demote-2.scores | 10 ++++++++++ - .../scores/promoted-failed-demote.scores | 10 ++++++++++ - 15 files changed, 90 insertions(+), 1 deletion(-) - -diff --git a/cts/scheduler/scores/594.scores b/cts/scheduler/scores/594.scores -index 5e99750df21..96c8f441b98 100644 ---- a/cts/scheduler/scores/594.scores -+++ b/cts/scheduler/scores/594.scores -@@ -21,8 +21,11 @@ pcmk__primitive_assign: child_DoFencing:1 allocation score on hadev1: 1 - pcmk__primitive_assign: child_DoFencing:1 allocation score on hadev2: -INFINITY - pcmk__primitive_assign: child_DoFencing:1 allocation score on hadev3: -INFINITY - pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev1: -INFINITY -+pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev1: -INFINITY -+pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev2: -INFINITY - pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev2: -INFINITY - pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev3: -INFINITY -+pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev3: -INFINITY - pcmk__primitive_assign: rsc_hadev1 allocation score on hadev1: 100 - pcmk__primitive_assign: rsc_hadev1 allocation score on hadev2: 0 - pcmk__primitive_assign: rsc_hadev1 allocation score on hadev3: 0 -diff --git a/cts/scheduler/scores/bug-1822.scores b/cts/scheduler/scores/bug-1822.scores -index 82191d1e74b..0a9056bbf3e 100644 ---- a/cts/scheduler/scores/bug-1822.scores -+++ b/cts/scheduler/scores/bug-1822.scores -@@ -1,5 +1,5 @@ - --ms-sf_group:0 promotion score on process2b: -INFINITY -+ms-sf_group:0 promotion score on process2b: 49 - ms-sf_group:1 promotion score on none: 0 - pcmk__clone_assign: ms-sf allocation score on process1a: 0 - pcmk__clone_assign: ms-sf allocation score on process2b: 0 -diff --git a/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores b/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores -index e698b145274..d79208c7336 100644 ---- a/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores -+++ b/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores -@@ -5,3 +5,4 @@ pcmk__clone_assign: clone1 allocation score on fc16-builder: 0 - pcmk__clone_assign: clone2 allocation score on fc16-builder: 0 - pcmk__primitive_assign: ClusterIP2:0 allocation score on fc16-builder: 1 - pcmk__primitive_assign: ClusterIP:0 allocation score on fc16-builder: -INFINITY -+pcmk__primitive_assign: ClusterIP:0 allocation score on fc16-builder: -INFINITY -diff --git a/cts/scheduler/scores/bug-lf-2171.scores b/cts/scheduler/scores/bug-lf-2171.scores -index 7d2bdd45307..14cc28a88c5 100644 ---- a/cts/scheduler/scores/bug-lf-2171.scores -+++ b/cts/scheduler/scores/bug-lf-2171.scores -@@ -12,8 +12,12 @@ pcmk__group_assign: res_Dummy2 
allocation score on xenserver2: 0 - pcmk__group_assign: res_Dummy3 allocation score on xenserver1: 200 - pcmk__group_assign: res_Dummy3 allocation score on xenserver2: 0 - pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver1: -INFINITY -+pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver1: -INFINITY -+pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver2: -INFINITY - pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver2: -INFINITY - pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver1: -INFINITY -+pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver1: -INFINITY -+pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver2: -INFINITY - pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver2: -INFINITY - pcmk__primitive_assign: res_Dummy2 allocation score on xenserver1: 200 - pcmk__primitive_assign: res_Dummy2 allocation score on xenserver2: 0 -diff --git a/cts/scheduler/scores/bug-lf-2422.scores b/cts/scheduler/scores/bug-lf-2422.scores -index 99ff12e3bb6..77a284da9ce 100644 ---- a/cts/scheduler/scores/bug-lf-2422.scores -+++ b/cts/scheduler/scores/bug-lf-2422.scores -@@ -248,20 +248,36 @@ pcmk__primitive_assign: o2cb:3 allocation score on qa-suse-2: -INFINITY - pcmk__primitive_assign: o2cb:3 allocation score on qa-suse-3: -INFINITY - pcmk__primitive_assign: o2cb:3 allocation score on qa-suse-4: -INFINITY - pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-1: -INFINITY -+pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-1: -INFINITY -+pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-2: -INFINITY - pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-2: -INFINITY - pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-3: -INFINITY -+pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-3: -INFINITY -+pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-4: -INFINITY - pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-4: -INFINITY - pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-1: -INFINITY -+pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-1: -INFINITY -+pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-2: -INFINITY - pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-2: -INFINITY - pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-3: -INFINITY -+pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-3: -INFINITY -+pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-4: -INFINITY - pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-4: -INFINITY - pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-1: -INFINITY -+pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-1: -INFINITY -+pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-2: -INFINITY - pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-2: -INFINITY - pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-3: -INFINITY -+pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-3: -INFINITY -+pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-4: -INFINITY - pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-4: -INFINITY - pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-1: -INFINITY -+pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-1: -INFINITY -+pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-2: -INFINITY - pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-2: -INFINITY - pcmk__primitive_assign: ocfs:3 allocation 
score on qa-suse-3: -INFINITY -+pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-3: -INFINITY -+pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-4: -INFINITY - pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-4: -INFINITY - pcmk__primitive_assign: sbd_stonith allocation score on qa-suse-1: 0 - pcmk__primitive_assign: sbd_stonith allocation score on qa-suse-2: 0 -diff --git a/cts/scheduler/scores/bug-lf-2453.scores b/cts/scheduler/scores/bug-lf-2453.scores -index eaee72d2002..3ef0f6dc375 100644 ---- a/cts/scheduler/scores/bug-lf-2453.scores -+++ b/cts/scheduler/scores/bug-lf-2453.scores -@@ -17,6 +17,10 @@ pcmk__primitive_assign: DummyResource:1 allocation score on domu1: -INFINITY - pcmk__primitive_assign: DummyResource:1 allocation score on domu2: INFINITY - pcmk__primitive_assign: PrimitiveResource1 allocation score on domu1: INFINITY - pcmk__primitive_assign: apache:0 allocation score on domu1: -INFINITY -+pcmk__primitive_assign: apache:0 allocation score on domu1: -INFINITY -+pcmk__primitive_assign: apache:0 allocation score on domu2: -INFINITY - pcmk__primitive_assign: apache:0 allocation score on domu2: -INFINITY - pcmk__primitive_assign: apache:1 allocation score on domu1: -INFINITY -+pcmk__primitive_assign: apache:1 allocation score on domu1: -INFINITY -+pcmk__primitive_assign: apache:1 allocation score on domu2: -INFINITY - pcmk__primitive_assign: apache:1 allocation score on domu2: -INFINITY -diff --git a/cts/scheduler/scores/bug-lf-2574.scores b/cts/scheduler/scores/bug-lf-2574.scores -index 0f5cf60a7e0..b4a1bd95841 100644 ---- a/cts/scheduler/scores/bug-lf-2574.scores -+++ b/cts/scheduler/scores/bug-lf-2574.scores -@@ -39,8 +39,11 @@ pcmk__primitive_assign: prmDummy1:2 allocation score on srv01: -INFINITY - pcmk__primitive_assign: prmDummy1:2 allocation score on srv02: -INFINITY - pcmk__primitive_assign: prmDummy1:2 allocation score on srv03: -INFINITY - pcmk__primitive_assign: prmPingd:0 allocation score on srv01: -INFINITY -+pcmk__primitive_assign: prmPingd:0 allocation score on srv01: -INFINITY -+pcmk__primitive_assign: prmPingd:0 allocation score on srv02: -INFINITY - pcmk__primitive_assign: prmPingd:0 allocation score on srv02: -INFINITY - pcmk__primitive_assign: prmPingd:0 allocation score on srv03: -INFINITY -+pcmk__primitive_assign: prmPingd:0 allocation score on srv03: -INFINITY - pcmk__primitive_assign: prmPingd:1 allocation score on srv01: -INFINITY - pcmk__primitive_assign: prmPingd:1 allocation score on srv02: -INFINITY - pcmk__primitive_assign: prmPingd:1 allocation score on srv03: INFINITY -diff --git a/cts/scheduler/scores/bundle-order-stop-clone.scores b/cts/scheduler/scores/bundle-order-stop-clone.scores -index 707260b80a9..06596e86a24 100644 ---- a/cts/scheduler/scores/bundle-order-stop-clone.scores -+++ b/cts/scheduler/scores/bundle-order-stop-clone.scores -@@ -147,8 +147,12 @@ pcmk__primitive_assign: galera-bundle-2 allocation score on metal-2: 0 - pcmk__primitive_assign: galera-bundle-2 allocation score on metal-3: INFINITY - pcmk__primitive_assign: galera-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY - pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-1: -INFINITY -+pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-1: -INFINITY -+pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-2: -INFINITY - pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-2: -INFINITY - pcmk__primitive_assign: galera-bundle-docker-0 allocation score on 
metal-3: -INFINITY -+pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-3: -INFINITY -+pcmk__primitive_assign: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY - pcmk__primitive_assign: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY - pcmk__primitive_assign: galera-bundle-docker-1 allocation score on metal-1: -INFINITY - pcmk__primitive_assign: galera-bundle-docker-1 allocation score on metal-2: INFINITY -diff --git a/cts/scheduler/scores/clone-max-zero.scores b/cts/scheduler/scores/clone-max-zero.scores -index f1711b7885e..bd116a2764c 100644 ---- a/cts/scheduler/scores/clone-max-zero.scores -+++ b/cts/scheduler/scores/clone-max-zero.scores -@@ -26,10 +26,18 @@ pcmk__primitive_assign: drbd0:1 allocation score on c001n12: -INFINITY - pcmk__primitive_assign: fencing allocation score on c001n11: 0 - pcmk__primitive_assign: fencing allocation score on c001n12: 0 - pcmk__primitive_assign: o2cb:0 allocation score on c001n11: -INFINITY -+pcmk__primitive_assign: o2cb:0 allocation score on c001n11: -INFINITY -+pcmk__primitive_assign: o2cb:0 allocation score on c001n12: -INFINITY - pcmk__primitive_assign: o2cb:0 allocation score on c001n12: -INFINITY - pcmk__primitive_assign: o2cb:1 allocation score on c001n11: -INFINITY -+pcmk__primitive_assign: o2cb:1 allocation score on c001n11: -INFINITY -+pcmk__primitive_assign: o2cb:1 allocation score on c001n12: -INFINITY - pcmk__primitive_assign: o2cb:1 allocation score on c001n12: -INFINITY - pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n11: -INFINITY -+pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n11: -INFINITY -+pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n12: -INFINITY - pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n12: -INFINITY - pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n11: -INFINITY -+pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n11: -INFINITY -+pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n12: -INFINITY - pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n12: -INFINITY -diff --git a/cts/scheduler/scores/cloned-group-stop.scores b/cts/scheduler/scores/cloned-group-stop.scores -index be835fa5371..7e406c6ddc2 100644 ---- a/cts/scheduler/scores/cloned-group-stop.scores -+++ b/cts/scheduler/scores/cloned-group-stop.scores -@@ -122,8 +122,12 @@ pcmk__primitive_assign: mysql-fs allocation score on rhos4-node4: -INFINITY - pcmk__primitive_assign: mysql-vip allocation score on rhos4-node3: 300 - pcmk__primitive_assign: mysql-vip allocation score on rhos4-node4: -INFINITY - pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node3: -INFINITY -+pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node3: -INFINITY -+pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node4: -INFINITY - pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node4: -INFINITY - pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node3: -INFINITY -+pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node3: -INFINITY -+pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node4: -INFINITY - pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node4: -INFINITY - pcmk__primitive_assign: virt-fencing allocation score on rhos4-node3: 100 - pcmk__primitive_assign: virt-fencing allocation score on rhos4-node4: 0 -diff --git a/cts/scheduler/scores/complex_enforce_colo.scores b/cts/scheduler/scores/complex_enforce_colo.scores -index 9968e1097ef..a5d0b2b4125 100644 ---- 
a/cts/scheduler/scores/complex_enforce_colo.scores -+++ b/cts/scheduler/scores/complex_enforce_colo.scores -@@ -588,13 +588,22 @@ pcmk__primitive_assign: horizon:2 allocation score on rhos6-node1: -INFINITY - pcmk__primitive_assign: horizon:2 allocation score on rhos6-node2: -INFINITY - pcmk__primitive_assign: horizon:2 allocation score on rhos6-node3: 1 - pcmk__primitive_assign: keystone:0 allocation score on rhos6-node1: -INFINITY -+pcmk__primitive_assign: keystone:0 allocation score on rhos6-node1: -INFINITY -+pcmk__primitive_assign: keystone:0 allocation score on rhos6-node2: -INFINITY - pcmk__primitive_assign: keystone:0 allocation score on rhos6-node2: -INFINITY - pcmk__primitive_assign: keystone:0 allocation score on rhos6-node3: -INFINITY -+pcmk__primitive_assign: keystone:0 allocation score on rhos6-node3: -INFINITY - pcmk__primitive_assign: keystone:1 allocation score on rhos6-node1: -INFINITY -+pcmk__primitive_assign: keystone:1 allocation score on rhos6-node1: -INFINITY -+pcmk__primitive_assign: keystone:1 allocation score on rhos6-node2: -INFINITY - pcmk__primitive_assign: keystone:1 allocation score on rhos6-node2: -INFINITY - pcmk__primitive_assign: keystone:1 allocation score on rhos6-node3: -INFINITY -+pcmk__primitive_assign: keystone:1 allocation score on rhos6-node3: -INFINITY -+pcmk__primitive_assign: keystone:2 allocation score on rhos6-node1: -INFINITY - pcmk__primitive_assign: keystone:2 allocation score on rhos6-node1: -INFINITY - pcmk__primitive_assign: keystone:2 allocation score on rhos6-node2: -INFINITY -+pcmk__primitive_assign: keystone:2 allocation score on rhos6-node2: -INFINITY -+pcmk__primitive_assign: keystone:2 allocation score on rhos6-node3: -INFINITY - pcmk__primitive_assign: keystone:2 allocation score on rhos6-node3: -INFINITY - pcmk__primitive_assign: lb-haproxy:0 allocation score on rhos6-node1: 1 - pcmk__primitive_assign: lb-haproxy:0 allocation score on rhos6-node2: 0 -diff --git a/cts/scheduler/scores/enforce-colo1.scores b/cts/scheduler/scores/enforce-colo1.scores -index 8194789648a..262cbd94a30 100644 ---- a/cts/scheduler/scores/enforce-colo1.scores -+++ b/cts/scheduler/scores/enforce-colo1.scores -@@ -18,13 +18,22 @@ pcmk__primitive_assign: engine allocation score on rhel7-auto1: -INFINITY - pcmk__primitive_assign: engine allocation score on rhel7-auto2: -INFINITY - pcmk__primitive_assign: engine allocation score on rhel7-auto3: 0 - pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto1: -INFINITY -+pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto1: -INFINITY -+pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto2: -INFINITY - pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto2: -INFINITY - pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto3: -INFINITY -+pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto3: -INFINITY - pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto1: -INFINITY -+pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto1: -INFINITY -+pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto2: -INFINITY - pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto2: -INFINITY - pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto3: -INFINITY -+pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto3: -INFINITY -+pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto1: -INFINITY - pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto1: -INFINITY - 
pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto2: -INFINITY -+pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto2: -INFINITY -+pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto3: -INFINITY - pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto3: -INFINITY - pcmk__primitive_assign: shooter allocation score on rhel7-auto1: 0 - pcmk__primitive_assign: shooter allocation score on rhel7-auto2: 0 -diff --git a/cts/scheduler/scores/promoted-asymmetrical-order.scores b/cts/scheduler/scores/promoted-asymmetrical-order.scores -index 382e0ebe285..18bc704551e 100644 ---- a/cts/scheduler/scores/promoted-asymmetrical-order.scores -+++ b/cts/scheduler/scores/promoted-asymmetrical-order.scores -@@ -12,8 +12,12 @@ pcmk__clone_assign: rsc2:0 allocation score on node2: 0 - pcmk__clone_assign: rsc2:1 allocation score on node1: 0 - pcmk__clone_assign: rsc2:1 allocation score on node2: 1 - pcmk__primitive_assign: rsc1:0 allocation score on node1: -INFINITY -+pcmk__primitive_assign: rsc1:0 allocation score on node1: -INFINITY -+pcmk__primitive_assign: rsc1:0 allocation score on node2: -INFINITY - pcmk__primitive_assign: rsc1:0 allocation score on node2: -INFINITY - pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY -+pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY -+pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY - pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY - pcmk__primitive_assign: rsc2:0 allocation score on node1: 1 - pcmk__primitive_assign: rsc2:0 allocation score on node2: 0 -diff --git a/cts/scheduler/scores/promoted-failed-demote-2.scores b/cts/scheduler/scores/promoted-failed-demote-2.scores -index 2a85ae6060e..e457d8c6057 100644 ---- a/cts/scheduler/scores/promoted-failed-demote-2.scores -+++ b/cts/scheduler/scores/promoted-failed-demote-2.scores -@@ -16,22 +16,32 @@ pcmk__clone_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__clone_assign: stateful-2:1 allocation score on dl380g5a: INFINITY - pcmk__clone_assign: stateful-2:1 allocation score on dl380g5b: 0 - pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY -+pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY -+pcmk__group_assign: group:0 allocation score on dl380g5b: 0 - pcmk__group_assign: group:0 allocation score on dl380g5b: 0 - pcmk__group_assign: group:1 allocation score on dl380g5a: 0 - pcmk__group_assign: group:1 allocation score on dl380g5b: 0 - pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY -+pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY -+pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY - pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY - pcmk__group_assign: stateful-1:1 allocation score on dl380g5a: INFINITY - pcmk__group_assign: stateful-1:1 allocation score on dl380g5b: 0 - pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY -+pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY -+pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__group_assign: stateful-2:1 allocation score on dl380g5a: INFINITY - pcmk__group_assign: stateful-2:1 allocation score on dl380g5b: 0 - pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY -+pcmk__primitive_assign: stateful-1:0 allocation score on 
dl380g5a: -INFINITY -+pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5a: INFINITY - pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5b: 0 - pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY -+pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY -+pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5a: INFINITY - pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5b: -INFINITY -diff --git a/cts/scheduler/scores/promoted-failed-demote.scores b/cts/scheduler/scores/promoted-failed-demote.scores -index 2a85ae6060e..e457d8c6057 100644 ---- a/cts/scheduler/scores/promoted-failed-demote.scores -+++ b/cts/scheduler/scores/promoted-failed-demote.scores -@@ -16,22 +16,32 @@ pcmk__clone_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__clone_assign: stateful-2:1 allocation score on dl380g5a: INFINITY - pcmk__clone_assign: stateful-2:1 allocation score on dl380g5b: 0 - pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY -+pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY -+pcmk__group_assign: group:0 allocation score on dl380g5b: 0 - pcmk__group_assign: group:0 allocation score on dl380g5b: 0 - pcmk__group_assign: group:1 allocation score on dl380g5a: 0 - pcmk__group_assign: group:1 allocation score on dl380g5b: 0 - pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY -+pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY -+pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY - pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY - pcmk__group_assign: stateful-1:1 allocation score on dl380g5a: INFINITY - pcmk__group_assign: stateful-1:1 allocation score on dl380g5b: 0 - pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY -+pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY -+pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__group_assign: stateful-2:1 allocation score on dl380g5a: INFINITY - pcmk__group_assign: stateful-2:1 allocation score on dl380g5b: 0 - pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY -+pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY -+pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5a: INFINITY - pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5b: 0 - pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY -+pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY -+pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5a: INFINITY - pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5b: -INFINITY - -From 
4abb93e5c779cf058861a25c5eac456ac1087fd6 Mon Sep 17 00:00:00 2001
-From: Reid Wahl
-Date: Wed, 21 Jun 2023 22:40:20 -0700
-Subject: [PATCH 18/19] Fix: libpacemaker: Don't shuffle clone instances
- unnecessarily
-
-Currently, clone instances may be shuffled under certain conditions,
-causing unnecessary resource downtime when an instance is moved
-away from the node where it is currently running.
-
-For example, this can happen when a stopped promotable instance is
-scheduled to promote and the stickiness is lower than the promotion
-score (see the clone-recover-no-shuffle-7 test). Instance 0 gets
-assigned first and goes to the node that will be promoted. If instance 0
-is already running on some node, it must stop there before it can start
-on the new node. Another instance may start in its place after it stops.
-
-The fix is to assign an instance to its current node during the early
-assignment phase, if that node is going to receive any instance at all.
-If the node will receive an instance, it should receive its current
-instance.
-
-The approach is described in detail in comments.
-
-Previously, if instance 0 was running on node1 and got assigned to node2
-during the early assignment phase (because node2 had a higher score),
-we backed out and immediately gave up on assigning instance 0 early.
-
-Now, we increment a "number of instances reserved" counter, as well as
-the parent's counter of instances assigned to node2. We then try again
-to assign instance 0 to node1. If node2 already has the maximum allowed
-number of instances, it will be marked unavailable for this round.
-
-Fixes T489
-Fixes RHBZ#1931023
-
-Signed-off-by: Reid Wahl
----
- lib/pacemaker/pcmk_sched_instances.c | 163 ++++++++++++++++++++-------
- 1 file changed, 122 insertions(+), 41 deletions(-)
-
-diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c
-index b551f3bee61..b010d460dbc 100644
---- a/lib/pacemaker/pcmk_sched_instances.c
-+++ b/lib/pacemaker/pcmk_sched_instances.c
-@@ -605,50 +605,135 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer,
-                      instance->id);
-         return NULL;
-     }
-+    ban_unavailable_allowed_nodes(instance, max_per_node);
-+
-+    // Failed early assignments are reversible (stop_if_fail=false)
-+    chosen = instance->cmds->assign(instance, prefer, (prefer == NULL));
-+    increment_parent_count(instance, chosen);
-+    return chosen;
-+}
-+
-+/*!
-+ * \internal -+ * \brief Try to assign an instance to its current node early -+ * -+ * \param[in] rsc Clone or bundle being assigned (for logs only) -+ * \param[in] instance Clone instance or bundle replica container -+ * \param[in] current Instance's current node -+ * \param[in] max_per_node Maximum number of instances per node -+ * \param[in] available Number of instances still available for assignment -+ * -+ * \return \c true if \p instance was successfully assigned to its current node, -+ * or \c false otherwise -+ */ -+static bool -+assign_instance_early(const pe_resource_t *rsc, pe_resource_t *instance, -+ const pe_node_t *current, int max_per_node, int available) -+{ -+ const pe_node_t *chosen = NULL; -+ int reserved = 0; - -- if (prefer != NULL) { // Possible early assignment to preferred node -+ pe_resource_t *parent = instance->parent; -+ GHashTable *allowed_orig = NULL; -+ GHashTable *allowed_orig_parent = parent->allowed_nodes; - -- // Get preferred node with instance's scores -- pe_node_t *allowed = g_hash_table_lookup(instance->allowed_nodes, -- prefer->details->id); -+ const pe_node_t *allowed_node = g_hash_table_lookup(instance->allowed_nodes, -+ current->details->id); - -- if ((allowed == NULL) || (allowed->weight < 0)) { -- pe_rsc_trace(instance, -- "Not assigning %s to preferred node %s: unavailable", -- instance->id, pe__node_name(prefer)); -- return NULL; -- } -+ pe_rsc_trace(instance, "Trying to assign %s to its current node %s", -+ instance->id, pe__node_name(current)); -+ -+ if (!pcmk__node_available(allowed_node, true, false)) { -+ pe_rsc_info(instance, -+ "Not assigning %s to current node %s: unavailable", -+ instance->id, pe__node_name(current)); -+ return false; - } - -- ban_unavailable_allowed_nodes(instance, max_per_node); -+ /* On each iteration, if instance gets assigned to a node other than its -+ * current one, we reserve one instance for the chosen node, unassign -+ * instance, restore instance's original node tables, and try again. This -+ * way, instances are proportionally assigned to nodes based on preferences, -+ * but shuffling of specific instances is minimized. If a node will be -+ * assigned instances at all, it preferentially receives instances that are -+ * currently active there. -+ * -+ * parent->allowed_nodes tracks the number of instances assigned to each -+ * node. If a node already has max_per_node instances assigned, -+ * ban_unavailable_allowed_nodes() marks it as unavailable. -+ * -+ * In the end, we restore the original parent->allowed_nodes to undo the -+ * changes to counts during tentative assignments. If we successfully -+ * assigned instance to its current node, we increment that node's counter. 
-+ */ - -- if (prefer == NULL) { // Final assignment -- chosen = instance->cmds->assign(instance, NULL, true); -+ // Back up the allowed node tables of instance and its children recursively -+ pcmk__copy_node_tables(instance, &allowed_orig); - -- } else { // Possible early assignment to preferred node -- GHashTable *backup = NULL; -+ // Update instances-per-node counts in a scratch table -+ parent->allowed_nodes = pcmk__copy_node_table(parent->allowed_nodes); - -- pcmk__copy_node_tables(instance, &backup); -- chosen = instance->cmds->assign(instance, prefer, false); -+ while (reserved < available) { -+ chosen = assign_instance(instance, current, max_per_node); - -- if (!pe__same_node(chosen, prefer)) { -- // Revert nodes if preferred node won't be assigned -- if (chosen != NULL) { -- pe_rsc_info(instance, -- "Not assigning %s to preferred node %s: " -- "%s is better", -- instance->id, pe__node_name(prefer), -- pe__node_name(chosen)); -- chosen = NULL; -- } -- pcmk__restore_node_tables(instance, backup); -- pcmk__unassign_resource(instance); -+ if (pe__same_node(chosen, current)) { -+ // Successfully assigned to current node -+ break; -+ } -+ -+ // Assignment updates scores, so restore to original state -+ pe_rsc_debug(instance, "Rolling back node scores for %s", instance->id); -+ pcmk__restore_node_tables(instance, allowed_orig); -+ -+ if (chosen == NULL) { -+ // Assignment failed, so give up -+ pe_rsc_info(instance, -+ "Not assigning %s to current node %s: unavailable", -+ instance->id, pe__node_name(current)); -+ pe__set_resource_flags(instance, pe_rsc_provisional); -+ break; -+ } -+ -+ // We prefer more strongly to assign an instance to the chosen node -+ pe_rsc_debug(instance, -+ "Not assigning %s to current node %s: %s is better", -+ instance->id, pe__node_name(current), -+ pe__node_name(chosen)); -+ -+ // Reserve one instance for the chosen node and try again -+ if (++reserved >= available) { -+ pe_rsc_info(instance, -+ "Not assigning %s to current node %s: " -+ "other assignments are more important", -+ instance->id, pe__node_name(current)); -+ -+ } else { -+ pe_rsc_debug(instance, -+ "Reserved an instance of %s for %s. Retrying " -+ "assignment of %s to %s", -+ rsc->id, pe__node_name(chosen), instance->id, -+ pe__node_name(current)); - } -- g_hash_table_destroy(backup); -+ -+ // Clear this assignment (frees chosen); leave instance counts in parent -+ pcmk__unassign_resource(instance); -+ chosen = NULL; - } - -+ g_hash_table_destroy(allowed_orig); -+ -+ // Restore original instances-per-node counts -+ g_hash_table_destroy(parent->allowed_nodes); -+ parent->allowed_nodes = allowed_orig_parent; -+ -+ if (chosen == NULL) { -+ // Couldn't assign instance to current node -+ return false; -+ } -+ pe_rsc_trace(instance, "Assigned %s to current node %s", -+ instance->id, pe__node_name(current)); - increment_parent_count(instance, chosen); -- return chosen; -+ return true; - } - - /*! 
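The reserve-and-retry loop added above is easier to see outside of diff markup, so the following is a minimal, self-contained sketch of the same idea. It is illustrative only: assign(), node_scores, node_available, and assign_early() are hypothetical stand-ins rather than Pacemaker APIs, and a reservation is modeled by marking the chosen node unavailable outright, whereas the real code counts reservations per node, bans a node only once it reaches max_per_node, and restores the backed-up node score tables between attempts.

#include <stdbool.h>
#include <stdio.h>

#define N_NODES 3

/* Hypothetical model: one instance to place among three candidate nodes */
static int node_scores[N_NODES] = { 50, 100, 75 };
static bool node_available[N_NODES] = { true, true, true };

/* Pick the highest-scoring node that is still available (-1 if none) */
static int
assign(void)
{
    int best = -1;

    for (int i = 0; i < N_NODES; i++) {
        if (node_available[i]
            && ((best < 0) || (node_scores[i] > node_scores[best]))) {
            best = i;
        }
    }
    return best;
}

/* Whenever assign() prefers some node other than current, reserve one of
 * the remaining instances for that node and retry; at most "available"
 * instances may be reserved, bounding the loop exactly like the
 * while (reserved < available) loop in assign_instance_early() above */
static bool
assign_early(int current, int available)
{
    for (int reserved = 0; reserved < available; reserved++) {
        int chosen = assign();

        if (chosen == current) {
            return true;        /* the current node keeps its instance */
        }
        if (chosen < 0) {
            return false;       /* no node can take the instance at all */
        }
        node_available[chosen] = false;     /* reserve chosen, then retry */
    }
    return false;       /* every remaining instance went to a better node */
}

int
main(void)
{
    /* The instance runs on node 0; three instances remain unassigned, so
     * the two higher-scoring nodes each absorb a reservation first */
    printf("stays on current node: %s\n",
           assign_early(0, 3) ? "yes" : "no");
    return 0;
}

Compiled with any C99 compiler, the sketch prints "stays on current node: yes": nodes 1 and 2 out-score node 0 and are reserved on the first two attempts, after which node 0 wins, which is precisely the non-shuffling outcome the commit message describes.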
-@@ -760,22 +845,18 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances, - // Assign as many instances as possible to their current location - for (iter = instances; (iter != NULL) && (assigned < max_total); - iter = iter->next) { -- instance = (pe_resource_t *) iter->data; -+ int available = max_total - assigned; - -+ instance = iter->data; - if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) { - continue; // Already assigned - } - - current = preferred_node(collective, instance, optimal_per_node); -- if (current != NULL) { -- const pe_node_t *chosen = assign_instance(instance, current, -- max_per_node); -- -- if (pe__same_node(chosen, current)) { -- pe_rsc_trace(collective, "Assigned %s to current node %s", -- instance->id, pe__node_name(current)); -- assigned++; -- } -+ if ((current != NULL) -+ && assign_instance_early(collective, instance, current, -+ max_per_node, available)) { -+ assigned++; - } - } - - -From 59e9950212506a9034db8e90a17033734a1d18a1 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Mon, 10 Jul 2023 02:50:28 -0700 -Subject: [PATCH 19/19] Test: scheduler: Update test outputs after clone - instance shuffling fix - -The following tests are now correct: -* clone-recover-no-shuffle-4 -* clone-recover-no-shuffle-5 -* clone-recover-no-shuffle-6 -* clone-recover-no-shuffle-7 - -Scores for several other tests are changed in ways (usually duplicates -from additional tentative assignments) that don't impact the resulting -transition. - -One test (cancel-behind-moving-remote) technically breaks. Previously, -due to shuffling, ovn-dbs-bundle-1 moved to controller-0. Since -ovndb_servers:1 gets promoted on ovn-dbs-bundle-1, controller-0 held the -promoted instance of ovn-dbs-bundle. - -Now, since instances correctly prefer their current nodes, -ovn-dbs-bundle-1 remains on controller-2. However, ovndb_servers:1 still -gets promoted on ovn-dbs-bundle-1, so controller-2 holds the promoted -instance of ovn-dbs-bundle. - -ip-172.17.1.87 is colocated with ovn-dbs-bundle's promoted role and is -banned from controller-2. As a result, ip-172.17.1.87 is now stopped. - -This test is believed to have worked properly in the past due only to -luck. At this point (see T672 and the bundle-promoted-*colocation-* -tests), it's well-established that colocations involving promotable -bundles don't work correctly. 
- -Ref T489 -Ref RHBZ#1931023 - -Signed-off-by: Reid Wahl ---- - .../dot/cancel-behind-moving-remote.dot | 99 +-- - .../dot/clone-recover-no-shuffle-4.dot | 23 +- - .../dot/clone-recover-no-shuffle-5.dot | 57 +- - .../dot/clone-recover-no-shuffle-6.dot | 99 +-- - .../dot/clone-recover-no-shuffle-7.dot | 35 +- - .../exp/cancel-behind-moving-remote.exp | 724 +++++------------- - .../exp/clone-recover-no-shuffle-4.exp | 98 +-- - .../exp/clone-recover-no-shuffle-5.exp | 239 +----- - .../exp/clone-recover-no-shuffle-6.exp | 434 ++--------- - .../exp/clone-recover-no-shuffle-7.exp | 174 ++--- - cts/scheduler/scores/bug-cl-5168.scores | 2 +- - .../scores/cancel-behind-moving-remote.scores | 27 +- - .../scores/clone-recover-no-shuffle-10.scores | 2 +- - .../scores/clone-recover-no-shuffle-4.scores | 10 +- - .../scores/clone-recover-no-shuffle-5.scores | 48 +- - .../scores/clone-recover-no-shuffle-6.scores | 22 +- - .../scores/clone-recover-no-shuffle-7.scores | 14 +- - .../scores/promoted-failed-demote-2.scores | 4 - - .../scores/promoted-failed-demote.scores | 4 - - .../scores/utilization-complex.scores | 24 + - .../scores/utilization-order2.scores | 2 + - .../cancel-behind-moving-remote.summary | 61 +- - .../clone-recover-no-shuffle-4.summary | 8 +- - .../clone-recover-no-shuffle-5.summary | 22 +- - .../clone-recover-no-shuffle-6.summary | 48 +- - .../clone-recover-no-shuffle-7.summary | 12 +- - .../xml/cancel-behind-moving-remote.xml | 14 + - .../xml/clone-recover-no-shuffle-4.xml | 5 - - .../xml/clone-recover-no-shuffle-5.xml | 5 - - .../xml/clone-recover-no-shuffle-6.xml | 5 - - .../xml/clone-recover-no-shuffle-7.xml | 5 - - 31 files changed, 526 insertions(+), 1800 deletions(-) - -diff --git a/cts/scheduler/dot/cancel-behind-moving-remote.dot b/cts/scheduler/dot/cancel-behind-moving-remote.dot -index 1a0dfc8c889..de803a7e299 100644 ---- a/cts/scheduler/dot/cancel-behind-moving-remote.dot -+++ b/cts/scheduler/dot/cancel-behind-moving-remote.dot -@@ -1,28 +1,12 @@ - digraph "g" { - "Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-1" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] - "Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] --"ip-172.17.1.87_monitor_10000 controller-0" [ style=bold color="green" fontcolor="black"] --"ip-172.17.1.87_start_0 controller-0" -> "ip-172.17.1.87_monitor_10000 controller-0" [ style = bold] --"ip-172.17.1.87_start_0 controller-0" [ style=bold color="green" fontcolor="black"] - "nova-evacuate_clear_failcount_0 messaging-0" [ style=bold color="green" fontcolor="black"] --"ovn-dbs-bundle-0_clear_failcount_0 controller-0" -> "ovn-dbs-bundle-0_start_0 controller-2" [ style = bold] --"ovn-dbs-bundle-0_clear_failcount_0 controller-0" [ style=bold color="green" fontcolor="black"] --"ovn-dbs-bundle-0_monitor_30000 controller-2" [ style=bold color="green" fontcolor="black"] --"ovn-dbs-bundle-0_start_0 controller-2" -> "ovn-dbs-bundle-0_monitor_30000 controller-2" [ style = bold] --"ovn-dbs-bundle-0_start_0 controller-2" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] --"ovn-dbs-bundle-0_start_0 controller-2" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] --"ovn-dbs-bundle-0_start_0 controller-2" [ style=bold color="green" fontcolor="black"] --"ovn-dbs-bundle-1_clear_failcount_0 controller-2" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold] --"ovn-dbs-bundle-1_clear_failcount_0 controller-2" [ style=bold color="green" fontcolor="black"] 
--"ovn-dbs-bundle-1_monitor_30000 controller-0" [ style=bold color="green" fontcolor="black"] --"ovn-dbs-bundle-1_start_0 controller-0" -> "ovn-dbs-bundle-1_monitor_30000 controller-0" [ style = bold] --"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] --"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] --"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] --"ovn-dbs-bundle-1_start_0 controller-0" [ style=bold color="green" fontcolor="black"] --"ovn-dbs-bundle-1_stop_0 controller-2" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold] --"ovn-dbs-bundle-1_stop_0 controller-2" -> "ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style = bold] --"ovn-dbs-bundle-1_stop_0 controller-2" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-0_monitor_30000 controller-0" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-0_start_0 controller-0" -> "ovn-dbs-bundle-0_monitor_30000 controller-0" [ style = bold] -+"ovn-dbs-bundle-0_start_0 controller-0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] -+"ovn-dbs-bundle-0_start_0 controller-0" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] -+"ovn-dbs-bundle-0_start_0 controller-0" [ style=bold color="green" fontcolor="black"] - "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovn-dbs-bundle_promoted_0" [ style = bold] - "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] - "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] -@@ -32,19 +16,12 @@ - "ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] - "ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] - "ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"] --"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle-master_pre_notify_promote_0" [ style = bold] --"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle-master_pre_notify_start_0" [ style = bold] --"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle_stopped_0" [ style = bold] --"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"] - "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" -> "ovn-dbs-bundle-master_post_notify_promoted_0" [ style = bold] - "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold] - "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"] - "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" -> "ovn-dbs-bundle-master_post_notify_running_0" [ style = bold] - "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold] - "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange"] --"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" -> "ovn-dbs-bundle-master_post_notify_stopped_0" [ style = bold] --"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" -> "ovn-dbs-bundle-master_stop_0" [ style = bold] --"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style=bold color="green" 
fontcolor="orange"] - "ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] - "ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" [ style = bold] - "ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" [ style = bold] -@@ -55,21 +32,15 @@ - "ovn-dbs-bundle-master_post_notify_running_0" -> "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-1" [ style = bold] - "ovn-dbs-bundle-master_post_notify_running_0" -> "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" [ style = bold] - "ovn-dbs-bundle-master_post_notify_running_0" [ style=bold color="green" fontcolor="orange"] --"ovn-dbs-bundle-master_post_notify_stopped_0" -> "ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style = bold] --"ovn-dbs-bundle-master_post_notify_stopped_0" -> "ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" [ style = bold] --"ovn-dbs-bundle-master_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"] - "ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] - "ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers:0_pre_notify_promote_0 ovn-dbs-bundle-0" [ style = bold] - "ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" [ style = bold] - "ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" [ style = bold] - "ovn-dbs-bundle-master_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"] - "ovn-dbs-bundle-master_pre_notify_start_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold] -+"ovn-dbs-bundle-master_pre_notify_start_0" -> "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-1" [ style = bold] - "ovn-dbs-bundle-master_pre_notify_start_0" -> "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" [ style = bold] - "ovn-dbs-bundle-master_pre_notify_start_0" [ style=bold color="green" fontcolor="orange"] --"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold] --"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" [ style = bold] --"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" [ style = bold] --"ovn-dbs-bundle-master_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"] - "ovn-dbs-bundle-master_promote_0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] - "ovn-dbs-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"] - "ovn-dbs-bundle-master_promoted_0" -> "ovn-dbs-bundle-master_post_notify_promoted_0" [ style = bold] -@@ -79,48 +50,21 @@ - "ovn-dbs-bundle-master_running_0" [ style=bold color="green" fontcolor="orange"] - "ovn-dbs-bundle-master_start_0" -> "ovn-dbs-bundle-master_running_0" [ style = bold] - "ovn-dbs-bundle-master_start_0" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] --"ovn-dbs-bundle-master_start_0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] - "ovn-dbs-bundle-master_start_0" [ style=bold color="green" fontcolor="orange"] --"ovn-dbs-bundle-master_stop_0" -> "ovn-dbs-bundle-master_stopped_0" [ style = bold] --"ovn-dbs-bundle-master_stop_0" -> "ovndb_servers_stop_0 ovn-dbs-bundle-1" [ style = bold] --"ovn-dbs-bundle-master_stop_0" [ style=bold color="green" fontcolor="orange"] 
--"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_post_notify_stopped_0" [ style = bold] --"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold] --"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold] --"ovn-dbs-bundle-master_stopped_0" [ style=bold color="green" fontcolor="orange"] --"ovn-dbs-bundle-podman-0_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"] --"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle-0_start_0 controller-2" [ style = bold] --"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle-podman-0_monitor_60000 controller-2" [ style = bold] --"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle_running_0" [ style = bold] --"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] --"ovn-dbs-bundle-podman-0_start_0 controller-2" [ style=bold color="green" fontcolor="black"] --"ovn-dbs-bundle-podman-1_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] --"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold] --"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle-podman-1_monitor_60000 controller-0" [ style = bold] --"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle_running_0" [ style = bold] --"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] --"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] --"ovn-dbs-bundle-podman-1_start_0 controller-0" [ style=bold color="green" fontcolor="black"] --"ovn-dbs-bundle-podman-1_stop_0 controller-2" -> "ovn-dbs-bundle-podman-1_start_0 controller-0" [ style = bold] --"ovn-dbs-bundle-podman-1_stop_0 controller-2" -> "ovn-dbs-bundle_stopped_0" [ style = bold] --"ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style=bold color="green" fontcolor="black"] --"ovn-dbs-bundle_promote_0" -> "ip-172.17.1.87_start_0 controller-0" [ style = bold] -+"ovn-dbs-bundle-podman-0_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovn-dbs-bundle-0_start_0 controller-0" [ style = bold] -+"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovn-dbs-bundle-podman-0_monitor_60000 controller-0" [ style = bold] -+"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovn-dbs-bundle_running_0" [ style = bold] -+"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] -+"ovn-dbs-bundle-podman-0_start_0 controller-0" [ style=bold color="green" fontcolor="black"] - "ovn-dbs-bundle_promote_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold] - "ovn-dbs-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] - "ovn-dbs-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] - "ovn-dbs-bundle_running_0" -> "ovn-dbs-bundle_promote_0" [ style = bold] - "ovn-dbs-bundle_running_0" [ style=bold color="green" fontcolor="orange"] - "ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold] --"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-podman-0_start_0 controller-2" [ style = bold] --"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-podman-1_start_0 controller-0" [ style = bold] -+"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-podman-0_start_0 controller-0" [ style = bold] - "ovn-dbs-bundle_start_0" [ style=bold color="green" 
fontcolor="orange"] --"ovn-dbs-bundle_stop_0" -> "ovn-dbs-bundle-master_stop_0" [ style = bold] --"ovn-dbs-bundle_stop_0" -> "ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style = bold] --"ovn-dbs-bundle_stop_0" -> "ovndb_servers_stop_0 ovn-dbs-bundle-1" [ style = bold] --"ovn-dbs-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] --"ovn-dbs-bundle_stopped_0" -> "ovn-dbs-bundle_promote_0" [ style = bold] --"ovn-dbs-bundle_stopped_0" -> "ovn-dbs-bundle_start_0" [ style = bold] --"ovn-dbs-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] - "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] - "ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] - "ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] -@@ -130,7 +74,6 @@ - "ovndb_servers:0_pre_notify_promote_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] - "ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_running_0" [ style = bold] - "ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] --"ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] - "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] - "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] - "ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] -@@ -141,29 +84,17 @@ - "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] - "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style = bold] - "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] --"ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style = bold] --"ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] - "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] - "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] - "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] - "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold] -+"ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] - "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold] - "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] --"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold] --"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] --"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold] 
--"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] - "ovndb_servers_promote_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_promoted_0" [ style = bold] - "ovndb_servers_promote_0 ovn-dbs-bundle-1" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] - "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] --"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_running_0" [ style = bold] --"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] --"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] --"ovndb_servers_start_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] --"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-1_stop_0 controller-2" [ style = bold] --"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_stopped_0" [ style = bold] --"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] --"ovndb_servers_stop_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] - "rabbitmq-bundle-1_monitor_30000 controller-0" [ style=dashed color="red" fontcolor="black"] - "rabbitmq-bundle-1_start_0 controller-0" -> "rabbitmq-bundle-1_monitor_30000 controller-0" [ style = dashed] - "rabbitmq-bundle-1_start_0 controller-0" -> "rabbitmq:1_monitor_10000 rabbitmq-bundle-1" [ style = dashed] -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-4.dot b/cts/scheduler/dot/clone-recover-no-shuffle-4.dot -index fd002f28fcf..287d82d3806 100644 ---- a/cts/scheduler/dot/clone-recover-no-shuffle-4.dot -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-4.dot -@@ -1,23 +1,10 @@ - digraph "g" { - "dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] - "dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] --"dummy-clone_start_0" -> "dummy:2_start_0 node2" [ style = bold] --"dummy-clone_start_0" -> "dummy_start_0 node1" [ style = bold] -+"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold] - "dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] --"dummy-clone_stop_0" -> "dummy-clone_stopped_0" [ style = bold] --"dummy-clone_stop_0" -> "dummy_stop_0 node2" [ style = bold] --"dummy-clone_stop_0" [ style=bold color="green" fontcolor="orange"] --"dummy-clone_stopped_0" -> "dummy-clone_start_0" [ style = bold] --"dummy-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] --"dummy:2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] --"dummy:2_start_0 node2" -> "dummy-clone_running_0" [ style = bold] --"dummy:2_start_0 node2" -> "dummy:2_monitor_10000 node2" [ style = bold] --"dummy:2_start_0 node2" [ style=bold color="green" fontcolor="black"] --"dummy_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] --"dummy_start_0 node1" -> "dummy-clone_running_0" [ style = bold] --"dummy_start_0 node1" -> "dummy_monitor_10000 node1" [ style = bold] --"dummy_start_0 node1" [ style=bold color="green" fontcolor="black"] --"dummy_stop_0 node2" -> "dummy-clone_stopped_0" [ style = bold] --"dummy_stop_0 node2" -> "dummy_start_0 node1" [ style = bold] --"dummy_stop_0 node2" [ style=bold color="green" fontcolor="black"] -+"dummy:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold] -+"dummy:2_start_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold] 
-+"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-5.dot b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot -index a2356f2280b..d3bdf04baa9 100644 ---- a/cts/scheduler/dot/clone-recover-no-shuffle-5.dot -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot -@@ -1,56 +1,21 @@ - digraph "g" { - "grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] - "grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] --"grp-clone_start_0" -> "grp:0_start_0" [ style = bold] - "grp-clone_start_0" -> "grp:2_start_0" [ style = bold] - "grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] --"grp-clone_stop_0" -> "grp-clone_stopped_0" [ style = bold] --"grp-clone_stop_0" -> "grp:0_stop_0" [ style = bold] --"grp-clone_stop_0" [ style=bold color="green" fontcolor="orange"] --"grp-clone_stopped_0" -> "grp-clone_start_0" [ style = bold] --"grp-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] --"grp:0_running_0" -> "grp-clone_running_0" [ style = bold] --"grp:0_running_0" [ style=bold color="green" fontcolor="orange"] --"grp:0_start_0" -> "grp:0_running_0" [ style = bold] --"grp:0_start_0" -> "rsc1_start_0 node1" [ style = bold] --"grp:0_start_0" -> "rsc2_start_0 node1" [ style = bold] --"grp:0_start_0" [ style=bold color="green" fontcolor="orange"] --"grp:0_stop_0" -> "grp:0_stopped_0" [ style = bold] --"grp:0_stop_0" -> "rsc1_stop_0 node2" [ style = bold] --"grp:0_stop_0" -> "rsc2_stop_0 node2" [ style = bold] --"grp:0_stop_0" [ style=bold color="green" fontcolor="orange"] --"grp:0_stopped_0" -> "grp-clone_stopped_0" [ style = bold] --"grp:0_stopped_0" -> "grp:0_start_0" [ style = bold] --"grp:0_stopped_0" [ style=bold color="green" fontcolor="orange"] - "grp:2_running_0" -> "grp-clone_running_0" [ style = bold] - "grp:2_running_0" [ style=bold color="green" fontcolor="orange"] - "grp:2_start_0" -> "grp:2_running_0" [ style = bold] --"grp:2_start_0" -> "rsc1:2_start_0 node2" [ style = bold] --"grp:2_start_0" -> "rsc2:2_start_0 node2" [ style = bold] -+"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] -+"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] - "grp:2_start_0" [ style=bold color="green" fontcolor="orange"] --"rsc1:2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] --"rsc1:2_start_0 node2" -> "grp:2_running_0" [ style = bold] --"rsc1:2_start_0 node2" -> "rsc1:2_monitor_10000 node2" [ style = bold] --"rsc1:2_start_0 node2" -> "rsc2:2_start_0 node2" [ style = bold] --"rsc1:2_start_0 node2" [ style=bold color="green" fontcolor="black"] --"rsc1_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] --"rsc1_start_0 node1" -> "grp:0_running_0" [ style = bold] --"rsc1_start_0 node1" -> "rsc1_monitor_10000 node1" [ style = bold] --"rsc1_start_0 node1" -> "rsc2_start_0 node1" [ style = bold] --"rsc1_start_0 node1" [ style=bold color="green" fontcolor="black"] --"rsc1_stop_0 node2" -> "grp:0_stopped_0" [ style = bold] --"rsc1_stop_0 node2" -> "rsc1_start_0 node1" [ style = bold] --"rsc1_stop_0 node2" [ style=bold color="green" fontcolor="black"] --"rsc2:2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] --"rsc2:2_start_0 node2" -> "grp:2_running_0" [ style = bold] --"rsc2:2_start_0 node2" -> "rsc2:2_monitor_10000 node2" [ style = bold] --"rsc2:2_start_0 node2" [ style=bold color="green" fontcolor="black"] --"rsc2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] --"rsc2_start_0 node1" -> "grp:0_running_0" 
[ style = bold] --"rsc2_start_0 node1" -> "rsc2_monitor_10000 node1" [ style = bold] --"rsc2_start_0 node1" [ style=bold color="green" fontcolor="black"] --"rsc2_stop_0 node2" -> "grp:0_stopped_0" [ style = bold] --"rsc2_stop_0 node2" -> "rsc1_stop_0 node2" [ style = bold] --"rsc2_stop_0 node2" -> "rsc2_start_0 node1" [ style = bold] --"rsc2_stop_0 node2" [ style=bold color="green" fontcolor="black"] -+"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] -+"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] -+"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] -+"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] -+"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] -+"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-6.dot b/cts/scheduler/dot/clone-recover-no-shuffle-6.dot -index f8cfe9252d2..f60fd2cc04e 100644 ---- a/cts/scheduler/dot/clone-recover-no-shuffle-6.dot -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-6.dot -@@ -1,97 +1,32 @@ - digraph "g" { --"base-bundle-0_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] --"base-bundle-0_start_0 node1" -> "base-bundle-0_monitor_30000 node1" [ style = bold] --"base-bundle-0_start_0 node1" -> "base_start_0 base-bundle-0" [ style = bold] --"base-bundle-0_start_0 node1" [ style=bold color="green" fontcolor="black"] --"base-bundle-0_stop_0 node3" -> "base-bundle-0_start_0 node1" [ style = bold] --"base-bundle-0_stop_0 node3" -> "base-bundle-podman-0_stop_0 node3" [ style = bold] --"base-bundle-0_stop_0 node3" [ style=bold color="green" fontcolor="black"] --"base-bundle-1_monitor_30000 node3" [ style=bold color="green" fontcolor="black"] --"base-bundle-1_start_0 node3" -> "base-bundle-1_monitor_30000 node3" [ style = bold] --"base-bundle-1_start_0 node3" -> "base_start_0 base-bundle-1" [ style = bold] --"base-bundle-1_start_0 node3" [ style=bold color="green" fontcolor="black"] --"base-bundle-1_stop_0 node2" -> "base-bundle-1_start_0 node3" [ style = bold] --"base-bundle-1_stop_0 node2" -> "base-bundle-podman-1_stop_0 node2" [ style = bold] --"base-bundle-1_stop_0 node2" [ style=bold color="green" fontcolor="black"] --"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node2" [ style = bold] -+"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] - "base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"] --"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node2" [ style = bold] -+"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold] - "base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] --"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node2" [ style = bold] -+"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold] - "base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"] --"base-bundle-2_monitor_30000 node2" [ style=bold color="green" fontcolor="black"] --"base-bundle-2_start_0 node2" -> "base-bundle-2_monitor_30000 node2" [ style = bold] --"base-bundle-2_start_0 node2" -> "base:2_start_0 base-bundle-2" [ style = bold] --"base-bundle-2_start_0 node2" [ style=bold color="green" fontcolor="black"] 
-+"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold] -+"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"] - "base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] - "base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] - "base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] - "base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold] --"base-bundle-clone_start_0" -> "base_start_0 base-bundle-0" [ style = bold] --"base-bundle-clone_start_0" -> "base_start_0 base-bundle-1" [ style = bold] - "base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] --"base-bundle-clone_stop_0" -> "base-bundle-clone_stopped_0" [ style = bold] --"base-bundle-clone_stop_0" -> "base_stop_0 base-bundle-0" [ style = bold] --"base-bundle-clone_stop_0" -> "base_stop_0 base-bundle-1" [ style = bold] --"base-bundle-clone_stop_0" [ style=bold color="green" fontcolor="orange"] --"base-bundle-clone_stopped_0" -> "base-bundle-clone_start_0" [ style = bold] --"base-bundle-clone_stopped_0" -> "base-bundle_stopped_0" [ style = bold] --"base-bundle-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] --"base-bundle-podman-0_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] --"base-bundle-podman-0_start_0 node1" -> "base-bundle-0_start_0 node1" [ style = bold] --"base-bundle-podman-0_start_0 node1" -> "base-bundle-podman-0_monitor_60000 node1" [ style = bold] --"base-bundle-podman-0_start_0 node1" -> "base-bundle_running_0" [ style = bold] --"base-bundle-podman-0_start_0 node1" -> "base_start_0 base-bundle-0" [ style = bold] --"base-bundle-podman-0_start_0 node1" [ style=bold color="green" fontcolor="black"] --"base-bundle-podman-0_stop_0 node3" -> "base-bundle-podman-0_start_0 node1" [ style = bold] --"base-bundle-podman-0_stop_0 node3" -> "base-bundle_stopped_0" [ style = bold] --"base-bundle-podman-0_stop_0 node3" [ style=bold color="green" fontcolor="black"] --"base-bundle-podman-1_monitor_60000 node3" [ style=bold color="green" fontcolor="black"] --"base-bundle-podman-1_start_0 node3" -> "base-bundle-1_start_0 node3" [ style = bold] --"base-bundle-podman-1_start_0 node3" -> "base-bundle-podman-1_monitor_60000 node3" [ style = bold] --"base-bundle-podman-1_start_0 node3" -> "base-bundle_running_0" [ style = bold] --"base-bundle-podman-1_start_0 node3" -> "base_start_0 base-bundle-1" [ style = bold] --"base-bundle-podman-1_start_0 node3" [ style=bold color="green" fontcolor="black"] --"base-bundle-podman-1_stop_0 node2" -> "base-bundle-podman-1_start_0 node3" [ style = bold] --"base-bundle-podman-1_stop_0 node2" -> "base-bundle_stopped_0" [ style = bold] --"base-bundle-podman-1_stop_0 node2" [ style=bold color="green" fontcolor="black"] --"base-bundle-podman-2_monitor_60000 node2" [ style=bold color="green" fontcolor="black"] --"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_monitor_0 node1" [ style = bold] --"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_monitor_0 node2" [ style = bold] --"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_monitor_0 node3" [ style = bold] --"base-bundle-podman-2_start_0 node2" -> "base-bundle-2_start_0 node2" [ style = bold] --"base-bundle-podman-2_start_0 node2" -> "base-bundle-podman-2_monitor_60000 node2" [ style = bold] 
--"base-bundle-podman-2_start_0 node2" -> "base-bundle_running_0" [ style = bold] --"base-bundle-podman-2_start_0 node2" -> "base:2_start_0 base-bundle-2" [ style = bold] --"base-bundle-podman-2_start_0 node2" [ style=bold color="green" fontcolor="black"] -+"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold] -+"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] -+"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"] - "base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] - "base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] --"base-bundle_start_0" -> "base-bundle-podman-0_start_0 node1" [ style = bold] --"base-bundle_start_0" -> "base-bundle-podman-1_start_0 node3" [ style = bold] --"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node2" [ style = bold] -+"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold] - "base-bundle_start_0" [ style=bold color="green" fontcolor="orange"] --"base-bundle_stop_0" -> "base-bundle-clone_stop_0" [ style = bold] --"base-bundle_stop_0" -> "base-bundle-podman-0_stop_0 node3" [ style = bold] --"base-bundle_stop_0" -> "base-bundle-podman-1_stop_0 node2" [ style = bold] --"base-bundle_stop_0" -> "base_stop_0 base-bundle-0" [ style = bold] --"base-bundle_stop_0" -> "base_stop_0 base-bundle-1" [ style = bold] --"base-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] --"base-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] - "base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] - "base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] --"base_start_0 base-bundle-0" -> "base-bundle-clone_running_0" [ style = bold] --"base_start_0 base-bundle-0" -> "base_start_0 base-bundle-1" [ style = bold] --"base_start_0 base-bundle-0" [ style=bold color="green" fontcolor="black"] --"base_start_0 base-bundle-1" -> "base-bundle-clone_running_0" [ style = bold] --"base_start_0 base-bundle-1" -> "base:2_start_0 base-bundle-2" [ style = bold] --"base_start_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] --"base_stop_0 base-bundle-0" -> "base-bundle-0_stop_0 node3" [ style = bold] --"base_stop_0 base-bundle-0" -> "base-bundle-clone_stopped_0" [ style = bold] --"base_stop_0 base-bundle-0" -> "base_start_0 base-bundle-0" [ style = bold] --"base_stop_0 base-bundle-0" [ style=bold color="green" fontcolor="black"] --"base_stop_0 base-bundle-1" -> "base-bundle-1_stop_0 node2" [ style = bold] --"base_stop_0 base-bundle-1" -> "base-bundle-clone_stopped_0" [ style = bold] --"base_stop_0 base-bundle-1" -> "base_start_0 base-bundle-1" [ style = bold] --"base_stop_0 base-bundle-1" -> "base_stop_0 base-bundle-0" [ style = bold] --"base_stop_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-7.dot 
b/cts/scheduler/dot/clone-recover-no-shuffle-7.dot -index 8bff7da01db..f61bf0d7acf 100644 ---- a/cts/scheduler/dot/clone-recover-no-shuffle-7.dot -+++ b/cts/scheduler/dot/clone-recover-no-shuffle-7.dot -@@ -6,40 +6,25 @@ - "dummy-clone_demote_0" [ style=bold color="green" fontcolor="orange"] - "dummy-clone_demoted_0" -> "dummy-clone_promote_0" [ style = bold] - "dummy-clone_demoted_0" -> "dummy-clone_start_0" [ style = bold] --"dummy-clone_demoted_0" -> "dummy-clone_stop_0" [ style = bold] - "dummy-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] --"dummy-clone_promote_0" -> "dummy_promote_0 node1" [ style = bold] -+"dummy-clone_promote_0" -> "dummy:2_promote_0 node1" [ style = bold] - "dummy-clone_promote_0" [ style=bold color="green" fontcolor="orange"] - "dummy-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] - "dummy-clone_running_0" -> "dummy-clone_promote_0" [ style = bold] - "dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] - "dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] --"dummy-clone_start_0" -> "dummy:2_start_0 node3" [ style = bold] --"dummy-clone_start_0" -> "dummy_start_0 node1" [ style = bold] -+"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold] - "dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] --"dummy-clone_stop_0" -> "dummy-clone_stopped_0" [ style = bold] --"dummy-clone_stop_0" -> "dummy_stop_0 node3" [ style = bold] --"dummy-clone_stop_0" [ style=bold color="green" fontcolor="orange"] --"dummy-clone_stopped_0" -> "dummy-clone_promote_0" [ style = bold] --"dummy-clone_stopped_0" -> "dummy-clone_start_0" [ style = bold] --"dummy-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] --"dummy:2_monitor_11000 node3" [ style=bold color="green" fontcolor="black"] --"dummy:2_start_0 node3" -> "dummy-clone_running_0" [ style = bold] --"dummy:2_start_0 node3" -> "dummy:2_monitor_11000 node3" [ style = bold] --"dummy:2_start_0 node3" [ style=bold color="green" fontcolor="black"] -+"dummy:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] -+"dummy:2_promote_0 node1" -> "dummy-clone_promoted_0" [ style = bold] -+"dummy:2_promote_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold] -+"dummy:2_promote_0 node1" [ style=bold color="green" fontcolor="black"] -+"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold] -+"dummy:2_start_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold] -+"dummy:2_start_0 node1" -> "dummy:2_promote_0 node1" [ style = bold] -+"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"] - "dummy_demote_0 node2" -> "dummy-clone_demoted_0" [ style = bold] - "dummy_demote_0 node2" -> "dummy_monitor_11000 node2" [ style = bold] - "dummy_demote_0 node2" [ style=bold color="green" fontcolor="black"] --"dummy_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] - "dummy_monitor_11000 node2" [ style=bold color="green" fontcolor="black"] --"dummy_promote_0 node1" -> "dummy-clone_promoted_0" [ style = bold] --"dummy_promote_0 node1" -> "dummy_monitor_10000 node1" [ style = bold] --"dummy_promote_0 node1" [ style=bold color="green" fontcolor="black"] --"dummy_start_0 node1" -> "dummy-clone_running_0" [ style = bold] --"dummy_start_0 node1" -> "dummy_monitor_10000 node1" [ style = bold] --"dummy_start_0 node1" -> "dummy_promote_0 node1" [ style = bold] --"dummy_start_0 node1" [ style=bold color="green" fontcolor="black"] --"dummy_stop_0 node3" -> "dummy-clone_stopped_0" [ style = bold] 
--"dummy_stop_0 node3" -> "dummy_start_0 node1" [ style = bold] --"dummy_stop_0 node3" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/exp/cancel-behind-moving-remote.exp b/cts/scheduler/exp/cancel-behind-moving-remote.exp -index 17759cb8c57..68cdf4d5370 100644 ---- a/cts/scheduler/exp/cancel-behind-moving-remote.exp -+++ b/cts/scheduler/exp/cancel-behind-moving-remote.exp -@@ -1,46 +1,46 @@ - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - -@@ -48,193 +48,187 @@ - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - -- -+ - - - - -- -+ - - - - - -- -+ - -- -+ - - - - -- -+ - - - - - -- -+ - -- -+ - - - - -- -+ - - - - - -- -+ - -- -+ - - - - -- -+ - - -- -+ - - -- -+ - - -- -+ - - - - - -- -+ - -- -+ - - - - -- -+ - - -- -+ - - -- -+ - - - - - -- -+ - -- -+ - - - - -- -+ - - - - - -- -+ - -- -+ - - - - -- -+ - - - -- -+ - -- -+ - -- -+ - - - - -- -+ - - - -- -+ - -- -+ - -- -+ - - - - -- -+ - - - - - -- -+ - -- -+ - - - - -- -+ - - -- -+ - - -- -- -- -- -- -- -- -+ - - - - - -- -+ - -- -+ - - - -@@ -242,61 +236,11 @@ - - - -- -- -- -- -- -- -- -- -- -- -+ - - - - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - - - -@@ -305,567 +249,302 @@ - - - -- -+ - -- -+ - -- -+ - - - - -- -+ - - - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -+ - -- -+ - -- -+ - - - - -- -+ - - - -- -+ - -- -+ - -- -+ - - - - -- -+ - - - -- -+ - -- -+ - -- -+ - - - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - - -- -+ - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - - -- -+ - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - - - -- -- -- -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - - - -- -+ - -- -+ - - - - - -- -- -- -- -+ - - -- -+ - - -- -+ - - - -- -+ - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -+ - - - - - -- -+ - - -- -+ - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - - -- -+ - -- -- -- -- -- -- -- -- -- - -- -+ - - - -- -+ - -- -- -+ -+ - - -- -- -- -- -- -- -- -- -+ - -- -+ - -- -+ - - - - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - - -- -- -- -- -+ - - - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -+ - -- -+ - -- -+ - - - - -- -+ - - - -- -+ - -- -+ - -- -+ - - - - -- -+ - - - -- -+ - -- -+ - -- -+ - - - - -- -+ - - - -- -+ - -- -+ - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -+ - - - - -- -- -- -- -- -- -- -+ - - - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -+ - - - -@@ -874,7 +553,7 @@ - - - -- -+ - - - -@@ -883,7 +562,7 @@ - - - -- -+ - - - -@@ -892,7 +571,7 @@ - - - -- -+ - - - -@@ -901,42 +580,42 @@ - - - -- -+ - -- -+ - - - - - - -- -+ - - - -- -+ - -- -+ - - - - - - -- -+ - - - -- -+ - -- -+ - - - - - - -- -+ - - - -@@ -945,7 +624,7 @@ - - - -- -+ - - - -@@ -954,7 +633,7 @@ - - - -- -+ - - - -@@ -963,42 +642,42 @@ - - - -- -+ - -- -+ - - - - - - -- -+ - - - -- -+ - -- -+ - - - - - - -- 
-diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-4.exp b/cts/scheduler/exp/clone-recover-no-shuffle-4.exp
-index 4596c685d0a..670a823dac9 100644
---- a/cts/scheduler/exp/clone-recover-no-shuffle-4.exp
-+++ b/cts/scheduler/exp/clone-recover-no-shuffle-4.exp
-[hunk body unrecoverable: XML transition-graph markup stripped during extraction; hunk header was @@ -1,123 +1,51 @@]
-diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-5.exp b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp
-index c1cee43b12f..84b1e1bc98c 100644
---- a/cts/scheduler/exp/clone-recover-no-shuffle-5.exp
-+++ b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp
-[hunk body unrecoverable: XML transition-graph markup stripped during extraction; hunk header was @@ -1,293 +1,110 @@]
-diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-6.exp b/cts/scheduler/exp/clone-recover-no-shuffle-6.exp
-index e6704c9e254..6b6ed075f57 100644
---- a/cts/scheduler/exp/clone-recover-no-shuffle-6.exp
-+++ b/cts/scheduler/exp/clone-recover-no-shuffle-6.exp
-[hunk body unrecoverable: XML transition-graph markup stripped during extraction; hunk header was @@ -1,504 +1,168 @@]
-diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-7.exp b/cts/scheduler/exp/clone-recover-no-shuffle-7.exp
-index 950de9e0312..870ed54e9c2 100644
---- a/cts/scheduler/exp/clone-recover-no-shuffle-7.exp
-+++ b/cts/scheduler/exp/clone-recover-no-shuffle-7.exp
-[hunk body unrecoverable: XML transition-graph markup stripped during extraction; hunk header was @@ -1,239 +1,161 @@]
-diff --git a/cts/scheduler/scores/bug-cl-5168.scores b/cts/scheduler/scores/bug-cl-5168.scores
-index 916fecb195f..59dee5d39b3 100644
---- a/cts/scheduler/scores/bug-cl-5168.scores
-+++ b/cts/scheduler/scores/bug-cl-5168.scores
-@@ -200,7 +200,7 @@ pcmk__primitive_assign: drbd-r1:0 allocation score on hex-2: 1001
- pcmk__primitive_assign: drbd-r1:0 allocation score on hex-3: -INFINITY
- pcmk__primitive_assign: drbd-r1:0 allocation score on hex-3: INFINITY
- pcmk__primitive_assign: drbd-r1:1 allocation score on hex-1: -INFINITY
--pcmk__primitive_assign: drbd-r1:1 allocation score on hex-2: 0
-+pcmk__primitive_assign: drbd-r1:1 allocation score on hex-2: -INFINITY
- pcmk__primitive_assign: drbd-r1:1 allocation score on hex-3: INFINITY
- pcmk__primitive_assign: dummy1 allocation score on hex-1: -INFINITY
- pcmk__primitive_assign: dummy1 allocation score on hex-2: -INFINITY
-diff --git a/cts/scheduler/scores/cancel-behind-moving-remote.scores b/cts/scheduler/scores/cancel-behind-moving-remote.scores
-index 0e11b225aea..09f0175b9e2 100644
---- a/cts/scheduler/scores/cancel-behind-moving-remote.scores
-+++ b/cts/scheduler/scores/cancel-behind-moving-remote.scores
-@@ -1799,7 +1799,7 @@ pcmk__primitive_assign: ip-172.17.1.151 allocation score on messaging-1: -INFINI
- pcmk__primitive_assign: ip-172.17.1.151 allocation score on messaging-2: -INFINITY
- pcmk__primitive_assign: ip-172.17.1.87 allocation score on compute-0: -INFINITY
- pcmk__primitive_assign: ip-172.17.1.87 allocation score on compute-1: -INFINITY
--pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-0: 0
-+pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-0: -INFINITY
- pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-1: -INFINITY
- pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-2: -INFINITY
- pcmk__primitive_assign: ip-172.17.1.87 allocation score on
database-0: -INFINITY -@@ -1865,9 +1865,9 @@ pcmk__primitive_assign: openstack-cinder-volume-podman-0 allocation score on mes - pcmk__primitive_assign: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY --pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-0: 0 -+pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-0: 10000 - pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-1: 0 --pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-2: 10000 -+pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-2: 0 - pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on database-0: 0 - pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on database-1: 0 - pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on database-2: 0 -@@ -1876,9 +1876,9 @@ pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on messaging-1: 0 - pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on messaging-2: 0 - pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY --pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-0: 10000 -+pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-0: 0 - pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-1: 0 --pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-2: 0 -+pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-2: 10000 - pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on database-0: 0 - pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on database-1: 0 - pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on database-2: 0 -@@ -1898,9 +1898,9 @@ pcmk__primitive_assign: ovn-dbs-bundle-2 allocation score on messaging-1: 0 - pcmk__primitive_assign: ovn-dbs-bundle-2 allocation score on messaging-2: 0 - pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY --pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-0: -INFINITY -+pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0 - pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-1: -INFINITY --pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0 -+pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-2: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY -@@ -1909,24 +1909,35 @@ pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-1: - pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on 
compute-0: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY --pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: -INFINITY -+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 -+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0 -+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY - pcmk__primitive_assign: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-10.scores b/cts/scheduler/scores/clone-recover-no-shuffle-10.scores -index 4ac63e37058..4f4c29ed7f1 100644 ---- a/cts/scheduler/scores/clone-recover-no-shuffle-10.scores -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-10.scores -@@ -28,4 +28,4 @@ pcmk__primitive_assign: dummy:1 allocation score on node2: 16 - pcmk__primitive_assign: dummy:1 allocation score on node3: 0 - pcmk__primitive_assign: dummy:2 allocation score on node1: 10 - pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY --pcmk__primitive_assign: 
dummy:2 allocation score on node3: 5 -+pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-4.scores b/cts/scheduler/scores/clone-recover-no-shuffle-4.scores -index 492dad1baa4..2a52c8185b2 100644 ---- a/cts/scheduler/scores/clone-recover-no-shuffle-4.scores -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-4.scores -@@ -14,7 +14,7 @@ pcmk__clone_assign: dummy:2 allocation score on node3: 0 - pcmk__primitive_assign: Fencing allocation score on node1: 0 - pcmk__primitive_assign: Fencing allocation score on node2: 0 - pcmk__primitive_assign: Fencing allocation score on node3: 0 --pcmk__primitive_assign: dummy:0 allocation score on node1: 100 -+pcmk__primitive_assign: dummy:0 allocation score on node1: -INFINITY - pcmk__primitive_assign: dummy:0 allocation score on node1: 100 - pcmk__primitive_assign: dummy:0 allocation score on node2: 1 - pcmk__primitive_assign: dummy:0 allocation score on node2: 1 -@@ -22,10 +22,10 @@ pcmk__primitive_assign: dummy:0 allocation score on node3: 0 - pcmk__primitive_assign: dummy:0 allocation score on node3: 0 - pcmk__primitive_assign: dummy:1 allocation score on node1: -INFINITY - pcmk__primitive_assign: dummy:1 allocation score on node1: 100 --pcmk__primitive_assign: dummy:1 allocation score on node2: 0 --pcmk__primitive_assign: dummy:1 allocation score on node2: 0 -+pcmk__primitive_assign: dummy:1 allocation score on node2: -INFINITY -+pcmk__primitive_assign: dummy:1 allocation score on node2: -INFINITY - pcmk__primitive_assign: dummy:1 allocation score on node3: 1 - pcmk__primitive_assign: dummy:1 allocation score on node3: 1 --pcmk__primitive_assign: dummy:2 allocation score on node1: -INFINITY --pcmk__primitive_assign: dummy:2 allocation score on node2: 0 -+pcmk__primitive_assign: dummy:2 allocation score on node1: 100 -+pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY - pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-5.scores b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores -index 0dd9728830c..c6c8072db82 100644 ---- a/cts/scheduler/scores/clone-recover-no-shuffle-5.scores -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores -@@ -29,7 +29,7 @@ pcmk__clone_assign: rsc2:1 allocation score on node3: 1 - pcmk__clone_assign: rsc2:2 allocation score on node1: 0 - pcmk__clone_assign: rsc2:2 allocation score on node2: 0 - pcmk__clone_assign: rsc2:2 allocation score on node3: 0 --pcmk__group_assign: grp:0 allocation score on node1: 100 -+pcmk__group_assign: grp:0 allocation score on node1: -INFINITY - pcmk__group_assign: grp:0 allocation score on node1: 100 - pcmk__group_assign: grp:0 allocation score on node2: 0 - pcmk__group_assign: grp:0 allocation score on node2: 0 -@@ -37,14 +37,14 @@ pcmk__group_assign: grp:0 allocation score on node3: 0 - pcmk__group_assign: grp:0 allocation score on node3: 0 - pcmk__group_assign: grp:1 allocation score on node1: -INFINITY - pcmk__group_assign: grp:1 allocation score on node1: 100 --pcmk__group_assign: grp:1 allocation score on node2: 0 --pcmk__group_assign: grp:1 allocation score on node2: 0 -+pcmk__group_assign: grp:1 allocation score on node2: -INFINITY -+pcmk__group_assign: grp:1 allocation score on node2: -INFINITY - pcmk__group_assign: grp:1 allocation score on node3: 0 - pcmk__group_assign: grp:1 allocation score on node3: 0 --pcmk__group_assign: grp:2 allocation score on node1: -INFINITY --pcmk__group_assign: grp:2 
allocation score on node2: 0 -+pcmk__group_assign: grp:2 allocation score on node1: 100 -+pcmk__group_assign: grp:2 allocation score on node2: -INFINITY - pcmk__group_assign: grp:2 allocation score on node3: -INFINITY --pcmk__group_assign: rsc1:0 allocation score on node1: 100 -+pcmk__group_assign: rsc1:0 allocation score on node1: -INFINITY - pcmk__group_assign: rsc1:0 allocation score on node1: 100 - pcmk__group_assign: rsc1:0 allocation score on node2: 1 - pcmk__group_assign: rsc1:0 allocation score on node2: 1 -@@ -52,14 +52,14 @@ pcmk__group_assign: rsc1:0 allocation score on node3: 0 - pcmk__group_assign: rsc1:0 allocation score on node3: 0 - pcmk__group_assign: rsc1:1 allocation score on node1: -INFINITY - pcmk__group_assign: rsc1:1 allocation score on node1: 100 --pcmk__group_assign: rsc1:1 allocation score on node2: 0 --pcmk__group_assign: rsc1:1 allocation score on node2: 0 -+pcmk__group_assign: rsc1:1 allocation score on node2: -INFINITY -+pcmk__group_assign: rsc1:1 allocation score on node2: -INFINITY - pcmk__group_assign: rsc1:1 allocation score on node3: 1 - pcmk__group_assign: rsc1:1 allocation score on node3: 1 --pcmk__group_assign: rsc1:2 allocation score on node1: -INFINITY --pcmk__group_assign: rsc1:2 allocation score on node2: 0 -+pcmk__group_assign: rsc1:2 allocation score on node1: 100 -+pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY - pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY --pcmk__group_assign: rsc2:0 allocation score on node1: 0 -+pcmk__group_assign: rsc2:0 allocation score on node1: -INFINITY - pcmk__group_assign: rsc2:0 allocation score on node1: 0 - pcmk__group_assign: rsc2:0 allocation score on node2: 1 - pcmk__group_assign: rsc2:0 allocation score on node2: 1 -@@ -67,17 +67,17 @@ pcmk__group_assign: rsc2:0 allocation score on node3: 0 - pcmk__group_assign: rsc2:0 allocation score on node3: 0 - pcmk__group_assign: rsc2:1 allocation score on node1: -INFINITY - pcmk__group_assign: rsc2:1 allocation score on node1: 0 --pcmk__group_assign: rsc2:1 allocation score on node2: 0 --pcmk__group_assign: rsc2:1 allocation score on node2: 0 -+pcmk__group_assign: rsc2:1 allocation score on node2: -INFINITY -+pcmk__group_assign: rsc2:1 allocation score on node2: -INFINITY - pcmk__group_assign: rsc2:1 allocation score on node3: 1 - pcmk__group_assign: rsc2:1 allocation score on node3: 1 --pcmk__group_assign: rsc2:2 allocation score on node1: -INFINITY --pcmk__group_assign: rsc2:2 allocation score on node2: 0 -+pcmk__group_assign: rsc2:2 allocation score on node1: 0 -+pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY - pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY - pcmk__primitive_assign: Fencing allocation score on node1: 0 - pcmk__primitive_assign: Fencing allocation score on node2: 0 - pcmk__primitive_assign: Fencing allocation score on node3: 0 --pcmk__primitive_assign: rsc1:0 allocation score on node1: 100 -+pcmk__primitive_assign: rsc1:0 allocation score on node1: -INFINITY - pcmk__primitive_assign: rsc1:0 allocation score on node1: 100 - pcmk__primitive_assign: rsc1:0 allocation score on node2: 2 - pcmk__primitive_assign: rsc1:0 allocation score on node2: 2 -@@ -85,17 +85,17 @@ pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 - pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 - pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY - pcmk__primitive_assign: rsc1:1 allocation score on node1: 100 --pcmk__primitive_assign: rsc1:1 allocation score on node2: 0 
--pcmk__primitive_assign: rsc1:1 allocation score on node2: 0 -+pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY - pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 - pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 --pcmk__primitive_assign: rsc1:2 allocation score on node1: -INFINITY --pcmk__primitive_assign: rsc1:2 allocation score on node2: 0 -+pcmk__primitive_assign: rsc1:2 allocation score on node1: 100 -+pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY - pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY -+pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY - pcmk__primitive_assign: rsc2:0 allocation score on node1: 0 --pcmk__primitive_assign: rsc2:0 allocation score on node1: 0 --pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY - pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY -+pcmk__primitive_assign: rsc2:0 allocation score on node2: 1 - pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY - pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY - pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY -@@ -104,6 +104,6 @@ pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY - pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY - pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY - pcmk__primitive_assign: rsc2:1 allocation score on node3: 1 --pcmk__primitive_assign: rsc2:2 allocation score on node1: -INFINITY --pcmk__primitive_assign: rsc2:2 allocation score on node2: 0 -+pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 -+pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY - pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-6.scores b/cts/scheduler/scores/clone-recover-no-shuffle-6.scores -index 643e30f9d18..f1f300cbd66 100644 ---- a/cts/scheduler/scores/clone-recover-no-shuffle-6.scores -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-6.scores -@@ -41,16 +41,16 @@ pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY - pcmk__primitive_assign: Fencing allocation score on node1: 0 - pcmk__primitive_assign: Fencing allocation score on node2: 0 - pcmk__primitive_assign: Fencing allocation score on node3: 0 --pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000 -+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 - pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 --pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 - pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 --pcmk__primitive_assign: base-bundle-1 allocation score on node2: 0 --pcmk__primitive_assign: base-bundle-1 allocation score on node3: 10000 --pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0 --pcmk__primitive_assign: base-bundle-2 allocation score on node2: 10000 -+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 -+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 -+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 -+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 - pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 --pcmk__primitive_assign: 
base-bundle-podman-0 allocation score on node1: 100 -+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: -INFINITY - pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 100 - pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 - pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 -@@ -60,10 +60,10 @@ pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINIT - pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 100 - pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 - pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 --pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 --pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 --pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY --pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: 0 -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY -+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY -+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 100 -+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY - pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY - pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY - pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY -diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-7.scores b/cts/scheduler/scores/clone-recover-no-shuffle-7.scores -index fc45bf740fd..503cbb3addf 100644 ---- a/cts/scheduler/scores/clone-recover-no-shuffle-7.scores -+++ b/cts/scheduler/scores/clone-recover-no-shuffle-7.scores -@@ -1,7 +1,7 @@ - --dummy:0 promotion score on node1: 15 -+dummy:0 promotion score on node3: 5 - dummy:1 promotion score on node2: 10 --dummy:2 promotion score on node3: 5 -+dummy:2 promotion score on node1: 15 - pcmk__clone_assign: dummy-clone allocation score on node1: 0 - pcmk__clone_assign: dummy-clone allocation score on node2: 0 - pcmk__clone_assign: dummy-clone allocation score on node3: 0 -@@ -17,7 +17,7 @@ pcmk__clone_assign: dummy:2 allocation score on node3: 5 - pcmk__primitive_assign: Fencing allocation score on node1: 0 - pcmk__primitive_assign: Fencing allocation score on node2: 0 - pcmk__primitive_assign: Fencing allocation score on node3: 0 --pcmk__primitive_assign: dummy:0 allocation score on node1: 15 -+pcmk__primitive_assign: dummy:0 allocation score on node1: -INFINITY - pcmk__primitive_assign: dummy:0 allocation score on node1: 15 - pcmk__primitive_assign: dummy:0 allocation score on node2: 0 - pcmk__primitive_assign: dummy:0 allocation score on node2: 0 -@@ -27,8 +27,8 @@ pcmk__primitive_assign: dummy:1 allocation score on node1: -INFINITY - pcmk__primitive_assign: dummy:1 allocation score on node1: 15 - pcmk__primitive_assign: dummy:1 allocation score on node2: 11 - pcmk__primitive_assign: dummy:1 allocation score on node2: 11 --pcmk__primitive_assign: dummy:1 allocation score on node3: 0 --pcmk__primitive_assign: dummy:1 allocation score on node3: 0 --pcmk__primitive_assign: dummy:2 allocation score on node1: -INFINITY -+pcmk__primitive_assign: dummy:1 allocation score on node3: -INFINITY -+pcmk__primitive_assign: dummy:1 allocation score on node3: -INFINITY -+pcmk__primitive_assign: dummy:2 allocation score on node1: 15 - 
pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY --pcmk__primitive_assign: dummy:2 allocation score on node3: 5 -+pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY -diff --git a/cts/scheduler/scores/promoted-failed-demote-2.scores b/cts/scheduler/scores/promoted-failed-demote-2.scores -index e457d8c6057..39399d9eac4 100644 ---- a/cts/scheduler/scores/promoted-failed-demote-2.scores -+++ b/cts/scheduler/scores/promoted-failed-demote-2.scores -@@ -34,14 +34,10 @@ pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__group_assign: stateful-2:1 allocation score on dl380g5a: INFINITY - pcmk__group_assign: stateful-2:1 allocation score on dl380g5b: 0 - pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY --pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY --pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5a: INFINITY - pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5b: 0 - pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY --pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY --pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5a: INFINITY - pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5b: -INFINITY -diff --git a/cts/scheduler/scores/promoted-failed-demote.scores b/cts/scheduler/scores/promoted-failed-demote.scores -index e457d8c6057..39399d9eac4 100644 ---- a/cts/scheduler/scores/promoted-failed-demote.scores -+++ b/cts/scheduler/scores/promoted-failed-demote.scores -@@ -34,14 +34,10 @@ pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__group_assign: stateful-2:1 allocation score on dl380g5a: INFINITY - pcmk__group_assign: stateful-2:1 allocation score on dl380g5b: 0 - pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY --pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY --pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5a: INFINITY - pcmk__primitive_assign: stateful-1:1 allocation score on dl380g5b: 0 - pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY --pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY --pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY - pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5a: INFINITY - pcmk__primitive_assign: stateful-2:1 allocation score on dl380g5b: -INFINITY -diff --git a/cts/scheduler/scores/utilization-complex.scores b/cts/scheduler/scores/utilization-complex.scores -index 29bc92c193f..b9dd80c4b6a 100644 ---- a/cts/scheduler/scores/utilization-complex.scores -+++ b/cts/scheduler/scores/utilization-complex.scores -@@ -312,18 +312,26 @@ pcmk__primitive_assign: clone1:2 allocation score on rhel8-4: 1 - pcmk__primitive_assign: clone1:2 allocation score on rhel8-5: 0 - pcmk__primitive_assign: clone1:3 
allocation score on httpd-bundle-0: -INFINITY - pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-0: -INFINITY -+pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-0: -INFINITY -+pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-1: -INFINITY - pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-1: -INFINITY - pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-1: -INFINITY - pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-2: -INFINITY - pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-2: -INFINITY -+pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-2: -INFINITY -+pcmk__primitive_assign: clone1:3 allocation score on rhel8-1: -INFINITY - pcmk__primitive_assign: clone1:3 allocation score on rhel8-1: -INFINITY - pcmk__primitive_assign: clone1:3 allocation score on rhel8-1: 0 - pcmk__primitive_assign: clone1:3 allocation score on rhel8-2: -INFINITY - pcmk__primitive_assign: clone1:3 allocation score on rhel8-2: -INFINITY -+pcmk__primitive_assign: clone1:3 allocation score on rhel8-2: -INFINITY -+pcmk__primitive_assign: clone1:3 allocation score on rhel8-3: -INFINITY - pcmk__primitive_assign: clone1:3 allocation score on rhel8-3: -INFINITY - pcmk__primitive_assign: clone1:3 allocation score on rhel8-3: -INFINITY - pcmk__primitive_assign: clone1:3 allocation score on rhel8-4: -INFINITY - pcmk__primitive_assign: clone1:3 allocation score on rhel8-4: -INFINITY -+pcmk__primitive_assign: clone1:3 allocation score on rhel8-4: -INFINITY -+pcmk__primitive_assign: clone1:3 allocation score on rhel8-5: 1 - pcmk__primitive_assign: clone1:3 allocation score on rhel8-5: 1 - pcmk__primitive_assign: clone1:3 allocation score on rhel8-5: 1 - pcmk__primitive_assign: clone1:4 allocation score on httpd-bundle-0: -INFINITY -@@ -384,18 +392,26 @@ pcmk__primitive_assign: clone2:2 allocation score on rhel8-4: 1 - pcmk__primitive_assign: clone2:2 allocation score on rhel8-5: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-0: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-0: -INFINITY -+pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-0: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-1: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-1: -INFINITY -+pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-1: -INFINITY -+pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-2: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-2: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-2: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on rhel8-1: -INFINITY -+pcmk__primitive_assign: clone2:3 allocation score on rhel8-1: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on rhel8-1: 0 - pcmk__primitive_assign: clone2:3 allocation score on rhel8-2: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on rhel8-2: -INFINITY -+pcmk__primitive_assign: clone2:3 allocation score on rhel8-2: -INFINITY -+pcmk__primitive_assign: clone2:3 allocation score on rhel8-3: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on rhel8-3: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on rhel8-3: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on rhel8-4: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on rhel8-4: -INFINITY 
-+pcmk__primitive_assign: clone2:3 allocation score on rhel8-4: -INFINITY -+pcmk__primitive_assign: clone2:3 allocation score on rhel8-5: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on rhel8-5: -INFINITY - pcmk__primitive_assign: clone2:3 allocation score on rhel8-5: -INFINITY - pcmk__primitive_assign: clone2:4 allocation score on httpd-bundle-0: -INFINITY -@@ -535,18 +551,26 @@ pcmk__primitive_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel - pcmk__primitive_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-5: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-0: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-0: -INFINITY -+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-0: -INFINITY -+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-1: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-1: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-1: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-2: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-2: -INFINITY -+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-2: -INFINITY -+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-1: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-1: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-1: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-2: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-2: -INFINITY -+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-2: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-3: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-3: -INFINITY -+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-3: -INFINITY -+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY -+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-5: -INFINITY - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-5: 0 - pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-5: 0 - pcmk__primitive_assign: httpd-bundle-podman-1 allocation score on httpd-bundle-1: -INFINITY -diff --git a/cts/scheduler/scores/utilization-order2.scores b/cts/scheduler/scores/utilization-order2.scores -index c4b49d9b366..4476b60ee21 100644 ---- a/cts/scheduler/scores/utilization-order2.scores -+++ b/cts/scheduler/scores/utilization-order2.scores -@@ -9,6 +9,8 @@ pcmk__primitive_assign: rsc1 allocation score on node1: 0 - pcmk__primitive_assign: rsc1 allocation score on node2: 0 - pcmk__primitive_assign: rsc2:0 allocation score on node1: 1 - pcmk__primitive_assign: rsc2:0 allocation score on node1: 1 -+pcmk__primitive_assign: rsc2:0 allocation score on node1: 1 -+pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY - pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY - pcmk__primitive_assign: 
rsc2:0 allocation score on node2: 0 - pcmk__primitive_assign: rsc2:1 allocation score on node1: 0 -diff --git a/cts/scheduler/summary/cancel-behind-moving-remote.summary b/cts/scheduler/summary/cancel-behind-moving-remote.summary -index 7726876f949..58de340318b 100644 ---- a/cts/scheduler/summary/cancel-behind-moving-remote.summary -+++ b/cts/scheduler/summary/cancel-behind-moving-remote.summary -@@ -58,22 +58,17 @@ Current cluster status: - Transition Summary: - * Start rabbitmq-bundle-1 ( controller-0 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked) - * Start rabbitmq:1 ( rabbitmq-bundle-1 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked) -- * Start ovn-dbs-bundle-podman-0 ( controller-2 ) -- * Start ovn-dbs-bundle-0 ( controller-2 ) -+ * Start ovn-dbs-bundle-podman-0 ( controller-0 ) -+ * Start ovn-dbs-bundle-0 ( controller-0 ) - * Start ovndb_servers:0 ( ovn-dbs-bundle-0 ) -- * Move ovn-dbs-bundle-podman-1 ( controller-2 -> controller-0 ) -- * Move ovn-dbs-bundle-1 ( controller-2 -> controller-0 ) -- * Restart ovndb_servers:1 ( Unpromoted -> Promoted ovn-dbs-bundle-1 ) due to required ovn-dbs-bundle-podman-1 start -- * Start ip-172.17.1.87 ( controller-0 ) -+ * Promote ovndb_servers:1 ( Unpromoted -> Promoted ovn-dbs-bundle-1 ) - * Move stonith-fence_ipmilan-52540040bb56 ( messaging-2 -> database-0 ) - * Move stonith-fence_ipmilan-525400e1534e ( database-1 -> messaging-2 ) - - Executing Cluster Transition: - * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 - * Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-1 -- * Pseudo action: ovn-dbs-bundle-master_pre_notify_stop_0 -- * Cluster action: clear_failcount for ovn-dbs-bundle-0 on controller-0 -- * Cluster action: clear_failcount for ovn-dbs-bundle-1 on controller-2 -+ * Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0 - * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-0 - * Cluster action: clear_failcount for nova-evacuate on messaging-0 - * Cluster action: clear_failcount for stonith-fence_ipmilan-525400aa1373 on database-0 -@@ -87,52 +82,34 @@ Executing Cluster Transition: - * Cluster action: clear_failcount for stonith-fence_ipmilan-52540060dbba on messaging-0 - * Cluster action: clear_failcount for stonith-fence_ipmilan-525400e018b6 on database-0 - * Cluster action: clear_failcount for stonith-fence_ipmilan-525400c87cdb on database-2 -- * Pseudo action: ovn-dbs-bundle_stop_0 -+ * Pseudo action: ovn-dbs-bundle_start_0 - * Pseudo action: rabbitmq-bundle_start_0 - * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 - * Pseudo action: rabbitmq-bundle-clone_start_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 -- * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_stop_0 -- * Pseudo action: ovn-dbs-bundle-master_stop_0 -+ * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0 -+ * Pseudo action: ovn-dbs-bundle-master_start_0 -+ * Resource action: ovn-dbs-bundle-podman-0 start on controller-0 -+ * Resource action: ovn-dbs-bundle-0 start on controller-0 - * Resource action: stonith-fence_ipmilan-52540040bb56 start on database-0 - * Resource action: stonith-fence_ipmilan-525400e1534e start on messaging-2 - * Pseudo action: rabbitmq-bundle-clone_running_0 -- * Resource action: ovndb_servers stop on ovn-dbs-bundle-1 -- * Pseudo action: ovn-dbs-bundle-master_stopped_0 -- * Resource action: ovn-dbs-bundle-1 stop on controller-2 -+ * Resource 
action: ovndb_servers start on ovn-dbs-bundle-0 -+ * Pseudo action: ovn-dbs-bundle-master_running_0 -+ * Resource action: ovn-dbs-bundle-podman-0 monitor=60000 on controller-0 -+ * Resource action: ovn-dbs-bundle-0 monitor=30000 on controller-0 - * Resource action: stonith-fence_ipmilan-52540040bb56 monitor=60000 on database-0 - * Resource action: stonith-fence_ipmilan-525400e1534e monitor=60000 on messaging-2 - * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 -- * Pseudo action: ovn-dbs-bundle-master_post_notify_stopped_0 -- * Resource action: ovn-dbs-bundle-podman-1 stop on controller-2 -- * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 -- * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 -- * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_stopped_0 -- * Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0 -- * Pseudo action: ovn-dbs-bundle_stopped_0 -- * Pseudo action: ovn-dbs-bundle_start_0 -- * Pseudo action: rabbitmq-bundle_running_0 -- * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 -- * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0 -- * Pseudo action: ovn-dbs-bundle-master_start_0 -- * Resource action: ovn-dbs-bundle-podman-0 start on controller-2 -- * Resource action: ovn-dbs-bundle-0 start on controller-2 -- * Resource action: ovn-dbs-bundle-podman-1 start on controller-0 -- * Resource action: ovn-dbs-bundle-1 start on controller-0 -- * Resource action: ovndb_servers start on ovn-dbs-bundle-0 -- * Resource action: ovndb_servers start on ovn-dbs-bundle-1 -- * Pseudo action: ovn-dbs-bundle-master_running_0 -- * Resource action: ovn-dbs-bundle-podman-0 monitor=60000 on controller-2 -- * Resource action: ovn-dbs-bundle-0 monitor=30000 on controller-2 -- * Resource action: ovn-dbs-bundle-podman-1 monitor=60000 on controller-0 -- * Resource action: ovn-dbs-bundle-1 monitor=30000 on controller-0 - * Pseudo action: ovn-dbs-bundle-master_post_notify_running_0 -+ * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_running_0 - * Pseudo action: ovn-dbs-bundle_running_0 -+ * Pseudo action: rabbitmq-bundle_running_0 - * Pseudo action: ovn-dbs-bundle-master_pre_notify_promote_0 - * Pseudo action: ovn-dbs-bundle_promote_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 -@@ -140,10 +117,8 @@ Executing Cluster Transition: - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_promote_0 - * Pseudo action: ovn-dbs-bundle-master_promote_0 -- * Resource action: ip-172.17.1.87 start on controller-0 - * Resource action: ovndb_servers promote on ovn-dbs-bundle-1 - * Pseudo action: ovn-dbs-bundle-master_promoted_0 -- * Resource action: ip-172.17.1.87 monitor=10000 on controller-0 - * Pseudo action: ovn-dbs-bundle-master_post_notify_promoted_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 -@@ -187,10 +162,10 @@ Revised Cluster Status: - * haproxy-bundle-podman-1 (ocf:heartbeat:podman): Started controller-0 - * haproxy-bundle-podman-2 (ocf:heartbeat:podman): Started controller-1 - * Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]: -- * 
ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-2 -- * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Promoted controller-0 -+ * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-0 -+ * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Promoted controller-2 - * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-1 -- * ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Started controller-0 -+ * ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Stopped - * stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-1 - * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]: - * Started: [ compute-0 compute-1 ] -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-4.summary b/cts/scheduler/summary/clone-recover-no-shuffle-4.summary -index 944bcb834b3..0b6866ec16c 100644 ---- a/cts/scheduler/summary/clone-recover-no-shuffle-4.summary -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-4.summary -@@ -10,19 +10,13 @@ Current cluster status: - * Stopped: [ node1 ] - - Transition Summary: -- * Move dummy:0 ( node2 -> node1 ) -- * Start dummy:2 ( node2 ) -+ * Start dummy:2 ( node1 ) - - Executing Cluster Transition: -- * Pseudo action: dummy-clone_stop_0 -- * Resource action: dummy stop on node2 -- * Pseudo action: dummy-clone_stopped_0 - * Pseudo action: dummy-clone_start_0 - * Resource action: dummy start on node1 -- * Resource action: dummy start on node2 - * Pseudo action: dummy-clone_running_0 - * Resource action: dummy monitor=10000 on node1 -- * Resource action: dummy monitor=10000 on node2 - Using the original execution date of: 2023-06-21 00:59:59Z - - Revised Cluster Status: -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary -index 121214c42ab..8b18120ad8d 100644 ---- a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary -@@ -9,31 +9,17 @@ Current cluster status: - * Stopped: [ node1 ] - - Transition Summary: -- * Move rsc1:0 ( node2 -> node1 ) -- * Move rsc2:0 ( node2 -> node1 ) -- * Start rsc1:2 ( node2 ) -- * Start rsc2:2 ( node2 ) -+ * Start rsc1:2 ( node1 ) -+ * Start rsc2:2 ( node1 ) - - Executing Cluster Transition: -- * Pseudo action: grp-clone_stop_0 -- * Pseudo action: grp:0_stop_0 -- * Resource action: rsc2 stop on node2 -- * Resource action: rsc1 stop on node2 -- * Pseudo action: grp:0_stopped_0 -- * Pseudo action: grp-clone_stopped_0 - * Pseudo action: grp-clone_start_0 -- * Pseudo action: grp:0_start_0 -+ * Pseudo action: grp:2_start_0 - * Resource action: rsc1 start on node1 - * Resource action: rsc2 start on node1 -- * Pseudo action: grp:2_start_0 -- * Resource action: rsc1 start on node2 -- * Resource action: rsc2 start on node2 -- * Pseudo action: grp:0_running_0 -+ * Pseudo action: grp:2_running_0 - * Resource action: rsc1 monitor=10000 on node1 - * Resource action: rsc2 monitor=10000 on node1 -- * Pseudo action: grp:2_running_0 -- * Resource action: rsc1 monitor=10000 on node2 -- * Resource action: rsc2 monitor=10000 on node2 - * Pseudo action: grp-clone_running_0 - - Revised Cluster Status: -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-6.summary b/cts/scheduler/summary/clone-recover-no-shuffle-6.summary -index 19a957e15fb..5702177e33d 100644 ---- a/cts/scheduler/summary/clone-recover-no-shuffle-6.summary -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-6.summary -@@ -11,48 +11,22 @@ Current cluster status: - * base-bundle-2 (ocf:pacemaker:Stateful): Stopped 
- - Transition Summary: -- * Move base-bundle-podman-0 ( node3 -> node1 ) -- * Move base-bundle-0 ( node3 -> node1 ) -- * Restart base:0 ( base-bundle-0 ) due to required base-bundle-podman-0 start -- * Move base-bundle-podman-1 ( node2 -> node3 ) -- * Move base-bundle-1 ( node2 -> node3 ) -- * Restart base:1 ( base-bundle-1 ) due to required base-bundle-podman-1 start -- * Start base-bundle-podman-2 ( node2 ) -- * Start base-bundle-2 ( node2 ) -- * Start base:2 ( base-bundle-2 ) -+ * Start base-bundle-podman-2 ( node1 ) -+ * Start base-bundle-2 ( node1 ) -+ * Start base:2 ( base-bundle-2 ) - - Executing Cluster Transition: -- * Pseudo action: base-bundle_stop_0 - * Pseudo action: base-bundle_start_0 -- * Pseudo action: base-bundle-clone_stop_0 -- * Resource action: base-bundle-podman-2 start on node2 -+ * Pseudo action: base-bundle-clone_start_0 -+ * Resource action: base-bundle-podman-2 start on node1 - * Resource action: base-bundle-2 monitor on node3 - * Resource action: base-bundle-2 monitor on node2 - * Resource action: base-bundle-2 monitor on node1 -- * Resource action: base stop on base-bundle-1 -- * Resource action: base-bundle-1 stop on node2 -- * Resource action: base-bundle-podman-2 monitor=60000 on node2 -- * Resource action: base-bundle-2 start on node2 -- * Resource action: base stop on base-bundle-0 -- * Pseudo action: base-bundle-clone_stopped_0 -- * Pseudo action: base-bundle-clone_start_0 -- * Resource action: base-bundle-0 stop on node3 -- * Resource action: base-bundle-podman-1 stop on node2 -- * Resource action: base-bundle-2 monitor=30000 on node2 -- * Resource action: base-bundle-podman-0 stop on node3 -- * Resource action: base-bundle-podman-1 start on node3 -- * Resource action: base-bundle-1 start on node3 -- * Pseudo action: base-bundle_stopped_0 -- * Resource action: base-bundle-podman-0 start on node1 -- * Resource action: base-bundle-0 start on node1 -- * Resource action: base-bundle-podman-1 monitor=60000 on node3 -- * Resource action: base-bundle-1 monitor=30000 on node3 -- * Resource action: base start on base-bundle-0 -- * Resource action: base start on base-bundle-1 -+ * Resource action: base-bundle-podman-2 monitor=60000 on node1 -+ * Resource action: base-bundle-2 start on node1 - * Resource action: base start on base-bundle-2 - * Pseudo action: base-bundle-clone_running_0 -- * Resource action: base-bundle-podman-0 monitor=60000 on node1 -- * Resource action: base-bundle-0 monitor=30000 on node1 -+ * Resource action: base-bundle-2 monitor=30000 on node1 - * Pseudo action: base-bundle_running_0 - - Revised Cluster Status: -@@ -63,6 +37,6 @@ Revised Cluster Status: - * Full List of Resources: - * Fencing (stonith:fence_xvm): Started node2 - * Container bundle set: base-bundle [localhost/pcmktest]: -- * base-bundle-0 (ocf:pacemaker:Stateful): Started node1 -- * base-bundle-1 (ocf:pacemaker:Stateful): Started node3 -- * base-bundle-2 (ocf:pacemaker:Stateful): Started node2 -+ * base-bundle-0 (ocf:pacemaker:Stateful): Started node3 -+ * base-bundle-1 (ocf:pacemaker:Stateful): Started node2 -+ * base-bundle-2 (ocf:pacemaker:Stateful): Started node1 -diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-7.summary b/cts/scheduler/summary/clone-recover-no-shuffle-7.summary -index e6c9baed0db..77445700f04 100644 ---- a/cts/scheduler/summary/clone-recover-no-shuffle-7.summary -+++ b/cts/scheduler/summary/clone-recover-no-shuffle-7.summary -@@ -10,24 +10,18 @@ Current cluster status: - * Stopped: [ node1 ] - - Transition Summary: -- * Move dummy:0 ( 
Unpromoted node3 -> Promoted node1 ) -- * Demote dummy:1 ( Promoted -> Unpromoted node2 ) -- * Start dummy:2 ( node3 ) -+ * Demote dummy:1 ( Promoted -> Unpromoted node2 ) -+ * Promote dummy:2 ( Stopped -> Promoted node1 ) - - Executing Cluster Transition: - * Resource action: dummy cancel=10000 on node2 - * Pseudo action: dummy-clone_demote_0 - * Resource action: dummy demote on node2 - * Pseudo action: dummy-clone_demoted_0 -- * Pseudo action: dummy-clone_stop_0 -- * Resource action: dummy stop on node3 -- * Resource action: dummy monitor=11000 on node2 -- * Pseudo action: dummy-clone_stopped_0 - * Pseudo action: dummy-clone_start_0 -+ * Resource action: dummy monitor=11000 on node2 - * Resource action: dummy start on node1 -- * Resource action: dummy start on node3 - * Pseudo action: dummy-clone_running_0 -- * Resource action: dummy monitor=11000 on node3 - * Pseudo action: dummy-clone_promote_0 - * Resource action: dummy promote on node1 - * Pseudo action: dummy-clone_promoted_0 -diff --git a/cts/scheduler/xml/cancel-behind-moving-remote.xml b/cts/scheduler/xml/cancel-behind-moving-remote.xml -index 67e14300ba8..7b880602b1b 100644 ---- a/cts/scheduler/xml/cancel-behind-moving-remote.xml -+++ b/cts/scheduler/xml/cancel-behind-moving-remote.xml -@@ -1,5 +1,19 @@ - - -+ - - - -diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-4.xml b/cts/scheduler/xml/clone-recover-no-shuffle-4.xml -index 40e6520c6d0..f0a5feb8c2f 100644 ---- a/cts/scheduler/xml/clone-recover-no-shuffle-4.xml -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-4.xml -@@ -11,11 +11,6 @@ - * Instance dummy:0 should remain started on node2 - * Instance dummy:1 should remain started on node3 - * Instance dummy:2 should start on node1 -- -- This test output is incorrect: -- * Instance dummy:0 moves from node2 to node1 -- * Instance dummy:1 remains started on node3 (correct) -- * Instance dummy:2 starts on node2 - --> - - -diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-5.xml b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml -index 45f3b5a9f3a..95e5eca9c9d 100644 ---- a/cts/scheduler/xml/clone-recover-no-shuffle-5.xml -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml -@@ -12,11 +12,6 @@ - * Instance grp:0 should remain started on node2 - * Instance grp:1 should remain started on node3 - * Instance grp:2 should start on node1 -- -- This test output is incorrect: -- * Instance grp:0 moves to node1 -- * Instance grp:1 remains started on node3 (correct) -- * Instance grp:2 starts on node2 - --> - - -diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-6.xml b/cts/scheduler/xml/clone-recover-no-shuffle-6.xml -index 3de42f581d4..64bb4d90179 100644 ---- a/cts/scheduler/xml/clone-recover-no-shuffle-6.xml -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-6.xml -@@ -12,11 +12,6 @@ - * Instance base:0 should remain started on node3 - * Instance base:1 should remain started on node2 - * Instance base:2 should start on node1 -- -- This test output is incorrect: -- * Instance base:0 moves from node3 to node1 -- * Instance base:1 moves from node2 to node3 -- * Instance base:2 starts on node2 - --> - - -diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-7.xml b/cts/scheduler/xml/clone-recover-no-shuffle-7.xml -index 6e9dad50db4..e588b811d77 100644 ---- a/cts/scheduler/xml/clone-recover-no-shuffle-7.xml -+++ b/cts/scheduler/xml/clone-recover-no-shuffle-7.xml -@@ -11,11 +11,6 @@ - * Instance dummy:0 should remain started (unpromoted) on node3 - * Instance dummy:1 should demote on node2 - * Instance dummy:2 should 
promote on node1 -- -- This test output is incorrect: -- * Instance dummy:0 moves from unpromoted on node3 to promoted on node1 -- * Instance dummy:1 demotes on node2 -- * Instance dummy:2 starts on node3 - --> - - diff --git a/004-clone-rsc-display.patch b/004-clone-rsc-display.patch deleted file mode 100644 index b09a53a..0000000 --- a/004-clone-rsc-display.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 770d417e28dc9527fec8b8a00caaba8825995454 Mon Sep 17 00:00:00 2001 -From: Grace Chin -Date: Wed, 19 Jul 2023 10:25:55 -0400 -Subject: [PATCH] Fix: tools: Fix a bug in clone resource description display - -Previously, descriptions of resources running on multiple -nodes were displayed despite --full not being used (with pcs -status) or --show-detail not being used (with crm_mon). - -For example, clone resources running on multiple nodes were -affected. - -Now, --full and --show-detail must be used in order for resource -descriptions to be displayed, regardless of the number of nodes -the resource is run on. - -see bz: 2106642 ---- - lib/pengine/pe_output.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index e0b43d997a..d1c9f6e226 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -20,8 +20,7 @@ pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts) - { - const char * desc = NULL; - // User-supplied description -- if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description) -- || pcmk__list_of_multiple(rsc->running_on)) { -+ if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)) { - desc = crm_element_value(rsc->xml, XML_ATTR_DESC); - } - return desc; diff --git a/005-attrd-dampen.patch b/005-attrd-dampen.patch deleted file mode 100644 index 80c8a67..0000000 --- a/005-attrd-dampen.patch +++ /dev/null @@ -1,26 +0,0 @@ -From ebac530c815a62f7c3a1c24f64e9a530d9753dbe Mon Sep 17 00:00:00 2001 -From: Hideo Yamauchi -Date: Wed, 19 Jul 2023 18:21:07 +0900 -Subject: [PATCH] High: tools: The dampen parameter is disabled when setting - values with attrd_updater. - ---- - tools/attrd_updater.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tools/attrd_updater.c b/tools/attrd_updater.c -index b615a3575..4688b9ff6 100644 ---- a/tools/attrd_updater.c -+++ b/tools/attrd_updater.c -@@ -501,7 +501,7 @@ send_attrd_update(char command, const char *attr_node, const char *attr_name, - - case 'U': - rc = pcmk__attrd_api_update(NULL, attr_node, attr_name, attr_value, -- NULL, attr_set, NULL, -+ attr_dampen, attr_set, NULL, - attr_options | pcmk__node_attr_value); - break; - --- -2.41.0 - diff --git a/006-controller-reply.patch b/006-controller-reply.patch deleted file mode 100644 index efd4f9c..0000000 --- a/006-controller-reply.patch +++ /dev/null @@ -1,109 +0,0 @@ -From 3e31da0016795397bfeacb2f3d76ecfe35cc1f67 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 17 Jul 2023 14:52:42 -0500 -Subject: [PATCH] Fix: libcrmcommon: wait for reply from appropriate controller - commands - -ipc_controld.c:reply_expected() wrongly omitted PCMK__CONTROLD_CMD_NODES (which -hasn't been a problem because crm_node uses a mainloop instead of sync dispatch -for that) and CRM_OP_RM_NODE_CACHE (which can be sent via -ipc_client.c:pcmk_ipc_purge_node()). 
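The ordering constraint described next matters because a transport that dispatches replies synchronously gives the caller no window to fix up its bookkeeping after sending. A minimal, self-contained C sketch of the pattern (the struct and function names here are hypothetical stand-ins, not Pacemaker's actual libcrmcommon API):

    #include <stdbool.h>
    #include <stdio.h>

    struct api {
        int replies_expected;
    };

    /* Stand-in transport: a synchronous implementation may dispatch the
     * reply from inside the send call, so the counter must already
     * include it. */
    static int
    fake_send(struct api *a)
    {
        if (a->replies_expected > 0) {
            a->replies_expected--;  /* reply consumed during the send */
        }
        return 0;
    }

    static int
    send_request(struct api *a, bool reply_expected)
    {
        if (reply_expected) {
            a->replies_expected++;  /* increment first ... */
        }
        return fake_send(a);        /* ... then send */
    }

    int
    main(void)
    {
        struct api a = { 0 };

        send_request(&a, true);
        printf("replies still pending: %d\n", a.replies_expected);
        return 0;
    }

Incrementing only after a successful send, as the old code did, would lose a reply that a synchronous dispatch had already delivered; the hunks below apply this reordering to send_controller_request() and make dispatch() treat an ack as final whenever no further replies are pending.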
- -Because CRM_OP_RM_NODE_CACHE gets only an ack and no further replies, we now -have to be careful not to return true from the controller's dispatch() -function, otherwise crm_node -R would wait forever for more data. That means -we have to check for whether any replies are expected, which means we have to -increment expected replies *before* sending a request (in case it's sync). - -Regression introduced in 2.0.5 by ae14fa4a - -Fixes T681 ---- - lib/common/ipc_controld.c | 49 ++++++++++++++------------------------- - 1 file changed, 17 insertions(+), 32 deletions(-) - -diff --git a/lib/common/ipc_controld.c b/lib/common/ipc_controld.c -index 3c3a98964..405fd0518 100644 ---- a/lib/common/ipc_controld.c -+++ b/lib/common/ipc_controld.c -@@ -177,18 +177,16 @@ set_nodes_data(pcmk_controld_api_reply_t *data, xmlNode *msg_data) - static bool - reply_expected(pcmk_ipc_api_t *api, xmlNode *request) - { -- const char *command = crm_element_value(request, F_CRM_TASK); -- -- if (command == NULL) { -- return false; -- } -- -- // We only need to handle commands that functions in this file can send -- return !strcmp(command, CRM_OP_REPROBE) -- || !strcmp(command, CRM_OP_NODE_INFO) -- || !strcmp(command, CRM_OP_PING) -- || !strcmp(command, CRM_OP_LRM_FAIL) -- || !strcmp(command, CRM_OP_LRM_DELETE); -+ // We only need to handle commands that API functions can send -+ return pcmk__str_any_of(crm_element_value(request, F_CRM_TASK), -+ PCMK__CONTROLD_CMD_NODES, -+ CRM_OP_LRM_DELETE, -+ CRM_OP_LRM_FAIL, -+ CRM_OP_NODE_INFO, -+ CRM_OP_PING, -+ CRM_OP_REPROBE, -+ CRM_OP_RM_NODE_CACHE, -+ NULL); - } - - static bool -@@ -202,22 +200,12 @@ dispatch(pcmk_ipc_api_t *api, xmlNode *reply) - pcmk_controld_reply_unknown, NULL, NULL, - }; - -- /* If we got an ACK, return true so the caller knows to expect more responses -- * from the IPC server. We do this before decrementing replies_expected because -- * ACKs are not going to be included in that value. -- * -- * Note that we cannot do the same kind of status checking here that we do in -- * ipc_pacemakerd.c. The ACK message we receive does not necessarily contain -- * a status attribute. That is, we may receive this: -- * -- * -- * -- * Instead of this: -- * -- * -- */ - if (pcmk__str_eq(crm_element_name(reply), "ack", pcmk__str_none)) { -- return true; // More replies needed -+ /* ACKs are trivial responses that do not count toward expected replies, -+ * and do not have all the fields that validation requires, so skip that -+ * processing. 
-+ */ -+ return private->replies_expected > 0; - } - - if (private->replies_expected > 0) { -@@ -344,18 +332,15 @@ static int - send_controller_request(pcmk_ipc_api_t *api, xmlNode *request, - bool reply_is_expected) - { -- int rc; -- - if (crm_element_value(request, XML_ATTR_REFERENCE) == NULL) { - return EINVAL; - } -- rc = pcmk__send_ipc_request(api, request); -- if ((rc == pcmk_rc_ok) && reply_is_expected) { -+ if (reply_is_expected) { - struct controld_api_private_s *private = api->api_data; - - private->replies_expected++; - } -- return rc; -+ return pcmk__send_ipc_request(api, request); - } - - static xmlNode * --- -2.41.0 - diff --git a/007-glib-assertions.patch b/007-glib-assertions.patch deleted file mode 100644 index 5679ee6..0000000 --- a/007-glib-assertions.patch +++ /dev/null @@ -1,163 +0,0 @@ -From 63f4bd4d5a324e6eb279340a42c7c36c8902ada7 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 2 Aug 2023 15:55:26 -0500 -Subject: [PATCH 1/4] Fix: controller: don't try to execute agent action at - shutdown - -Normally, agent execution is not possible at shutdown. However, when metadata -is needed for some action, the agent can be called asynchronously, and when the -metadata action returns, the original action is performed. If the metadata is -initiated before shutdown, but completes after shutdown has begun, do not try -to attempt the original action, so we avoid unnecessary error logs. ---- - daemons/controld/controld_execd.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c -index 530e4346c8..a90e8d833e 100644 ---- a/daemons/controld/controld_execd.c -+++ b/daemons/controld/controld_execd.c -@@ -1400,7 +1400,9 @@ metadata_complete(int pid, const pcmk__action_result_t *result, void *user_data) - md = controld_cache_metadata(lrm_state->metadata_cache, data->rsc, - result->action_stdout); - } -- do_lrm_rsc_op(lrm_state, data->rsc, data->input_xml, md); -+ if (!pcmk_is_set(controld_globals.fsa_input_register, R_HA_DISCONNECTED)) { -+ do_lrm_rsc_op(lrm_state, data->rsc, data->input_xml, md); -+ } - free_metadata_cb_data(data); - } - - -From 247d9534f36f690c1474e36cedaadb3934022a05 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 2 Aug 2023 16:16:31 -0500 -Subject: [PATCH 2/4] Refactor: controller: de-functionize lrm_state_destroy() - -It was a one-liner called once ---- - daemons/controld/controld_execd_state.c | 8 +------- - daemons/controld/controld_lrm.h | 5 ----- - 2 files changed, 1 insertion(+), 12 deletions(-) - -diff --git a/daemons/controld/controld_execd_state.c b/daemons/controld/controld_execd_state.c -index 8c68bfca08..4a87a9b332 100644 ---- a/daemons/controld/controld_execd_state.c -+++ b/daemons/controld/controld_execd_state.c -@@ -132,12 +132,6 @@ lrm_state_create(const char *node_name) - return state; - } - --void --lrm_state_destroy(const char *node_name) --{ -- g_hash_table_remove(lrm_state_table, node_name); --} -- - static gboolean - remote_proxy_remove_by_node(gpointer key, gpointer value, gpointer user_data) - { -@@ -799,7 +793,7 @@ lrm_state_unregister_rsc(lrm_state_t * lrm_state, - } - - if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { -- lrm_state_destroy(rsc_id); -+ g_hash_table_remove(lrm_state_table, rsc_id); - return pcmk_ok; - } - -diff --git a/daemons/controld/controld_lrm.h b/daemons/controld/controld_lrm.h -index 25f3db3316..c3113e49c3 100644 ---- a/daemons/controld/controld_lrm.h -+++ b/daemons/controld/controld_lrm.h -@@ -108,11 +108,6 @@ gboolean 
lrm_state_init_local(void); - */ - void lrm_state_destroy_all(void); - --/*! -- * \brief Destroy executor connection by node name -- */ --void lrm_state_destroy(const char *node_name); -- - /*! - * \brief Find lrm_state data by node name - */ - -From 1b915f1ce38756431f7faa142565e3e07aade194 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 2 Aug 2023 15:58:09 -0500 -Subject: [PATCH 3/4] Low: controller: guard lrm_state_table usage with NULL - check - -It is NULL while draining the mainloop during the shutdown sequence. ---- - daemons/controld/controld_execd_state.c | 7 ++++++- - 1 file changed, 6 insertions(+), 1 deletion(-) - -diff --git a/daemons/controld/controld_execd_state.c b/daemons/controld/controld_execd_state.c -index 4a87a9b332..b90cc5e635 100644 ---- a/daemons/controld/controld_execd_state.c -+++ b/daemons/controld/controld_execd_state.c -@@ -301,7 +301,7 @@ lrm_state_destroy_all(void) - lrm_state_t * - lrm_state_find(const char *node_name) - { -- if (!node_name) { -+ if ((node_name == NULL) || (lrm_state_table == NULL)) { - return NULL; - } - return g_hash_table_lookup(lrm_state_table, node_name); -@@ -312,6 +312,8 @@ lrm_state_find_or_create(const char *node_name) - { - lrm_state_t *lrm_state; - -+ CRM_CHECK(lrm_state_table != NULL, return NULL); -+ - lrm_state = g_hash_table_lookup(lrm_state_table, node_name); - if (!lrm_state) { - lrm_state = lrm_state_create(node_name); -@@ -323,6 +325,9 @@ lrm_state_find_or_create(const char *node_name) - GList * - lrm_state_get_list(void) - { -+ if (lrm_state_table == NULL) { -+ return NULL; -+ } - return g_hash_table_get_values(lrm_state_table); - } - - -From 78581213ed3bf4183b0ec1f391b720d5d91f3f68 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 2 Aug 2023 15:48:36 -0500 -Subject: [PATCH 4/4] Log: controller: improve messages for resource history - updates - ---- - daemons/controld/controld_cib.c | 11 +++++++++-- - 1 file changed, 9 insertions(+), 2 deletions(-) - -diff --git a/daemons/controld/controld_cib.c b/daemons/controld/controld_cib.c -index 22ac42486f..c9dde0b748 100644 ---- a/daemons/controld/controld_cib.c -+++ b/daemons/controld/controld_cib.c -@@ -861,10 +861,17 @@ cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *use - case pcmk_ok: - case -pcmk_err_diff_failed: - case -pcmk_err_diff_resync: -- crm_trace("Resource update %d complete: rc=%d", call_id, rc); -+ crm_trace("Resource history update completed (call=%d rc=%d)", -+ call_id, rc); - break; - default: -- crm_warn("Resource update %d failed: (rc=%d) %s", call_id, rc, pcmk_strerror(rc)); -+ if (call_id > 0) { -+ crm_warn("Resource history update %d failed: %s " -+ CRM_XS " rc=%d", call_id, pcmk_strerror(rc), rc); -+ } else { -+ crm_warn("Resource history update failed: %s " CRM_XS " rc=%d", -+ pcmk_strerror(rc), rc); -+ } - } - - if (call_id == pending_rsc_update) { diff --git a/008-attrd-shutdown.patch b/008-attrd-shutdown.patch deleted file mode 100644 index 1d02526..0000000 --- a/008-attrd-shutdown.patch +++ /dev/null @@ -1,45 +0,0 @@ -From f5263c9401c9c38d4e039149deddcc0da0c184ba Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 3 Aug 2023 12:17:08 -0500 -Subject: [PATCH] Fix: attrd: avoid race condition when shutting down - -This addresses a race condition that can occur when the DC and the attribute -writer are different nodes, and shutting down at the same time. When the DC -controller leaves its Corosync process group, the remaining nodes erase its -transient node attributes (including "shutdown") from the CIB. 
However if the -(former) DC's attrd is still up, it can win the attribute writer election -called after the original writer leaves. As the election winner, it writes out -all its attributes to the CIB, including "shutdown". The next time it rejoins -the cluster, it will be immediately shut down. - -Fixes T138 ---- - daemons/attrd/attrd_elections.c | 10 +++++++++- - 1 file changed, 9 insertions(+), 1 deletion(-) - -diff --git a/daemons/attrd/attrd_elections.c b/daemons/attrd/attrd_elections.c -index 3b6b55a0f59..6f4916888a9 100644 ---- a/daemons/attrd/attrd_elections.c -+++ b/daemons/attrd/attrd_elections.c -@@ -22,12 +22,20 @@ attrd_election_cb(gpointer user_data) - { - attrd_declare_winner(); - -+ if (attrd_requesting_shutdown() || attrd_shutting_down()) { -+ /* This node is shutting down or about to, meaning its attributes will -+ * be removed (and may have already been removed from the CIB by a -+ * controller). Don't sync or write its attributes in this case. -+ */ -+ return G_SOURCE_REMOVE; -+ } -+ - /* Update the peers after an election */ - attrd_peer_sync(NULL, NULL); - - /* Update the CIB after an election */ - attrd_write_attributes(true, false); -- return FALSE; -+ return G_SOURCE_REMOVE; - } - - void diff --git a/009-attrd-shutdown-2.patch b/009-attrd-shutdown-2.patch deleted file mode 100644 index ba79a62..0000000 --- a/009-attrd-shutdown-2.patch +++ /dev/null @@ -1,210 +0,0 @@ -From 83e547cc64f2586031a007ab58e91fc22cd1a68a Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 24 Aug 2023 12:18:23 -0500 -Subject: [PATCH] Refactor: attrd: use enum instead of bools for - attrd_write_attributes() - ---- - daemons/attrd/attrd_cib.c | 24 ++++++++++++++++++------ - daemons/attrd/attrd_corosync.c | 2 +- - daemons/attrd/attrd_elections.c | 2 +- - daemons/attrd/attrd_ipc.c | 2 +- - daemons/attrd/attrd_utils.c | 2 +- - daemons/attrd/pacemaker-attrd.h | 8 +++++++- - 6 files changed, 29 insertions(+), 11 deletions(-) - -diff --git a/daemons/attrd/attrd_cib.c b/daemons/attrd/attrd_cib.c -index 928c0133745..9c787fe1024 100644 ---- a/daemons/attrd/attrd_cib.c -+++ b/daemons/attrd/attrd_cib.c -@@ -343,16 +343,23 @@ attrd_write_attribute(attribute_t *a, bool ignore_delay) - free_xml(xml_top); - } - -+/*! -+ * \internal -+ * \brief Write out attributes -+ * -+ * \param[in] options Group of enum attrd_write_options -+ */ - void --attrd_write_attributes(bool all, bool ignore_delay) -+attrd_write_attributes(uint32_t options) - { - GHashTableIter iter; - attribute_t *a = NULL; - -- crm_debug("Writing out %s attributes", all? "all" : "changed"); -+ crm_debug("Writing out %s attributes", -+ pcmk_is_set(options, attrd_write_all)? "all" : "changed"); - g_hash_table_iter_init(&iter, attributes); - while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & a)) { -- if (!all && a->unknown_peer_uuids) { -+ if (!pcmk_is_set(options, attrd_write_all) && a->unknown_peer_uuids) { - // Try writing this attribute again, in case peer ID was learned - a->changed = true; - } else if (a->force_write) { -@@ -360,9 +367,14 @@ attrd_write_attributes(bool all, bool ignore_delay) - a->changed = true; - } - -- if(all || a->changed) { -- /* When forced write flag is set, ignore delay. */ -- attrd_write_attribute(a, (a->force_write ? 
true : ignore_delay)); -+ if (pcmk_is_set(options, attrd_write_all) || a->changed) { -+ bool ignore_delay = pcmk_is_set(options, attrd_write_no_delay); -+ -+ if (a->force_write) { -+ // Always ignore delay when forced write flag is set -+ ignore_delay = true; -+ } -+ attrd_write_attribute(a, ignore_delay); - } else { - crm_trace("Skipping unchanged attribute %s", a->id); - } -diff --git a/daemons/attrd/attrd_corosync.c b/daemons/attrd/attrd_corosync.c -index 1aec35a054e..49631df6e44 100644 ---- a/daemons/attrd/attrd_corosync.c -+++ b/daemons/attrd/attrd_corosync.c -@@ -285,7 +285,7 @@ record_peer_nodeid(attribute_value_t *v, const char *host) - - crm_trace("Learned %s has node id %s", known_peer->uname, known_peer->uuid); - if (attrd_election_won()) { -- attrd_write_attributes(false, false); -+ attrd_write_attributes(attrd_write_changed); - } - } - -diff --git a/daemons/attrd/attrd_elections.c b/daemons/attrd/attrd_elections.c -index c25a41a4492..01341db18e4 100644 ---- a/daemons/attrd/attrd_elections.c -+++ b/daemons/attrd/attrd_elections.c -@@ -34,7 +34,7 @@ attrd_election_cb(gpointer user_data) - attrd_peer_sync(NULL, NULL); - - /* Update the CIB after an election */ -- attrd_write_attributes(true, false); -+ attrd_write_attributes(attrd_write_all); - return G_SOURCE_REMOVE; - } - -diff --git a/daemons/attrd/attrd_ipc.c b/daemons/attrd/attrd_ipc.c -index 4be789de7f9..05c4a696a19 100644 ---- a/daemons/attrd/attrd_ipc.c -+++ b/daemons/attrd/attrd_ipc.c -@@ -232,7 +232,7 @@ attrd_client_refresh(pcmk__request_t *request) - crm_info("Updating all attributes"); - - attrd_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags); -- attrd_write_attributes(true, true); -+ attrd_write_attributes(attrd_write_all|attrd_write_no_delay); - - pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); - return NULL; -diff --git a/daemons/attrd/attrd_utils.c b/daemons/attrd/attrd_utils.c -index c43eac1695a..bfd51368890 100644 ---- a/daemons/attrd/attrd_utils.c -+++ b/daemons/attrd/attrd_utils.c -@@ -156,7 +156,7 @@ attrd_cib_replaced_cb(const char *event, xmlNode * msg) - if (attrd_election_won()) { - if (change_section & (cib_change_section_nodes | cib_change_section_status)) { - crm_notice("Updating all attributes after %s event", event); -- attrd_write_attributes(true, false); -+ attrd_write_attributes(attrd_write_all); - } - } - -diff --git a/daemons/attrd/pacemaker-attrd.h b/daemons/attrd/pacemaker-attrd.h -index 41f31d97b3b..2d781d11394 100644 ---- a/daemons/attrd/pacemaker-attrd.h -+++ b/daemons/attrd/pacemaker-attrd.h -@@ -176,8 +176,14 @@ void attrd_free_attribute(gpointer data); - void attrd_free_attribute_value(gpointer data); - attribute_t *attrd_populate_attribute(xmlNode *xml, const char *attr); - -+enum attrd_write_options { -+ attrd_write_changed = 0, -+ attrd_write_all = (1 << 0), -+ attrd_write_no_delay = (1 << 1), -+}; -+ - void attrd_write_attribute(attribute_t *a, bool ignore_delay); --void attrd_write_attributes(bool all, bool ignore_delay); -+void attrd_write_attributes(uint32_t options); - void attrd_write_or_elect_attribute(attribute_t *a); - - extern int minimum_protocol_version; -From 58400e272cfc51f02eec69cdd0ed0d27a30e78a3 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 24 Aug 2023 12:27:53 -0500 -Subject: [PATCH] Fix: attrd: avoid race condition at writer election - -f5263c94 was not a complete fix. 
The issue may also occur if a remaining node -(not the original DC or writer) wins the attribute writer election after the -original DC's controller has exited but before its attribute manager has exited. - -The long-term solution will be to have the attribute manager (instead of the -controller) be in control of erasing transient attributes from the CIB when a -node leaves. This short-term workaround simply has new attribute writers skip -shutdown attributes when writing out all attributes. - -Fixes T138 ---- - daemons/attrd/attrd_cib.c | 5 +++++ - daemons/attrd/attrd_elections.c | 14 ++++++++++++-- - daemons/attrd/pacemaker-attrd.h | 1 + - 3 files changed, 18 insertions(+), 2 deletions(-) - -diff --git a/daemons/attrd/attrd_cib.c b/daemons/attrd/attrd_cib.c -index 9c787fe102..2c910b4c64 100644 ---- a/daemons/attrd/attrd_cib.c -+++ b/daemons/attrd/attrd_cib.c -@@ -359,6 +359,11 @@ attrd_write_attributes(uint32_t options) - pcmk_is_set(options, attrd_write_all)? "all" : "changed"); - g_hash_table_iter_init(&iter, attributes); - while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & a)) { -+ if (pcmk_is_set(options, attrd_write_skip_shutdown) -+ && pcmk__str_eq(a->id, XML_CIB_ATTR_SHUTDOWN, pcmk__str_none)) { -+ continue; -+ } -+ - if (!pcmk_is_set(options, attrd_write_all) && a->unknown_peer_uuids) { - // Try writing this attribute again, in case peer ID was learned - a->changed = true; -diff --git a/daemons/attrd/attrd_elections.c b/daemons/attrd/attrd_elections.c -index 01341db18e..a95cd44cbd 100644 ---- a/daemons/attrd/attrd_elections.c -+++ b/daemons/attrd/attrd_elections.c -@@ -33,8 +33,18 @@ attrd_election_cb(gpointer user_data) - /* Update the peers after an election */ - attrd_peer_sync(NULL, NULL); - -- /* Update the CIB after an election */ -- attrd_write_attributes(attrd_write_all); -+ /* After winning an election, update the CIB with the values of all -+ * attributes as the winner knows them. -+ * -+ * However, do not write out any "shutdown" attributes. A node that is -+ * shutting down will have all its transient attributes removed from the CIB -+ * when its controller exits, and from the attribute manager's memory (on -+ * remaining nodes) when its attribute manager exits; if an election is won -+ * between when those two things happen, we don't want to write the shutdown -+ * attribute back out, which would cause the node to immediately shut down -+ * the next time it rejoins. 
-+ */ -+ attrd_write_attributes(attrd_write_all|attrd_write_skip_shutdown); - return G_SOURCE_REMOVE; - } - -diff --git a/daemons/attrd/pacemaker-attrd.h b/daemons/attrd/pacemaker-attrd.h -index 2d781d1139..2e35bd7ec5 100644 ---- a/daemons/attrd/pacemaker-attrd.h -+++ b/daemons/attrd/pacemaker-attrd.h -@@ -180,6 +180,7 @@ enum attrd_write_options { - attrd_write_changed = 0, - attrd_write_all = (1 << 0), - attrd_write_no_delay = (1 << 1), -+ attrd_write_skip_shutdown = (1 << 2), - }; - - void attrd_write_attribute(attribute_t *a, bool ignore_delay); diff --git a/010-revert-58400e27.patch b/010-revert-58400e27.patch deleted file mode 100644 index d08ff17..0000000 --- a/010-revert-58400e27.patch +++ /dev/null @@ -1,62 +0,0 @@ -From 2e81e0db9a716c486805e0760f78be65ca79eeae Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 17 Oct 2023 15:28:27 -0500 -Subject: [PATCH] Fix: attrd: avoid regression by reverting 58400e27 - -Fixes T714 ---- - daemons/attrd/attrd_cib.c | 5 ----- - daemons/attrd/attrd_elections.c | 10 +--------- - daemons/attrd/pacemaker-attrd.h | 1 - - 3 files changed, 1 insertion(+), 15 deletions(-) - -diff --git a/daemons/attrd/attrd_cib.c b/daemons/attrd/attrd_cib.c -index 2de37a7cb6..9ce2872715 100644 ---- a/daemons/attrd/attrd_cib.c -+++ b/daemons/attrd/attrd_cib.c -@@ -641,11 +641,6 @@ attrd_write_attributes(uint32_t options) - pcmk_is_set(options, attrd_write_all)? "all" : "changed"); - g_hash_table_iter_init(&iter, attributes); - while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & a)) { -- if (pcmk_is_set(options, attrd_write_skip_shutdown) -- && pcmk__str_eq(a->id, XML_CIB_ATTR_SHUTDOWN, pcmk__str_none)) { -- continue; -- } -- - if (!pcmk_is_set(options, attrd_write_all) && a->unknown_peer_uuids) { - // Try writing this attribute again, in case peer ID was learned - a->changed = true; -diff --git a/daemons/attrd/attrd_elections.c b/daemons/attrd/attrd_elections.c -index a95cd44cbd..62310ed1d8 100644 ---- a/daemons/attrd/attrd_elections.c -+++ b/daemons/attrd/attrd_elections.c -@@ -35,16 +35,8 @@ attrd_election_cb(gpointer user_data) - - /* After winning an election, update the CIB with the values of all - * attributes as the winner knows them. -- * -- * However, do not write out any "shutdown" attributes. A node that is -- * shutting down will have all its transient attributes removed from the CIB -- * when its controller exits, and from the attribute manager's memory (on -- * remaining nodes) when its attribute manager exits; if an election is won -- * between when those two things happen, we don't want to write the shutdown -- * attribute back out, which would cause the node to immediately shut down -- * the next time it rejoins. 
- */ -- attrd_write_attributes(attrd_write_all|attrd_write_skip_shutdown); -+ attrd_write_attributes(attrd_write_all); - return G_SOURCE_REMOVE; - } - -diff --git a/daemons/attrd/pacemaker-attrd.h b/daemons/attrd/pacemaker-attrd.h -index e3c369b5bc..a95bb54367 100644 ---- a/daemons/attrd/pacemaker-attrd.h -+++ b/daemons/attrd/pacemaker-attrd.h -@@ -181,7 +181,6 @@ enum attrd_write_options { - attrd_write_changed = 0, - attrd_write_all = (1 << 0), - attrd_write_no_delay = (1 << 1), -- attrd_write_skip_shutdown = (1 << 2), - }; - - void attrd_write_attribute(attribute_t *a, bool ignore_delay); diff --git a/011-revert-f5263c94.patch b/011-revert-f5263c94.patch deleted file mode 100644 index c0f1c03..0000000 --- a/011-revert-f5263c94.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 14b87a38786ae5b4dc12fc1581e5d39a274fced2 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 30 Oct 2023 12:21:24 -0500 -Subject: [PATCH] Fix: attrd: revert faulty T138 fix - -f5263c9401 created a timing issue where a node could get a shutdown attribute, -the original writer leaves the cluster before writing it out, then the -shutting-down node wins the writer election. In that case, it would skip the -write-out and the scheduler would never shut it down. - -Reopens T138 ---- - daemons/attrd/attrd_elections.c | 8 -------- - 1 file changed, 8 deletions(-) - -diff --git a/daemons/attrd/attrd_elections.c b/daemons/attrd/attrd_elections.c -index 62310ed1d8..82fbe8affc 100644 ---- a/daemons/attrd/attrd_elections.c -+++ b/daemons/attrd/attrd_elections.c -@@ -22,14 +22,6 @@ - { - attrd_declare_winner(); - -- if (attrd_requesting_shutdown() || attrd_shutting_down()) { -- /* This node is shutting down or about to, meaning its attributes will -- * be removed (and may have already been removed from the CIB by a -- * controller). Don't sync or write its attributes in this case. -- */ -- return G_SOURCE_REMOVE; -- } -- - /* Update the peers after an election */ - attrd_peer_sync(NULL, NULL); - diff --git a/pacemaker.spec b/pacemaker.spec index 51c0706..0b782a7 100644 --- a/pacemaker.spec +++ b/pacemaker.spec @@ -35,11 +35,11 @@ ## Upstream pacemaker version, and its package version (specversion ## can be incremented to build packages reliably considered "newer" ## than previously built packages with the same pcmkversion) -%global pcmkversion 2.1.6 -%global specversion 10 +%global pcmkversion 2.1.7 +%global specversion 1 ## Upstream commit (full commit ID, abbreviated commit ID, or tag) to build -%global commit 6fdc9deea294bbad629b003c6ae036aaed8e3ee0 +%global commit 7534cc50aefbf3c161c7ed258daa1019a94d5079 ## Since git v2.11, the extent of abbreviation is autoscaled by default ## (used to be constant of 7), so we need to convey it for non-tags, too. 
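A note on the attrd patches removed above: they replaced attrd_write_attributes()'s pair of booleans with a single uint32_t built from enum attrd_write_options, so callers combine flags with | and the callee tests them with pcmk_is_set(). A self-contained sketch of that bit-flag pattern, with is_set() as a local stand-in for Pacemaker's pcmk_is_set() macro:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum write_options {
        write_changed  = 0,         /* default: only changed attributes */
        write_all      = (1 << 0),  /* write every attribute */
        write_no_delay = (1 << 1),  /* ignore configured write delays */
    };

    /* Local stand-in for Pacemaker's pcmk_is_set() */
    static bool
    is_set(uint32_t flags, uint32_t mask)
    {
        return (flags & mask) == mask;
    }

    int
    main(void)
    {
        /* attrd_client_refresh() passes attrd_write_all|attrd_write_no_delay */
        uint32_t options = write_all|write_no_delay;

        printf("all=%d no_delay=%d changed_only=%d\n",
               is_set(options, write_all),
               is_set(options, write_no_delay),
               options == write_changed);
        return 0;
    }

Adding an option such as the short-lived attrd_write_skip_shutdown then only needs a new enumerator rather than a signature change for every caller, which kept the later reverts (patches 010 and 011) small.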
@@ -232,7 +232,7 @@ Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: %{pcmkversion} -Release: %{pcmk_release}.1%{?dist} +Release: %{pcmk_release}%{?dist} License: GPL-2.0-or-later AND LGPL-2.1-or-later Url: https://www.clusterlabs.org/ @@ -248,17 +248,7 @@ Source0: https://codeload.github.com/%{github_owner}/%{name}/tar.gz/%{arch Source1: https://codeload.github.com/%{github_owner}/%{nagios_name}/tar.gz/%{nagios_archive_github_url} # upstream commits -Patch001: 001-remote-start-state.patch -Patch002: 002-group-colocation-constraint.patch -Patch003: 003-clone-shuffle.patch -Patch004: 004-clone-rsc-display.patch -Patch005: 005-attrd-dampen.patch -Patch006: 006-controller-reply.patch -Patch007: 007-glib-assertions.patch -Patch008: 008-attrd-shutdown.patch -Patch009: 009-attrd-shutdown-2.patch -Patch010: 010-revert-58400e27.patch -Patch011: 011-revert-f5263c94.patch +#Patch001: 001-xxxx.patch Requires: resource-agents Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} @@ -303,7 +293,7 @@ BuildRequires: sed # Required for core functionality BuildRequires: pkgconfig(glib-2.0) >= 2.42 -BuildRequires: libxml2-devel +BuildRequires: libxml2-devel >= 2.6.0 BuildRequires: libxslt-devel BuildRequires: libuuid-devel BuildRequires: %{pkgname_bzip2_devel} @@ -318,7 +308,7 @@ BuildRequires: pam-devel BuildRequires: %{pkgname_gettext} >= 0.18 # Required for "make check" -BuildRequires: libcmocka-devel +BuildRequires: libcmocka-devel >= 1.1.0 BuildRequires: pkgconfig(systemd) @@ -466,7 +456,7 @@ Requires: libqb-devel%{?_isa} Requires: %{?pkgname_libtool_devel_arch} %endif Requires: libuuid-devel%{?_isa} -Requires: libxml2-devel%{?_isa} +Requires: libxml2-devel%{?_isa} >= 2.6.0 Requires: libxslt-devel%{?_isa} %description -n %{pkgname_pcmk_libs}-devel @@ -725,15 +715,20 @@ exit 0 %exclude %{_sbindir}/pacemaker_remoted %{_libexecdir}/pacemaker/* -%{_sbindir}/crm_master +%if %{with stonithd} +%{_sbindir}/fence_legacy +%endif %{_sbindir}/fence_watchdog %doc %{_mandir}/man7/pacemaker-controld.* %doc %{_mandir}/man7/pacemaker-schedulerd.* %doc %{_mandir}/man7/pacemaker-fenced.* %doc %{_mandir}/man7/ocf_pacemaker_controld.* +%doc %{_mandir}/man7/ocf_pacemaker_o2cb.* %doc %{_mandir}/man7/ocf_pacemaker_remote.* -%doc %{_mandir}/man8/crm_master.* +%if %{with stonithd} +%doc %{_mandir}/man8/fence_legacy.* +%endif %doc %{_mandir}/man8/fence_watchdog.* %doc %{_mandir}/man8/pacemakerd.* @@ -746,6 +741,7 @@ exit 0 %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine %{ocf_root}/resource.d/pacemaker/controld +%{ocf_root}/resource.d/pacemaker/o2cb %{ocf_root}/resource.d/pacemaker/remote %files cli @@ -764,6 +760,7 @@ exit 0 %{_sbindir}/crm_diff %{_sbindir}/crm_error %{_sbindir}/crm_failcount +%{_sbindir}/crm_master %{_sbindir}/crm_mon %{_sbindir}/crm_node %{_sbindir}/crm_resource @@ -799,7 +796,6 @@ exit 0 %exclude %{_mandir}/man7/ocf_pacemaker_o2cb.* %exclude %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/crm*.8.gz -%exclude %{_mandir}/man8/crm_master.* %doc %{_mandir}/man8/attrd_updater.* %doc %{_mandir}/man8/cibadmin.* %if %{with cibsecrets} @@ -865,7 +861,6 @@ exit 0 %license licenses/CC-BY-SA-4.0 %files cts -%{python_site}/cts %{python3_sitelib}/pacemaker/_cts/ %{_datadir}/pacemaker/tests @@ -908,6 +903,11 @@ exit 0 %license %{nagios_name}-%{nagios_hash}/COPYING %changelog +* Wed Nov 22 2023 Chris Lumens - 2.1.7-1 +- Rebase on upstream 2.1.7-rc2 release +- Resolves: RHEL-7682 +- 
Related: RHEL-17225 + * Tue Oct 31 2023 Chris Lumens - 2.1.6-10.1 - Revert the rest of the attrd shutdown race condition fix - Related: RHEL-14044 diff --git a/sources b/sources index 01010a1..c018296 100644 --- a/sources +++ b/sources @@ -1,2 +1,2 @@ SHA512 (nagios-agents-metadata-105ab8a7b2c16b9a29cf1c1596b80136eeef332b.tar.gz) = 11ddeb48a4929e7642b6dfa9c7962aa1d7a1af1c569830f55ed6cd6773abac13377317327bc1db8411c8077884f83f81cc54d746c834b63a99fa6dc219b5caad -SHA512 (pacemaker-6fdc9deea.tar.gz) = 3f8ce62c362dab8e74f6a752e291036bcdc4aea940c5cc9a4de3c8b4bab7d7b7ce0e68430502f51c382540a449297380f5b46f1a2271b5106709c33d3023eedf +SHA512 (pacemaker-7534cc50a.tar.gz) = 70703c68e3249a2fcc4d88d53b78a2a990163ec59b32fb4c5c6993cb53943900279017f529f9bf29a06e59fd67bb2d631719ed5a41848b01a54497caf90e0b20