From e0f6d5b090bfdd21cabe0167286d9d24d4caa83f Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Tue, 28 Jul 2020 04:15:05 -0400 Subject: [PATCH] import pacemaker-2.0.4-3.el8 --- .gitignore | 2 +- .pacemaker.metadata | 2 +- SOURCES/001-rules.patch | 4947 ++++++++++++++++++++ SOURCES/002-demote.patch | 8664 +++++++++++++++++++++++++++++++++++ SOURCES/003-trace.patch | 30 + SOURCES/004-test.patch | 27 + SOURCES/005-sysconfig.patch | 32 + SPECS/pacemaker.spec | 192 +- 8 files changed, 13824 insertions(+), 72 deletions(-) create mode 100644 SOURCES/001-rules.patch create mode 100644 SOURCES/002-demote.patch create mode 100644 SOURCES/003-trace.patch create mode 100644 SOURCES/004-test.patch create mode 100644 SOURCES/005-sysconfig.patch diff --git a/.gitignore b/.gitignore index 302b56c..6e6662f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,2 @@ SOURCES/nagios-agents-metadata-105ab8a.tar.gz -SOURCES/pacemaker-4b1f869.tar.gz +SOURCES/pacemaker-2deceaa.tar.gz diff --git a/.pacemaker.metadata b/.pacemaker.metadata index 1c52241..6a9af04 100644 --- a/.pacemaker.metadata +++ b/.pacemaker.metadata @@ -1,2 +1,2 @@ ea6c0a27fd0ae8ce02f84a11f08a0d79377041c3 SOURCES/nagios-agents-metadata-105ab8a.tar.gz -dfd19e7ec7aa96520f4948fc37d48ea69835bbdb SOURCES/pacemaker-4b1f869.tar.gz +78c94fdcf59cfb064d4433e1b8f71fd856eeec5f SOURCES/pacemaker-2deceaa.tar.gz diff --git a/SOURCES/001-rules.patch b/SOURCES/001-rules.patch new file mode 100644 index 0000000..0133975 --- /dev/null +++ b/SOURCES/001-rules.patch @@ -0,0 +1,4947 @@ +From 2f10dde2f2a0ac7a3d74cb2f398be1deaba75615 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 6 Apr 2020 11:22:50 -0400 +Subject: [PATCH 01/17] Feature: scheduler: Add new expression_type values. + +--- + include/crm/pengine/rules.h | 4 +++- + lib/pengine/rules.c | 6 ++++++ + 2 files changed, 9 insertions(+), 1 deletion(-) + +diff --git a/include/crm/pengine/rules.h b/include/crm/pengine/rules.h +index ebd3148..37f092b 100644 +--- a/include/crm/pengine/rules.h ++++ b/include/crm/pengine/rules.h +@@ -28,7 +28,9 @@ enum expression_type { + loc_expr, + role_expr, + time_expr, +- version_expr ++ version_expr, ++ rsc_expr, ++ op_expr + }; + + typedef struct pe_re_match_data { +diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c +index fa9a222..130bada 100644 +--- a/lib/pengine/rules.c ++++ b/lib/pengine/rules.c +@@ -189,6 +189,12 @@ find_expression_type(xmlNode * expr) + if (safe_str_eq(tag, "date_expression")) { + return time_expr; + ++ } else if (safe_str_eq(tag, "rsc_expression")) { ++ return rsc_expr; ++ ++ } else if (safe_str_eq(tag, "op_expression")) { ++ return op_expr; ++ + } else if (safe_str_eq(tag, XML_TAG_RULE)) { + return nested_rule; + +-- +1.8.3.1 + + +From bc7491e5226af2a2e7f1a9b2d61892d3af0767fe Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 3 Apr 2020 15:03:23 -0400 +Subject: [PATCH 02/17] Refactor: scheduler: Add new pe__eval_*_expr functions. + +These new functions all take the same input arguments - an xmlNodePtr +and a pe_rule_eval_data_t. This latter type holds all the parameters +that could possibly be useful for evaluating some rule. Most functions +will only need a few items out of this structure. + +Then, implement pe_test_*_expression in terms of these new functions. 
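
A minimal sketch of the resulting calling convention, assuming the usual libpe
headers (the helper name here is hypothetical; pe_rule_eval_data_t and
pe__eval_attr_expr are exactly as added in the hunks below -- members an
expression type never consults are simply left NULL, just as the wrappers do):

    #include <crm/pengine/rules.h>
    #include <crm/pengine/rules_internal.h>

    /* Hypothetical caller: test one attribute expression against a node's
     * attribute table.  Only node_hash and now matter for attr expressions.
     */
    static gboolean
    example_eval_attr(xmlNode *expr, GHashTable *node_attrs, crm_time_t *now)
    {
        pe_rule_eval_data_t rule_data = {
            .node_hash = node_attrs,
            .role = RSC_ROLE_UNKNOWN,
            .now = now,
            .match_data = NULL,
            .rsc_data = NULL,
            .op_data = NULL
        };

        return pe__eval_attr_expr(expr, &rule_data);
    }
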
+--- + include/crm/pengine/common.h | 37 ++- + include/crm/pengine/rules.h | 13 - + include/crm/pengine/rules_internal.h | 5 + + lib/pengine/rules.c | 592 +++++++++++++++++++---------------- + 4 files changed, 363 insertions(+), 284 deletions(-) + +diff --git a/include/crm/pengine/common.h b/include/crm/pengine/common.h +index 48c2b66..3a770b7 100644 +--- a/include/crm/pengine/common.h ++++ b/include/crm/pengine/common.h +@@ -1,5 +1,5 @@ + /* +- * Copyright 2004-2019 the Pacemaker project contributors ++ * Copyright 2004-2020 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * +@@ -15,6 +15,9 @@ extern "C" { + #endif + + # include ++# include ++ ++# include + + extern gboolean was_processing_error; + extern gboolean was_processing_warning; +@@ -131,6 +134,38 @@ recovery2text(enum rsc_recovery_type type) + return "Unknown"; + } + ++typedef struct pe_re_match_data { ++ char *string; ++ int nregs; ++ regmatch_t *pmatch; ++} pe_re_match_data_t; ++ ++typedef struct pe_match_data { ++ pe_re_match_data_t *re; ++ GHashTable *params; ++ GHashTable *meta; ++} pe_match_data_t; ++ ++typedef struct pe_rsc_eval_data { ++ const char *standard; ++ const char *provider; ++ const char *agent; ++} pe_rsc_eval_data_t; ++ ++typedef struct pe_op_eval_data { ++ const char *op_name; ++ guint interval; ++} pe_op_eval_data_t; ++ ++typedef struct pe_rule_eval_data { ++ GHashTable *node_hash; ++ enum rsc_role_e role; ++ crm_time_t *now; ++ pe_match_data_t *match_data; ++ pe_rsc_eval_data_t *rsc_data; ++ pe_op_eval_data_t *op_data; ++} pe_rule_eval_data_t; ++ + #ifdef __cplusplus + } + #endif +diff --git a/include/crm/pengine/rules.h b/include/crm/pengine/rules.h +index 37f092b..d7bdbf9 100644 +--- a/include/crm/pengine/rules.h ++++ b/include/crm/pengine/rules.h +@@ -15,7 +15,6 @@ extern "C" { + #endif + + # include +-# include + + # include + # include +@@ -33,18 +32,6 @@ enum expression_type { + op_expr + }; + +-typedef struct pe_re_match_data { +- char *string; +- int nregs; +- regmatch_t *pmatch; +-} pe_re_match_data_t; +- +-typedef struct pe_match_data { +- pe_re_match_data_t *re; +- GHashTable *params; +- GHashTable *meta; +-} pe_match_data_t; +- + enum expression_type find_expression_type(xmlNode * expr); + + gboolean pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash, +diff --git a/include/crm/pengine/rules_internal.h b/include/crm/pengine/rules_internal.h +index fd65c1e..8a22108 100644 +--- a/include/crm/pengine/rules_internal.h ++++ b/include/crm/pengine/rules_internal.h +@@ -21,6 +21,11 @@ void pe_free_alert_list(GListPtr alert_list); + + crm_time_t *pe_parse_xml_duration(crm_time_t * start, xmlNode * duration_spec); + ++gboolean pe__eval_attr_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data); ++int pe__eval_date_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data, ++ crm_time_t *next_change); ++gboolean pe__eval_role_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data); ++ + int pe_eval_date_expression(xmlNode *time_expr, + crm_time_t *now, + crm_time_t *next_change); +diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c +index 130bada..3f316c2 100644 +--- a/lib/pengine/rules.c ++++ b/lib/pengine/rules.c +@@ -219,201 +219,34 @@ find_expression_type(xmlNode * expr) + } + + gboolean +-pe_test_role_expression(xmlNode * expr, enum rsc_role_e role, crm_time_t * now) ++pe_test_role_expression(xmlNode *expr, enum rsc_role_e role, crm_time_t *now) + { +- gboolean accept = FALSE; +- const char *op = NULL; +- const char *value = NULL; +- +- if 
(role == RSC_ROLE_UNKNOWN) { +- return accept; +- } +- +- value = crm_element_value(expr, XML_EXPR_ATTR_VALUE); +- op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION); +- +- if (safe_str_eq(op, "defined")) { +- if (role > RSC_ROLE_STARTED) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "not_defined")) { +- if (role < RSC_ROLE_SLAVE && role > RSC_ROLE_UNKNOWN) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "eq")) { +- if (text2role(value) == role) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "ne")) { +- // Test "ne" only with promotable clone roles +- if (role < RSC_ROLE_SLAVE && role > RSC_ROLE_UNKNOWN) { +- accept = FALSE; +- +- } else if (text2role(value) != role) { +- accept = TRUE; +- } +- } +- return accept; ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = role, ++ .now = now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ ++ return pe__eval_role_expr(expr, &rule_data); + } + + gboolean + pe_test_attr_expression(xmlNode *expr, GHashTable *hash, crm_time_t *now, + pe_match_data_t *match_data) + { +- gboolean accept = FALSE; +- gboolean attr_allocated = FALSE; +- int cmp = 0; +- const char *h_val = NULL; +- GHashTable *table = NULL; +- +- const char *op = NULL; +- const char *type = NULL; +- const char *attr = NULL; +- const char *value = NULL; +- const char *value_source = NULL; +- +- attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE); +- op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION); +- value = crm_element_value(expr, XML_EXPR_ATTR_VALUE); +- type = crm_element_value(expr, XML_EXPR_ATTR_TYPE); +- value_source = crm_element_value(expr, XML_EXPR_ATTR_VALUE_SOURCE); +- +- if (attr == NULL || op == NULL) { +- pe_err("Invalid attribute or operation in expression" +- " (\'%s\' \'%s\' \'%s\')", crm_str(attr), crm_str(op), crm_str(value)); +- return FALSE; +- } +- +- if (match_data) { +- if (match_data->re) { +- char *resolved_attr = pe_expand_re_matches(attr, match_data->re); +- +- if (resolved_attr) { +- attr = (const char *) resolved_attr; +- attr_allocated = TRUE; +- } +- } +- +- if (safe_str_eq(value_source, "param")) { +- table = match_data->params; +- } else if (safe_str_eq(value_source, "meta")) { +- table = match_data->meta; +- } +- } +- +- if (table) { +- const char *param_name = value; +- const char *param_value = NULL; +- +- if (param_name && param_name[0]) { +- if ((param_value = (const char *)g_hash_table_lookup(table, param_name))) { +- value = param_value; +- } +- } +- } +- +- if (hash != NULL) { +- h_val = (const char *)g_hash_table_lookup(hash, attr); +- } +- +- if (attr_allocated) { +- free((char *)attr); +- attr = NULL; +- } +- +- if (value != NULL && h_val != NULL) { +- if (type == NULL) { +- if (safe_str_eq(op, "lt") +- || safe_str_eq(op, "lte") +- || safe_str_eq(op, "gt") +- || safe_str_eq(op, "gte")) { +- type = "number"; +- +- } else { +- type = "string"; +- } +- crm_trace("Defaulting to %s based comparison for '%s' op", type, op); +- } +- +- if (safe_str_eq(type, "string")) { +- cmp = strcasecmp(h_val, value); +- +- } else if (safe_str_eq(type, "number")) { +- int h_val_f = crm_parse_int(h_val, NULL); +- int value_f = crm_parse_int(value, NULL); +- +- if (h_val_f < value_f) { +- cmp = -1; +- } else if (h_val_f > value_f) { +- cmp = 1; +- } else { +- cmp = 0; +- } +- +- } else if (safe_str_eq(type, "version")) { +- cmp = compare_version(h_val, value); +- +- } +- +- } else if (value == NULL && h_val == NULL) { +- cmp = 0; +- } else if (value == NULL) { +- cmp = 1; +- } else 
{ +- cmp = -1; +- } +- +- if (safe_str_eq(op, "defined")) { +- if (h_val != NULL) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "not_defined")) { +- if (h_val == NULL) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "eq")) { +- if ((h_val == value) || cmp == 0) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "ne")) { +- if ((h_val == NULL && value != NULL) +- || (h_val != NULL && value == NULL) +- || cmp != 0) { +- accept = TRUE; +- } +- +- } else if (value == NULL || h_val == NULL) { +- // The comparison is meaningless from this point on +- accept = FALSE; +- +- } else if (safe_str_eq(op, "lt")) { +- if (cmp < 0) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "lte")) { +- if (cmp <= 0) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "gt")) { +- if (cmp > 0) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "gte")) { +- if (cmp >= 0) { +- accept = TRUE; +- } +- } +- +- return accept; ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = hash, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = now, ++ .match_data = match_data, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ ++ return pe__eval_attr_expr(expr, &rule_data); + } + + /* As per the nethack rules: +@@ -587,10 +420,18 @@ pe_parse_xml_duration(crm_time_t * start, xmlNode * duration_spec) + * \return TRUE if date expression is in effect at given time, FALSE otherwise + */ + gboolean +-pe_test_date_expression(xmlNode *time_expr, crm_time_t *now, +- crm_time_t *next_change) ++pe_test_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change) + { +- switch (pe_eval_date_expression(time_expr, now, next_change)) { ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ ++ switch (pe__eval_date_expr(expr, &rule_data, next_change)) { + case pcmk_rc_within_range: + case pcmk_rc_ok: + return TRUE; +@@ -623,86 +464,18 @@ crm_time_set_if_earlier(crm_time_t *next_change, crm_time_t *t) + * \return Standard Pacemaker return code + */ + int +-pe_eval_date_expression(xmlNode *time_expr, crm_time_t *now, +- crm_time_t *next_change) ++pe_eval_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change) + { +- crm_time_t *start = NULL; +- crm_time_t *end = NULL; +- const char *value = NULL; +- const char *op = crm_element_value(time_expr, "operation"); +- +- xmlNode *duration_spec = NULL; +- xmlNode *date_spec = NULL; +- +- // "undetermined" will also be returned for parsing errors +- int rc = pcmk_rc_undetermined; +- +- crm_trace("Testing expression: %s", ID(time_expr)); +- +- duration_spec = first_named_child(time_expr, "duration"); +- date_spec = first_named_child(time_expr, "date_spec"); +- +- value = crm_element_value(time_expr, "start"); +- if (value != NULL) { +- start = crm_time_new(value); +- } +- value = crm_element_value(time_expr, "end"); +- if (value != NULL) { +- end = crm_time_new(value); +- } +- +- if (start != NULL && end == NULL && duration_spec != NULL) { +- end = pe_parse_xml_duration(start, duration_spec); +- } +- +- if ((op == NULL) || safe_str_eq(op, "in_range")) { +- if ((start == NULL) && (end == NULL)) { +- // in_range requires at least one of start or end +- } else if ((start != NULL) && (crm_time_compare(now, start) < 0)) { +- rc = pcmk_rc_before_range; +- crm_time_set_if_earlier(next_change, start); +- } else if ((end != NULL) && (crm_time_compare(now, end) > 0)) { +- rc = pcmk_rc_after_range; +- } else { +- rc = 
pcmk_rc_within_range; +- if (end && next_change) { +- // Evaluation doesn't change until second after end +- crm_time_add_seconds(end, 1); +- crm_time_set_if_earlier(next_change, end); +- } +- } +- +- } else if (safe_str_eq(op, "date_spec")) { +- rc = pe_cron_range_satisfied(now, date_spec); +- // @TODO set next_change appropriately +- +- } else if (safe_str_eq(op, "gt")) { +- if (start == NULL) { +- // gt requires start +- } else if (crm_time_compare(now, start) > 0) { +- rc = pcmk_rc_within_range; +- } else { +- rc = pcmk_rc_before_range; +- +- // Evaluation doesn't change until second after start +- crm_time_add_seconds(start, 1); +- crm_time_set_if_earlier(next_change, start); +- } +- +- } else if (safe_str_eq(op, "lt")) { +- if (end == NULL) { +- // lt requires end +- } else if (crm_time_compare(now, end) < 0) { +- rc = pcmk_rc_within_range; +- crm_time_set_if_earlier(next_change, end); +- } else { +- rc = pcmk_rc_after_range; +- } +- } +- +- crm_time_free(start); +- crm_time_free(end); +- return rc; ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ ++ return pe__eval_date_expr(expr, &rule_data, next_change); + } + + // Information about a block of nvpair elements +@@ -1111,6 +884,285 @@ pe_unpack_versioned_parameters(xmlNode *versioned_params, const char *ra_version + } + #endif + ++gboolean ++pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) ++{ ++ gboolean accept = FALSE; ++ gboolean attr_allocated = FALSE; ++ int cmp = 0; ++ const char *h_val = NULL; ++ GHashTable *table = NULL; ++ ++ const char *op = NULL; ++ const char *type = NULL; ++ const char *attr = NULL; ++ const char *value = NULL; ++ const char *value_source = NULL; ++ ++ attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE); ++ op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION); ++ value = crm_element_value(expr, XML_EXPR_ATTR_VALUE); ++ type = crm_element_value(expr, XML_EXPR_ATTR_TYPE); ++ value_source = crm_element_value(expr, XML_EXPR_ATTR_VALUE_SOURCE); ++ ++ if (attr == NULL || op == NULL) { ++ pe_err("Invalid attribute or operation in expression" ++ " (\'%s\' \'%s\' \'%s\')", crm_str(attr), crm_str(op), crm_str(value)); ++ return FALSE; ++ } ++ ++ if (rule_data->match_data) { ++ if (rule_data->match_data->re) { ++ char *resolved_attr = pe_expand_re_matches(attr, rule_data->match_data->re); ++ ++ if (resolved_attr) { ++ attr = (const char *) resolved_attr; ++ attr_allocated = TRUE; ++ } ++ } ++ ++ if (safe_str_eq(value_source, "param")) { ++ table = rule_data->match_data->params; ++ } else if (safe_str_eq(value_source, "meta")) { ++ table = rule_data->match_data->meta; ++ } ++ } ++ ++ if (table) { ++ const char *param_name = value; ++ const char *param_value = NULL; ++ ++ if (param_name && param_name[0]) { ++ if ((param_value = (const char *)g_hash_table_lookup(table, param_name))) { ++ value = param_value; ++ } ++ } ++ } ++ ++ if (rule_data->node_hash != NULL) { ++ h_val = (const char *)g_hash_table_lookup(rule_data->node_hash, attr); ++ } ++ ++ if (attr_allocated) { ++ free((char *)attr); ++ attr = NULL; ++ } ++ ++ if (value != NULL && h_val != NULL) { ++ if (type == NULL) { ++ if (safe_str_eq(op, "lt") ++ || safe_str_eq(op, "lte") ++ || safe_str_eq(op, "gt") ++ || safe_str_eq(op, "gte")) { ++ type = "number"; ++ ++ } else { ++ type = "string"; ++ } ++ crm_trace("Defaulting to %s based comparison for '%s' op", type, op); ++ } ++ ++ if (safe_str_eq(type, "string")) { 
++ cmp = strcasecmp(h_val, value); ++ ++ } else if (safe_str_eq(type, "number")) { ++ int h_val_f = crm_parse_int(h_val, NULL); ++ int value_f = crm_parse_int(value, NULL); ++ ++ if (h_val_f < value_f) { ++ cmp = -1; ++ } else if (h_val_f > value_f) { ++ cmp = 1; ++ } else { ++ cmp = 0; ++ } ++ ++ } else if (safe_str_eq(type, "version")) { ++ cmp = compare_version(h_val, value); ++ ++ } ++ ++ } else if (value == NULL && h_val == NULL) { ++ cmp = 0; ++ } else if (value == NULL) { ++ cmp = 1; ++ } else { ++ cmp = -1; ++ } ++ ++ if (safe_str_eq(op, "defined")) { ++ if (h_val != NULL) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "not_defined")) { ++ if (h_val == NULL) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "eq")) { ++ if ((h_val == value) || cmp == 0) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "ne")) { ++ if ((h_val == NULL && value != NULL) ++ || (h_val != NULL && value == NULL) ++ || cmp != 0) { ++ accept = TRUE; ++ } ++ ++ } else if (value == NULL || h_val == NULL) { ++ // The comparison is meaningless from this point on ++ accept = FALSE; ++ ++ } else if (safe_str_eq(op, "lt")) { ++ if (cmp < 0) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "lte")) { ++ if (cmp <= 0) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "gt")) { ++ if (cmp > 0) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "gte")) { ++ if (cmp >= 0) { ++ accept = TRUE; ++ } ++ } ++ ++ return accept; ++} ++ ++int ++pe__eval_date_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data, crm_time_t *next_change) ++{ ++ crm_time_t *start = NULL; ++ crm_time_t *end = NULL; ++ const char *value = NULL; ++ const char *op = crm_element_value(expr, "operation"); ++ ++ xmlNode *duration_spec = NULL; ++ xmlNode *date_spec = NULL; ++ ++ // "undetermined" will also be returned for parsing errors ++ int rc = pcmk_rc_undetermined; ++ ++ crm_trace("Testing expression: %s", ID(expr)); ++ ++ duration_spec = first_named_child(expr, "duration"); ++ date_spec = first_named_child(expr, "date_spec"); ++ ++ value = crm_element_value(expr, "start"); ++ if (value != NULL) { ++ start = crm_time_new(value); ++ } ++ value = crm_element_value(expr, "end"); ++ if (value != NULL) { ++ end = crm_time_new(value); ++ } ++ ++ if (start != NULL && end == NULL && duration_spec != NULL) { ++ end = pe_parse_xml_duration(start, duration_spec); ++ } ++ ++ if ((op == NULL) || safe_str_eq(op, "in_range")) { ++ if ((start == NULL) && (end == NULL)) { ++ // in_range requires at least one of start or end ++ } else if ((start != NULL) && (crm_time_compare(rule_data->now, start) < 0)) { ++ rc = pcmk_rc_before_range; ++ crm_time_set_if_earlier(next_change, start); ++ } else if ((end != NULL) && (crm_time_compare(rule_data->now, end) > 0)) { ++ rc = pcmk_rc_after_range; ++ } else { ++ rc = pcmk_rc_within_range; ++ if (end && next_change) { ++ // Evaluation doesn't change until second after end ++ crm_time_add_seconds(end, 1); ++ crm_time_set_if_earlier(next_change, end); ++ } ++ } ++ ++ } else if (safe_str_eq(op, "date_spec")) { ++ rc = pe_cron_range_satisfied(rule_data->now, date_spec); ++ // @TODO set next_change appropriately ++ ++ } else if (safe_str_eq(op, "gt")) { ++ if (start == NULL) { ++ // gt requires start ++ } else if (crm_time_compare(rule_data->now, start) > 0) { ++ rc = pcmk_rc_within_range; ++ } else { ++ rc = pcmk_rc_before_range; ++ ++ // Evaluation doesn't change until second after start ++ crm_time_add_seconds(start, 1); ++ crm_time_set_if_earlier(next_change, start); ++ } ++ ++ } 
else if (safe_str_eq(op, "lt")) { ++ if (end == NULL) { ++ // lt requires end ++ } else if (crm_time_compare(rule_data->now, end) < 0) { ++ rc = pcmk_rc_within_range; ++ crm_time_set_if_earlier(next_change, end); ++ } else { ++ rc = pcmk_rc_after_range; ++ } ++ } ++ ++ crm_time_free(start); ++ crm_time_free(end); ++ return rc; ++} ++ ++gboolean ++pe__eval_role_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) ++{ ++ gboolean accept = FALSE; ++ const char *op = NULL; ++ const char *value = NULL; ++ ++ if (rule_data->role == RSC_ROLE_UNKNOWN) { ++ return accept; ++ } ++ ++ value = crm_element_value(expr, XML_EXPR_ATTR_VALUE); ++ op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION); ++ ++ if (safe_str_eq(op, "defined")) { ++ if (rule_data->role > RSC_ROLE_STARTED) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "not_defined")) { ++ if (rule_data->role < RSC_ROLE_SLAVE && rule_data->role > RSC_ROLE_UNKNOWN) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "eq")) { ++ if (text2role(value) == rule_data->role) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "ne")) { ++ // Test "ne" only with promotable clone roles ++ if (rule_data->role < RSC_ROLE_SLAVE && rule_data->role > RSC_ROLE_UNKNOWN) { ++ accept = FALSE; ++ ++ } else if (text2role(value) != rule_data->role) { ++ accept = TRUE; ++ } ++ } ++ return accept; ++} ++ + // Deprecated functions kept only for backward API compatibility + gboolean test_ruleset(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now); + gboolean test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role, +-- +1.8.3.1 + + +From 56a1337a54f3ba8a175ff3252658e1e43f7c670b Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Tue, 28 Apr 2020 14:34:40 -0400 +Subject: [PATCH 03/17] Feature: scheduler: Add new rule tests for op_defaults + and rsc_defaults. + +These are like all the other rule evaluating functions, but they do not +have any wrappers for the older style API. 
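
As a hedged sketch of how these new evaluators are driven (the wrapper name is
hypothetical; pe_op_eval_data_t, pe__eval_op_expr, and the interval handling
mirror the hunks below, which themselves parse intervals with the existing
crm_parse_interval_spec()):

    /* Hypothetical: does this op_expression match a 10-second recurring
     * monitor?  pe__eval_op_expr() compares the expression's "name" and
     * "interval" XML attributes against op_data, returning TRUE on a match.
     */
    static gboolean
    example_matches_monitor(xmlNode *op_expr)
    {
        pe_op_eval_data_t op_data = {
            .op_name = "monitor",
            .interval = crm_parse_interval_spec("10s")  /* 10000 ms */
        };

        pe_rule_eval_data_t rule_data = {
            .node_hash = NULL,
            .role = RSC_ROLE_UNKNOWN,
            .now = NULL,
            .match_data = NULL,
            .rsc_data = NULL,
            .op_data = &op_data
        };

        return pe__eval_op_expr(op_expr, &rule_data);
    }
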
+--- + include/crm/pengine/rules_internal.h | 2 ++ + lib/pengine/rules.c | 68 ++++++++++++++++++++++++++++++++++++ + 2 files changed, 70 insertions(+) + +diff --git a/include/crm/pengine/rules_internal.h b/include/crm/pengine/rules_internal.h +index 8a22108..f60263a 100644 +--- a/include/crm/pengine/rules_internal.h ++++ b/include/crm/pengine/rules_internal.h +@@ -24,7 +24,9 @@ crm_time_t *pe_parse_xml_duration(crm_time_t * start, xmlNode * duration_spec); + gboolean pe__eval_attr_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data); + int pe__eval_date_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data, + crm_time_t *next_change); ++gboolean pe__eval_op_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data); + gboolean pe__eval_role_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data); ++gboolean pe__eval_rsc_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data); + + int pe_eval_date_expression(xmlNode *time_expr, + crm_time_t *now, +diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c +index 3f316c2..a5af57a 100644 +--- a/lib/pengine/rules.c ++++ b/lib/pengine/rules.c +@@ -1123,6 +1123,38 @@ pe__eval_date_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data, crm_time_t * + } + + gboolean ++pe__eval_op_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) { ++ const char *name = crm_element_value(expr, XML_NVPAIR_ATTR_NAME); ++ const char *interval_s = crm_element_value(expr, XML_LRM_ATTR_INTERVAL); ++ guint interval; ++ ++ crm_trace("Testing op_defaults expression: %s", ID(expr)); ++ ++ if (rule_data->op_data == NULL) { ++ crm_trace("No operations data provided"); ++ return FALSE; ++ } ++ ++ interval = crm_parse_interval_spec(interval_s); ++ if (interval == 0 && errno != 0) { ++ crm_trace("Could not parse interval: %s", interval_s); ++ return FALSE; ++ } ++ ++ if (interval_s != NULL && interval != rule_data->op_data->interval) { ++ crm_trace("Interval doesn't match: %d != %d", interval, rule_data->op_data->interval); ++ return FALSE; ++ } ++ ++ if (!crm_str_eq(name, rule_data->op_data->op_name, TRUE)) { ++ crm_trace("Name doesn't match: %s != %s", name, rule_data->op_data->op_name); ++ return FALSE; ++ } ++ ++ return TRUE; ++} ++ ++gboolean + pe__eval_role_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) + { + gboolean accept = FALSE; +@@ -1163,6 +1195,42 @@ pe__eval_role_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) + return accept; + } + ++gboolean ++pe__eval_rsc_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) ++{ ++ const char *class = crm_element_value(expr, XML_AGENT_ATTR_CLASS); ++ const char *provider = crm_element_value(expr, XML_AGENT_ATTR_PROVIDER); ++ const char *type = crm_element_value(expr, XML_EXPR_ATTR_TYPE); ++ ++ crm_trace("Testing rsc_defaults expression: %s", ID(expr)); ++ ++ if (rule_data->rsc_data == NULL) { ++ crm_trace("No resource data provided"); ++ return FALSE; ++ } ++ ++ if (class != NULL && ++ !crm_str_eq(class, rule_data->rsc_data->standard, TRUE)) { ++ crm_trace("Class doesn't match: %s != %s", class, rule_data->rsc_data->standard); ++ return FALSE; ++ } ++ ++ if ((provider == NULL && rule_data->rsc_data->provider != NULL) || ++ (provider != NULL && rule_data->rsc_data->provider == NULL) || ++ !crm_str_eq(provider, rule_data->rsc_data->provider, TRUE)) { ++ crm_trace("Provider doesn't match: %s != %s", provider, rule_data->rsc_data->provider); ++ return FALSE; ++ } ++ ++ if (type != NULL && ++ !crm_str_eq(type, rule_data->rsc_data->agent, TRUE)) { ++ crm_trace("Agent doesn't match: %s != %s", type, rule_data->rsc_data->agent); ++ return 
FALSE; ++ } ++ ++ return TRUE; ++} ++ + // Deprecated functions kept only for backward API compatibility + gboolean test_ruleset(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now); + gboolean test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role, +-- +1.8.3.1 + + +From 5a4da3f77feee0d3bac50e9adc4eb4b35724dfb2 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Tue, 28 Apr 2020 14:41:08 -0400 +Subject: [PATCH 04/17] Refactor: scheduler: Reimplement core rule eval + functions. + +The core functions of pe_evaluate_rules, pe_test_rule, and +pe_test_expression have been turned into new, similarly named functions +that take a pe_rule_eval_data_t as an argument. The old ones still +exist as wrappers around the new ones. +--- + include/crm/pengine/rules.h | 7 ++ + lib/pengine/rules.c | 259 ++++++++++++++++++++++++++------------------ + 2 files changed, 162 insertions(+), 104 deletions(-) + +diff --git a/include/crm/pengine/rules.h b/include/crm/pengine/rules.h +index d7bdbf9..a74c629 100644 +--- a/include/crm/pengine/rules.h ++++ b/include/crm/pengine/rules.h +@@ -61,6 +61,13 @@ GHashTable *pe_unpack_versioned_parameters(xmlNode *versioned_params, const char + + char *pe_expand_re_matches(const char *string, pe_re_match_data_t * match_data); + ++gboolean pe_eval_rules(xmlNode *ruleset, pe_rule_eval_data_t *rule_data, ++ crm_time_t *next_change); ++gboolean pe_eval_expr(xmlNode *rule, pe_rule_eval_data_t *rule_data, ++ crm_time_t *next_change); ++gboolean pe_eval_subexpr(xmlNode *expr, pe_rule_eval_data_t *rule_data, ++ crm_time_t *next_change); ++ + #ifndef PCMK__NO_COMPAT + /* Everything here is deprecated and kept only for public API backward + * compatibility. It will be moved to compatibility.h when 2.1.0 is released. +diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c +index a5af57a..a6353ef 100644 +--- a/lib/pengine/rules.c ++++ b/lib/pengine/rules.c +@@ -38,25 +38,16 @@ gboolean + pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now, + crm_time_t *next_change) + { +- // If there are no rules, pass by default +- gboolean ruleset_default = TRUE; +- +- for (xmlNode *rule = first_named_child(ruleset, XML_TAG_RULE); +- rule != NULL; rule = crm_next_same_xml(rule)) { ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = node_hash, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; + +- ruleset_default = FALSE; +- if (pe_test_rule(rule, node_hash, RSC_ROLE_UNKNOWN, now, next_change, +- NULL)) { +- /* Only the deprecated "lifetime" element of location constraints +- * may contain more than one rule at the top level -- the schema +- * limits a block of nvpairs to a single top-level rule. So, this +- * effectively means that a lifetime is active if any rule it +- * contains is active. 
+- */ +- return TRUE; +- } +- } +- return ruleset_default; ++ return pe_eval_rules(ruleset, &rule_data, next_change); + } + + gboolean +@@ -64,44 +55,16 @@ pe_test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role, + crm_time_t *now, crm_time_t *next_change, + pe_match_data_t *match_data) + { +- xmlNode *expr = NULL; +- gboolean test = TRUE; +- gboolean empty = TRUE; +- gboolean passed = TRUE; +- gboolean do_and = TRUE; +- const char *value = NULL; +- +- rule = expand_idref(rule, NULL); +- value = crm_element_value(rule, XML_RULE_ATTR_BOOLEAN_OP); +- if (safe_str_eq(value, "or")) { +- do_and = FALSE; +- passed = FALSE; +- } +- +- crm_trace("Testing rule %s", ID(rule)); +- for (expr = __xml_first_child_element(rule); expr != NULL; +- expr = __xml_next_element(expr)) { +- +- test = pe_test_expression(expr, node_hash, role, now, next_change, +- match_data); +- empty = FALSE; +- +- if (test && do_and == FALSE) { +- crm_trace("Expression %s/%s passed", ID(rule), ID(expr)); +- return TRUE; +- +- } else if (test == FALSE && do_and) { +- crm_trace("Expression %s/%s failed", ID(rule), ID(expr)); +- return FALSE; +- } +- } +- +- if (empty) { +- crm_err("Invalid Rule %s: rules must contain at least one expression", ID(rule)); +- } ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = node_hash, ++ .role = role, ++ .now = now, ++ .match_data = match_data, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; + +- crm_trace("Rule %s %s", ID(rule), passed ? "passed" : "failed"); +- return passed; ++ return pe_eval_expr(rule, &rule_data, next_change); + } + + /*! +@@ -125,56 +88,16 @@ pe_test_expression(xmlNode *expr, GHashTable *node_hash, enum rsc_role_e role, + crm_time_t *now, crm_time_t *next_change, + pe_match_data_t *match_data) + { +- gboolean accept = FALSE; +- const char *uname = NULL; +- +- switch (find_expression_type(expr)) { +- case nested_rule: +- accept = pe_test_rule(expr, node_hash, role, now, next_change, +- match_data); +- break; +- case attr_expr: +- case loc_expr: +- /* these expressions can never succeed if there is +- * no node to compare with +- */ +- if (node_hash != NULL) { +- accept = pe_test_attr_expression(expr, node_hash, now, match_data); +- } +- break; +- +- case time_expr: +- accept = pe_test_date_expression(expr, now, next_change); +- break; +- +- case role_expr: +- accept = pe_test_role_expression(expr, role, now); +- break; +- +-#if ENABLE_VERSIONED_ATTRS +- case version_expr: +- if (node_hash && g_hash_table_lookup_extended(node_hash, +- CRM_ATTR_RA_VERSION, +- NULL, NULL)) { +- accept = pe_test_attr_expression(expr, node_hash, now, NULL); +- } else { +- // we are going to test it when we have ra-version +- accept = TRUE; +- } +- break; +-#endif +- +- default: +- CRM_CHECK(FALSE /* bad type */ , return FALSE); +- accept = FALSE; +- } +- if (node_hash) { +- uname = g_hash_table_lookup(node_hash, CRM_ATTR_UNAME); +- } ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = node_hash, ++ .role = role, ++ .now = now, ++ .match_data = match_data, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; + +- crm_trace("Expression %s %s on %s", +- ID(expr), accept ? "passed" : "failed", uname ? 
uname : "all nodes"); +- return accept; ++ return pe_eval_subexpr(expr, &rule_data, next_change); + } + + enum expression_type +@@ -885,6 +808,134 @@ pe_unpack_versioned_parameters(xmlNode *versioned_params, const char *ra_version + #endif + + gboolean ++pe_eval_rules(xmlNode *ruleset, pe_rule_eval_data_t *rule_data, crm_time_t *next_change) ++{ ++ // If there are no rules, pass by default ++ gboolean ruleset_default = TRUE; ++ ++ for (xmlNode *rule = first_named_child(ruleset, XML_TAG_RULE); ++ rule != NULL; rule = crm_next_same_xml(rule)) { ++ ++ ruleset_default = FALSE; ++ if (pe_eval_expr(rule, rule_data, next_change)) { ++ /* Only the deprecated "lifetime" element of location constraints ++ * may contain more than one rule at the top level -- the schema ++ * limits a block of nvpairs to a single top-level rule. So, this ++ * effectively means that a lifetime is active if any rule it ++ * contains is active. ++ */ ++ return TRUE; ++ } ++ } ++ ++ return ruleset_default; ++} ++ ++gboolean ++pe_eval_expr(xmlNode *rule, pe_rule_eval_data_t *rule_data, crm_time_t *next_change) ++{ ++ xmlNode *expr = NULL; ++ gboolean test = TRUE; ++ gboolean empty = TRUE; ++ gboolean passed = TRUE; ++ gboolean do_and = TRUE; ++ const char *value = NULL; ++ ++ rule = expand_idref(rule, NULL); ++ value = crm_element_value(rule, XML_RULE_ATTR_BOOLEAN_OP); ++ if (safe_str_eq(value, "or")) { ++ do_and = FALSE; ++ passed = FALSE; ++ } ++ ++ crm_trace("Testing rule %s", ID(rule)); ++ for (expr = __xml_first_child_element(rule); expr != NULL; ++ expr = __xml_next_element(expr)) { ++ ++ test = pe_eval_subexpr(expr, rule_data, next_change); ++ empty = FALSE; ++ ++ if (test && do_and == FALSE) { ++ crm_trace("Expression %s/%s passed", ID(rule), ID(expr)); ++ return TRUE; ++ ++ } else if (test == FALSE && do_and) { ++ crm_trace("Expression %s/%s failed", ID(rule), ID(expr)); ++ return FALSE; ++ } ++ } ++ ++ if (empty) { ++ crm_err("Invalid Rule %s: rules must contain at least one expression", ID(rule)); ++ } ++ ++ crm_trace("Rule %s %s", ID(rule), passed ? 
"passed" : "failed"); ++ return passed; ++} ++ ++gboolean ++pe_eval_subexpr(xmlNode *expr, pe_rule_eval_data_t *rule_data, crm_time_t *next_change) ++{ ++ gboolean accept = FALSE; ++ const char *uname = NULL; ++ ++ switch (find_expression_type(expr)) { ++ case nested_rule: ++ accept = pe_eval_expr(expr, rule_data, next_change); ++ break; ++ case attr_expr: ++ case loc_expr: ++ /* these expressions can never succeed if there is ++ * no node to compare with ++ */ ++ if (rule_data->node_hash != NULL) { ++ accept = pe__eval_attr_expr(expr, rule_data); ++ } ++ break; ++ ++ case time_expr: ++ accept = pe_test_date_expression(expr, rule_data->now, next_change); ++ break; ++ ++ case role_expr: ++ accept = pe__eval_role_expr(expr, rule_data); ++ break; ++ ++ case rsc_expr: ++ accept = pe__eval_rsc_expr(expr, rule_data); ++ break; ++ ++ case op_expr: ++ accept = pe__eval_op_expr(expr, rule_data); ++ break; ++ ++#if ENABLE_VERSIONED_ATTRS ++ case version_expr: ++ if (rule_data->node_hash && ++ g_hash_table_lookup_extended(rule_data->node_hash, ++ CRM_ATTR_RA_VERSION, NULL, NULL)) { ++ accept = pe__eval_attr_expr(expr, rule_data); ++ } else { ++ // we are going to test it when we have ra-version ++ accept = TRUE; ++ } ++ break; ++#endif ++ ++ default: ++ CRM_CHECK(FALSE /* bad type */ , return FALSE); ++ accept = FALSE; ++ } ++ if (rule_data->node_hash) { ++ uname = g_hash_table_lookup(rule_data->node_hash, CRM_ATTR_UNAME); ++ } ++ ++ crm_trace("Expression %s %s on %s", ++ ID(expr), accept ? "passed" : "failed", uname ? uname : "all nodes"); ++ return accept; ++} ++ ++gboolean + pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) + { + gboolean accept = FALSE; +-- +1.8.3.1 + + +From ea6318252164578fd27dcef657e80f5225337a4b Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Tue, 7 Apr 2020 15:57:06 -0400 +Subject: [PATCH 05/17] Refactor: scheduler: Add rule_data to unpack_data_s. + +This is just to get rid of a couple extra arguments to some internal +functions and make them look like the external functions. 
+--- + lib/pengine/rules.c | 65 ++++++++++++++++++++++++++++++++++++----------------- + 1 file changed, 44 insertions(+), 21 deletions(-) + +diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c +index a6353ef..2709d68 100644 +--- a/lib/pengine/rules.c ++++ b/lib/pengine/rules.c +@@ -555,10 +555,9 @@ add_versioned_attributes(xmlNode * attr_set, xmlNode * versioned_attrs) + + typedef struct unpack_data_s { + gboolean overwrite; +- GHashTable *node_hash; + void *hash; +- crm_time_t *now; + crm_time_t *next_change; ++ pe_rule_eval_data_t *rule_data; + xmlNode *top; + } unpack_data_t; + +@@ -568,14 +567,14 @@ unpack_attr_set(gpointer data, gpointer user_data) + sorted_set_t *pair = data; + unpack_data_t *unpack_data = user_data; + +- if (!pe_evaluate_rules(pair->attr_set, unpack_data->node_hash, +- unpack_data->now, unpack_data->next_change)) { ++ if (!pe_eval_rules(pair->attr_set, unpack_data->rule_data, ++ unpack_data->next_change)) { + return; + } + + #if ENABLE_VERSIONED_ATTRS +- if (get_versioned_rule(pair->attr_set) && !(unpack_data->node_hash && +- g_hash_table_lookup_extended(unpack_data->node_hash, ++ if (get_versioned_rule(pair->attr_set) && !(unpack_data->rule_data->node_hash && ++ g_hash_table_lookup_extended(unpack_data->rule_data->node_hash, + CRM_ATTR_RA_VERSION, NULL, NULL))) { + // we haven't actually tested versioned expressions yet + return; +@@ -593,8 +592,8 @@ unpack_versioned_attr_set(gpointer data, gpointer user_data) + sorted_set_t *pair = data; + unpack_data_t *unpack_data = user_data; + +- if (pe_evaluate_rules(pair->attr_set, unpack_data->node_hash, +- unpack_data->now, unpack_data->next_change)) { ++ if (pe_eval_rules(pair->attr_set, unpack_data->rule_data, ++ unpack_data->next_change)) { + add_versioned_attributes(pair->attr_set, unpack_data->hash); + } + } +@@ -658,19 +657,17 @@ make_pairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, + * \param[in] top XML document root (used to expand id-ref's) + * \param[in] xml_obj XML element containing blocks of nvpair elements + * \param[in] set_name If not NULL, only use blocks of this element type +- * \param[in] node_hash Node attributes to use when evaluating rules + * \param[out] hash Where to store extracted name/value pairs + * \param[in] always_first If not NULL, process block with this ID first + * \param[in] overwrite Whether to replace existing values with same name +- * \param[in] now Time to use when evaluating rules ++ * \param[in] rule_data Matching parameters to use when unpacking + * \param[out] next_change If not NULL, set to when rule evaluation will change + * \param[in] unpack_func Function to call to unpack each block + */ + static void + unpack_nvpair_blocks(xmlNode *top, xmlNode *xml_obj, const char *set_name, +- GHashTable *node_hash, void *hash, +- const char *always_first, gboolean overwrite, +- crm_time_t *now, crm_time_t *next_change, ++ void *hash, const char *always_first, gboolean overwrite, ++ pe_rule_eval_data_t *rule_data, crm_time_t *next_change, + GFunc unpack_func) + { + GList *pairs = make_pairs(top, xml_obj, set_name, always_first); +@@ -678,11 +675,10 @@ unpack_nvpair_blocks(xmlNode *top, xmlNode *xml_obj, const char *set_name, + if (pairs) { + unpack_data_t data = { + .hash = hash, +- .node_hash = node_hash, +- .now = now, + .overwrite = overwrite, + .next_change = next_change, + .top = top, ++ .rule_data = rule_data + }; + + g_list_foreach(pairs, unpack_func, &data); +@@ -709,8 +705,17 @@ pe_unpack_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, + const char 
*always_first, gboolean overwrite, + crm_time_t *now, crm_time_t *next_change) + { +- unpack_nvpair_blocks(top, xml_obj, set_name, node_hash, hash, always_first, +- overwrite, now, next_change, unpack_attr_set); ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = node_hash, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ ++ unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first, ++ overwrite, &rule_data, next_change, unpack_attr_set); + } + + #if ENABLE_VERSIONED_ATTRS +@@ -720,8 +725,17 @@ pe_unpack_versioned_attributes(xmlNode *top, xmlNode *xml_obj, + xmlNode *hash, crm_time_t *now, + crm_time_t *next_change) + { +- unpack_nvpair_blocks(top, xml_obj, set_name, node_hash, hash, NULL, FALSE, +- now, next_change, unpack_versioned_attr_set); ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = node_hash, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ ++ unpack_nvpair_blocks(top, xml_obj, set_name, hash, NULL, FALSE, ++ &rule_data, next_change, unpack_versioned_attr_set); + } + #endif + +@@ -1366,6 +1380,15 @@ unpack_instance_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name, + const char *always_first, gboolean overwrite, + crm_time_t *now) + { +- unpack_nvpair_blocks(top, xml_obj, set_name, node_hash, hash, always_first, +- overwrite, now, NULL, unpack_attr_set); ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = node_hash, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ ++ unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first, ++ overwrite, &rule_data, NULL, unpack_attr_set); + } +-- +1.8.3.1 + + +From 54646db6f5e4f1bb141b35798bcad5c3cc025afe Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 8 Apr 2020 10:41:41 -0400 +Subject: [PATCH 06/17] Refactor: scheduler: Change args to + pe__unpack_dataset_nvpairs. + +It should now take a pe_rule_eval_data_t instead of various separate +arguments. This will allow passing further data that needs to be tested +against in the future (such as rsc_defaults and op_defaults). It's also +convenient to make versions of pe_unpack_nvpairs and +pe_unpack_versioned_attributes that take the same arguments. + +Then, adapt callers of pe__unpack_dataset_nvpairs to pass the new +argument. 
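
The caller-side change is mechanical; a condensed sketch based on
get_rsc_attributes() in the hunks below:

    /* Before: pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS,
     *             node_hash, meta_hash, NULL, FALSE, data_set);
     * After:  the node attribute table (and, later in this series, resource
     *         and operation matching data) rides inside one rule_data:
     */
    pe_rule_eval_data_t rule_data = {
        .node_hash = (node != NULL)? node->details->attrs : NULL,
        .role = RSC_ROLE_UNKNOWN,
        .now = data_set->now,
        .match_data = NULL,
        .rsc_data = NULL,
        .op_data = NULL
    };

    pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
                               meta_hash, NULL, FALSE, data_set);
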
+--- + include/crm/pengine/internal.h | 2 +- + include/crm/pengine/rules.h | 9 +++++++ + lib/pengine/complex.c | 41 ++++++++++++++++++++++------- + lib/pengine/rules.c | 23 ++++++++++++++-- + lib/pengine/unpack.c | 33 ++++++++++++++++++++--- + lib/pengine/utils.c | 60 +++++++++++++++++++++++++++++++----------- + 6 files changed, 137 insertions(+), 31 deletions(-) + +diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h +index 189ba7b..3e59502 100644 +--- a/include/crm/pengine/internal.h ++++ b/include/crm/pengine/internal.h +@@ -460,7 +460,7 @@ void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set); + void pe__register_messages(pcmk__output_t *out); + + void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name, +- GHashTable *node_hash, GHashTable *hash, ++ pe_rule_eval_data_t *rule_data, GHashTable *hash, + const char *always_first, gboolean overwrite, + pe_working_set_t *data_set); + +diff --git a/include/crm/pengine/rules.h b/include/crm/pengine/rules.h +index a74c629..cbae8ed 100644 +--- a/include/crm/pengine/rules.h ++++ b/include/crm/pengine/rules.h +@@ -46,12 +46,21 @@ gboolean pe_test_expression(xmlNode *expr, GHashTable *node_hash, + crm_time_t *next_change, + pe_match_data_t *match_data); + ++void pe_eval_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, ++ pe_rule_eval_data_t *rule_data, GHashTable *hash, ++ const char *always_first, gboolean overwrite, ++ crm_time_t *next_change); ++ + void pe_unpack_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, + GHashTable *node_hash, GHashTable *hash, + const char *always_first, gboolean overwrite, + crm_time_t *now, crm_time_t *next_change); + + #if ENABLE_VERSIONED_ATTRS ++void pe_eval_versioned_attributes(xmlNode *top, xmlNode *xml_obj, ++ const char *set_name, pe_rule_eval_data_t *rule_data, ++ xmlNode *hash, crm_time_t *next_change); ++ + void pe_unpack_versioned_attributes(xmlNode *top, xmlNode *xml_obj, + const char *set_name, GHashTable *node_hash, + xmlNode *hash, crm_time_t *now, +diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c +index 16f3a71..d91c95e 100644 +--- a/lib/pengine/complex.c ++++ b/lib/pengine/complex.c +@@ -95,10 +95,17 @@ void + get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc, + pe_node_t * node, pe_working_set_t * data_set) + { +- GHashTable *node_hash = NULL; ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; + + if (node) { +- node_hash = node->details->attrs; ++ rule_data.node_hash = node->details->attrs; + } + + if (rsc->xml) { +@@ -112,7 +119,7 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc, + } + } + +- pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, node_hash, ++ pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data, + meta_hash, NULL, FALSE, data_set); + + /* set anything else based on the parent */ +@@ -122,20 +129,27 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc, + + /* and finally check the defaults */ + pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS, +- node_hash, meta_hash, NULL, FALSE, data_set); ++ &rule_data, meta_hash, NULL, FALSE, data_set); + } + + void + get_rsc_attributes(GHashTable * meta_hash, pe_resource_t * rsc, + pe_node_t * node, pe_working_set_t * data_set) + { +- GHashTable *node_hash = NULL; ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = 
RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; + + if (node) { +- node_hash = node->details->attrs; ++ rule_data.node_hash = node->details->attrs; + } + +- pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, node_hash, ++ pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data, + meta_hash, NULL, FALSE, data_set); + + /* set anything else based on the parent */ +@@ -145,7 +159,7 @@ get_rsc_attributes(GHashTable * meta_hash, pe_resource_t * rsc, + } else { + /* and finally check the defaults */ + pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS, +- node_hash, meta_hash, NULL, FALSE, data_set); ++ &rule_data, meta_hash, NULL, FALSE, data_set); + } + } + +@@ -376,6 +390,15 @@ common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, + bool remote_node = FALSE; + bool has_versioned_params = FALSE; + ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ + crm_log_xml_trace(xml_obj, "Processing resource input..."); + + if (id == NULL) { +@@ -706,7 +729,7 @@ common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, + + (*rsc)->utilization = crm_str_table_new(); + +- pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, NULL, ++ pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data, + (*rsc)->utilization, NULL, FALSE, data_set); + + /* data_set->resources = g_list_append(data_set->resources, (*rsc)); */ +diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c +index 2709d68..7575011 100644 +--- a/lib/pengine/rules.c ++++ b/lib/pengine/rules.c +@@ -686,6 +686,16 @@ unpack_nvpair_blocks(xmlNode *top, xmlNode *xml_obj, const char *set_name, + } + } + ++void ++pe_eval_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, ++ pe_rule_eval_data_t *rule_data, GHashTable *hash, ++ const char *always_first, gboolean overwrite, ++ crm_time_t *next_change) ++{ ++ unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first, ++ overwrite, rule_data, next_change, unpack_attr_set); ++} ++ + /*! 
+ * \brief Extract nvpair blocks contained by an XML element into a hash table + * +@@ -714,12 +724,21 @@ pe_unpack_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, + .op_data = NULL + }; + +- unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first, +- overwrite, &rule_data, next_change, unpack_attr_set); ++ pe_eval_nvpairs(top, xml_obj, set_name, &rule_data, hash, ++ always_first, overwrite, next_change); + } + + #if ENABLE_VERSIONED_ATTRS + void ++pe_eval_versioned_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name, ++ pe_rule_eval_data_t *rule_data, xmlNode *hash, ++ crm_time_t *next_change) ++{ ++ unpack_nvpair_blocks(top, xml_obj, set_name, hash, NULL, FALSE, rule_data, ++ next_change, unpack_versioned_attr_set); ++} ++ ++void + pe_unpack_versioned_attributes(xmlNode *top, xmlNode *xml_obj, + const char *set_name, GHashTable *node_hash, + xmlNode *hash, crm_time_t *now, +diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c +index 532a3e6..8784857 100644 +--- a/lib/pengine/unpack.c ++++ b/lib/pengine/unpack.c +@@ -188,9 +188,18 @@ unpack_config(xmlNode * config, pe_working_set_t * data_set) + const char *value = NULL; + GHashTable *config_hash = crm_str_table_new(); + ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ + data_set->config_hash = config_hash; + +- pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, NULL, config_hash, ++ pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, &rule_data, config_hash, + CIB_OPTIONS_FIRST, FALSE, data_set); + + verify_pe_options(data_set->config_hash); +@@ -515,6 +524,15 @@ unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set) + const char *type = NULL; + const char *score = NULL; + ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ + for (xml_obj = __xml_first_child_element(xml_nodes); xml_obj != NULL; + xml_obj = __xml_next_element(xml_obj)) { + +@@ -547,7 +565,7 @@ unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set) + handle_startup_fencing(data_set, new_node); + + add_node_attrs(xml_obj, new_node, FALSE, data_set); +- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, NULL, ++ pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, &rule_data, + new_node->details->utilization, NULL, + FALSE, data_set); + +@@ -3698,6 +3716,15 @@ add_node_attrs(xmlNode *xml_obj, pe_node_t *node, bool overwrite, + { + const char *cluster_name = NULL; + ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ + g_hash_table_insert(node->details->attrs, + strdup(CRM_ATTR_UNAME), strdup(node->details->uname)); + +@@ -3719,7 +3746,7 @@ add_node_attrs(xmlNode *xml_obj, pe_node_t *node, bool overwrite, + strdup(cluster_name)); + } + +- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, NULL, ++ pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, &rule_data, + node->details->attrs, NULL, overwrite, data_set); + + if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) { +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index c9b45e0..d01936d 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -597,10 +597,19 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, + 
+ if (is_set(action->flags, pe_action_have_node_attrs) == FALSE + && action->node != NULL && action->op_entry != NULL) { ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = action->node->details->attrs, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ + pe_set_action_bit(action, pe_action_have_node_attrs); + pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS, +- action->node->details->attrs, +- action->extra, NULL, FALSE, data_set); ++ &rule_data, action->extra, NULL, ++ FALSE, data_set); + } + + if (is_set(action->flags, pe_action_pseudo)) { +@@ -873,6 +882,15 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set + const char *timeout = NULL; + int timeout_ms = 0; + ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ + for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP); + child != NULL; child = crm_next_same_xml(child)) { + if (safe_str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME))) { +@@ -884,7 +902,7 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set + if (timeout == NULL && data_set->op_defaults) { + GHashTable *action_meta = crm_str_table_new(); + pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, +- NULL, action_meta, NULL, FALSE, data_set); ++ &rule_data, action_meta, NULL, FALSE, data_set); + timeout = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT); + } + +@@ -964,10 +982,19 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai + pe_rsc_action_details_t *rsc_details = NULL; + #endif + ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ + CRM_CHECK(action && action->rsc, return); + + // Cluster-wide +- pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, NULL, ++ pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data, + action->meta, NULL, FALSE, data_set); + + // Probe timeouts default differently, so handle timeout default later +@@ -981,19 +1008,20 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai + xmlAttrPtr xIter = NULL; + + // take precedence over defaults +- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, NULL, ++ pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data, + action->meta, NULL, TRUE, data_set); + + #if ENABLE_VERSIONED_ATTRS + rsc_details = pe_rsc_action_details(action); +- pe_unpack_versioned_attributes(data_set->input, xml_obj, +- XML_TAG_ATTR_SETS, NULL, +- rsc_details->versioned_parameters, +- data_set->now, NULL); +- pe_unpack_versioned_attributes(data_set->input, xml_obj, +- XML_TAG_META_SETS, NULL, +- rsc_details->versioned_meta, +- data_set->now, NULL); ++ ++ pe_eval_versioned_attributes(data_set->input, xml_obj, ++ XML_TAG_ATTR_SETS, &rule_data, ++ rsc_details->versioned_parameters, ++ NULL); ++ pe_eval_versioned_attributes(data_set->input, xml_obj, ++ XML_TAG_META_SETS, &rule_data, ++ rsc_details->versioned_meta, ++ NULL); + #endif + + /* Anything set as an XML property has highest precedence. 
+@@ -2693,14 +2721,14 @@ pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set) + */ + void + pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name, +- GHashTable *node_hash, GHashTable *hash, ++ pe_rule_eval_data_t *rule_data, GHashTable *hash, + const char *always_first, gboolean overwrite, + pe_working_set_t *data_set) + { + crm_time_t *next_change = crm_time_new_undefined(); + +- pe_unpack_nvpairs(data_set->input, xml_obj, set_name, node_hash, hash, +- always_first, overwrite, data_set->now, next_change); ++ pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash, ++ always_first, overwrite, next_change); + if (crm_time_is_defined(next_change)) { + time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change); + +-- +1.8.3.1 + + +From ad06f60bae1fcb5d204fa18a0b21ade78aaee5f4 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 8 Apr 2020 13:43:26 -0400 +Subject: [PATCH 07/17] Refactor: scheduler: unpack_operation should be static. + +--- + lib/pengine/utils.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index d01936d..c345875 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -23,8 +23,8 @@ + extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root); + void print_str_str(gpointer key, gpointer value, gpointer user_data); + gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); +-void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, +- pe_working_set_t * data_set); ++static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, ++ pe_working_set_t * data_set); + static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, + gboolean include_disabled); + +@@ -968,7 +968,7 @@ unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, + * \param[in] container Resource that contains affected resource, if any + * \param[in] data_set Cluster state + */ +-void ++static void + unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, + pe_working_set_t * data_set) + { +-- +1.8.3.1 + + +From 7e57d955c9209af62dffc0639c50d51121028c26 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 8 Apr 2020 14:58:35 -0400 +Subject: [PATCH 08/17] Refactor: scheduler: Pass interval to unpack_operation. 
+ +--- + lib/pengine/utils.c | 36 ++++++++++++++---------------------- + 1 file changed, 14 insertions(+), 22 deletions(-) + +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index c345875..1e3b0bd 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -24,7 +24,7 @@ extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root); + void print_str_str(gpointer key, gpointer value, gpointer user_data); + gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); + static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, +- pe_working_set_t * data_set); ++ pe_working_set_t * data_set, guint interval_ms); + static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, + gboolean include_disabled); + +@@ -568,9 +568,13 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, + } + + if (rsc != NULL) { ++ guint interval_ms = 0; ++ + action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE); ++ parse_op_key(key, NULL, NULL, &interval_ms); + +- unpack_operation(action, action->op_entry, rsc->container, data_set); ++ unpack_operation(action, action->op_entry, rsc->container, data_set, ++ interval_ms); + + if (save_action) { + rsc->actions = g_list_prepend(rsc->actions, action); +@@ -963,20 +967,20 @@ unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, + * and start delay values as integer milliseconds), requirements, and + * failure policy. + * +- * \param[in,out] action Action to unpack into +- * \param[in] xml_obj Operation XML (or NULL if all defaults) +- * \param[in] container Resource that contains affected resource, if any +- * \param[in] data_set Cluster state ++ * \param[in,out] action Action to unpack into ++ * \param[in] xml_obj Operation XML (or NULL if all defaults) ++ * \param[in] container Resource that contains affected resource, if any ++ * \param[in] data_set Cluster state ++ * \param[in] interval_ms How frequently to perform the operation + */ + static void + unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, +- pe_working_set_t * data_set) ++ pe_working_set_t * data_set, guint interval_ms) + { +- guint interval_ms = 0; + int timeout = 0; + char *value_ms = NULL; + const char *value = NULL; +- const char *field = NULL; ++ const char *field = XML_LRM_ATTR_INTERVAL; + char *default_timeout = NULL; + #if ENABLE_VERSIONED_ATTRS + pe_rsc_action_details_t *rsc_details = NULL; +@@ -1038,23 +1042,11 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai + g_hash_table_remove(action->meta, "id"); + + // Normalize interval to milliseconds +- field = XML_LRM_ATTR_INTERVAL; +- value = g_hash_table_lookup(action->meta, field); +- if (value != NULL) { +- interval_ms = crm_parse_interval_spec(value); +- +- } else if ((xml_obj == NULL) && !strcmp(action->task, RSC_STATUS)) { +- /* An orphaned recurring monitor will not have any XML. However, we +- * want the interval to be set, so the action can be properly detected +- * as a recurring monitor. Parse it from the key in this case. 
+- */ +- parse_op_key(action->uuid, NULL, NULL, &interval_ms); +- } + if (interval_ms > 0) { + value_ms = crm_strdup_printf("%u", interval_ms); + g_hash_table_replace(action->meta, strdup(field), value_ms); + +- } else if (value) { ++ } else if (g_hash_table_lookup(action->meta, field) != NULL) { + g_hash_table_remove(action->meta, field); + } + +-- +1.8.3.1 + + +From e4c411d9674e222647dd3ed31714c369f54ccad1 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Thu, 9 Apr 2020 16:15:17 -0400 +Subject: [PATCH 09/17] Feature: scheduler: Pass rsc_defaults and op_defaults + data. + +See: rhbz#1628701. +--- + lib/pengine/complex.c | 8 +++++++- + lib/pengine/utils.c | 15 +++++++++++++-- + 2 files changed, 20 insertions(+), 3 deletions(-) + +diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c +index d91c95e..1f06348 100644 +--- a/lib/pengine/complex.c ++++ b/lib/pengine/complex.c +@@ -95,12 +95,18 @@ void + get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc, + pe_node_t * node, pe_working_set_t * data_set) + { ++ pe_rsc_eval_data_t rsc_rule_data = { ++ .standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS), ++ .provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER), ++ .agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE) ++ }; ++ + pe_rule_eval_data_t rule_data = { + .node_hash = NULL, + .role = RSC_ROLE_UNKNOWN, + .now = data_set->now, + .match_data = NULL, +- .rsc_data = NULL, ++ .rsc_data = &rsc_rule_data, + .op_data = NULL + }; + +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index 1e3b0bd..d5309ed 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -986,13 +986,24 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai + pe_rsc_action_details_t *rsc_details = NULL; + #endif + ++ pe_rsc_eval_data_t rsc_rule_data = { ++ .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS), ++ .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER), ++ .agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE) ++ }; ++ ++ pe_op_eval_data_t op_rule_data = { ++ .op_name = action->task, ++ .interval = interval_ms ++ }; ++ + pe_rule_eval_data_t rule_data = { + .node_hash = NULL, + .role = RSC_ROLE_UNKNOWN, + .now = data_set->now, + .match_data = NULL, +- .rsc_data = NULL, +- .op_data = NULL ++ .rsc_data = &rsc_rule_data, ++ .op_data = &op_rule_data + }; + + CRM_CHECK(action && action->rsc, return); +-- +1.8.3.1 + + +From 57eedcad739071530f01e1fd691734f7681a08a1 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 17 Apr 2020 12:30:51 -0400 +Subject: [PATCH 10/17] Feature: xml: Add rsc_expression and op_expression to + the XML schema. 
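[Editor's note] Patch 09 fills pe_rsc_eval_data_t from the resource's class/provider/type and pe_op_eval_data_t from the action name and interval; the rsc_expression/op_expression rules added next match against those values, and (per the documentation added in patch 15) any attribute omitted from the expression matches everything. A rough sketch of those matching semantics, with simplified types and a hypothetical rsc_expr_matches() helper — not the library API:

[source,C]
----
#include <stdio.h>
#include <string.h>

typedef struct { const char *standard, *provider, *agent; } rsc_eval_data_t;

/* A NULL pattern field acts as a wildcard; the sketch assumes the
 * resource fields themselves are non-NULL. */
static int rsc_expr_matches(const rsc_eval_data_t *rsc,
                            const char *class, const char *provider,
                            const char *type)
{
    if (class && strcmp(class, rsc->standard)) return 0;
    if (provider && strcmp(provider, rsc->provider)) return 0;
    if (type && strcmp(type, rsc->agent)) return 0;
    return 1;
}

int main(void)
{
    rsc_eval_data_t ip = { "ocf", "heartbeat", "IPaddr2" };

    /* <rsc_expression class="ocf" provider="heartbeat"/>, type omitted */
    printf("%d\n", rsc_expr_matches(&ip, "ocf", "heartbeat", NULL)); /* 1 */

    /* <rsc_expression class="stonith"/> fails on class alone */
    printf("%d\n", rsc_expr_matches(&ip, "stonith", NULL, NULL));    /* 0 */
    return 0;
}
----

The wildcard behaviour is what lets a bare class-only expression select an entire class of agents at once.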
+ +--- + cts/cli/regression.upgrade.exp | 7 +- + cts/cli/regression.validity.exp | 22 ++- + xml/constraints-next.rng | 4 +- + xml/nodes-3.4.rng | 44 +++++ + xml/nvset-3.4.rng | 63 ++++++ + xml/options-3.4.rng | 111 +++++++++++ + xml/resources-3.4.rng | 425 ++++++++++++++++++++++++++++++++++++++++ + xml/rule-3.4.rng | 165 ++++++++++++++++ + 8 files changed, 833 insertions(+), 8 deletions(-) + create mode 100644 xml/nodes-3.4.rng + create mode 100644 xml/nvset-3.4.rng + create mode 100644 xml/options-3.4.rng + create mode 100644 xml/resources-3.4.rng + create mode 100644 xml/rule-3.4.rng + +diff --git a/cts/cli/regression.upgrade.exp b/cts/cli/regression.upgrade.exp +index 28ca057..50b22df 100644 +--- a/cts/cli/regression.upgrade.exp ++++ b/cts/cli/regression.upgrade.exp +@@ -79,8 +79,11 @@ update_validation debug: Configuration valid for schema: pacemaker-3.2 + update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3 + update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) + update_validation debug: Configuration valid for schema: pacemaker-3.3 +-update_validation trace: Stopping at pacemaker-3.3 +-update_validation info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.3 ++update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4 ++update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) ++update_validation debug: Configuration valid for schema: pacemaker-3.4 ++update_validation trace: Stopping at pacemaker-3.4 ++update_validation info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.4 + =#=#=#= Current cib after: Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping) =#=#=#= + + +diff --git a/cts/cli/regression.validity.exp b/cts/cli/regression.validity.exp +index 46e54b5..4407074 100644 +--- a/cts/cli/regression.validity.exp ++++ b/cts/cli/regression.validity.exp +@@ -105,7 +105,11 @@ update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) + element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order + element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order + update_validation trace: pacemaker-3.3 validation failed +-Cannot upgrade configuration (claiming schema pacemaker-1.2) to at least pacemaker-3.0 because it does not validate with any schema from pacemaker-1.2 to pacemaker-3.3 ++update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) ++element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order ++element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ++update_validation trace: pacemaker-3.4 validation failed ++Cannot upgrade configuration (claiming schema pacemaker-1.2) to at least pacemaker-3.0 because it does not validate with any schema from pacemaker-1.2 to pacemaker-3.4 + =#=#=#= End test: Run crm_simulate with invalid CIB (enum violation) - Invalid configuration (78) =#=#=#= + * Passed: crm_simulate - Run crm_simulate with invalid CIB (enum violation) + =#=#=#= Begin test: Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#= +@@ -198,7 +202,10 @@ update_validation trace: pacemaker-3.2 validation failed + update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) + element cib: Relax-NG validity error : Invalid attribute validate-with for element cib + update_validation trace: pacemaker-3.3 validation failed +-Cannot upgrade configuration 
(claiming schema pacemaker-9999.0) to at least pacemaker-3.0 because it does not validate with any schema from unknown to pacemaker-3.3 ++update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) ++element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ++update_validation trace: pacemaker-3.4 validation failed ++Cannot upgrade configuration (claiming schema pacemaker-9999.0) to at least pacemaker-3.0 because it does not validate with any schema from unknown to pacemaker-3.4 + =#=#=#= End test: Run crm_simulate with invalid CIB (unrecognized validate-with) - Invalid configuration (78) =#=#=#= + * Passed: crm_simulate - Run crm_simulate with invalid CIB (unrecognized validate-with) + =#=#=#= Begin test: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#= +@@ -286,8 +293,11 @@ update_validation debug: Configuration valid for schema: pacemaker-3.2 + update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3 + update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) + update_validation debug: Configuration valid for schema: pacemaker-3.3 +-update_validation trace: Stopping at pacemaker-3.3 +-update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.3 ++update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4 ++update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) ++update_validation debug: Configuration valid for schema: pacemaker-3.4 ++update_validation trace: Stopping at pacemaker-3.4 ++update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.4 + unpack_resources error: Resource start-up disabled since no STONITH resources have been defined + unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option + unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity +@@ -393,6 +403,8 @@ element rsc_order: Relax-NG validity error : Invalid attribute first-action for + element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order + element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order + element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ++element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order ++element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order + =#=#=#= Current cib after: Make resulting CIB invalid, and without validate-with attribute =#=#=#= + + +@@ -450,6 +462,8 @@ validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attrib + validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order + validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order + validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ++validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order ++validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order + unpack_resources error: Resource start-up disabled since no STONITH resources have been defined + unpack_resources error: Either configure some or disable STONITH with 
the stonith-enabled option + unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity +diff --git a/xml/constraints-next.rng b/xml/constraints-next.rng +index 7e0d98e..1fa3e75 100644 +--- a/xml/constraints-next.rng ++++ b/xml/constraints-next.rng +@@ -43,7 +43,7 @@ + + + +- ++ + + + +@@ -255,7 +255,7 @@ + + + +- ++ + + + +diff --git a/xml/nodes-3.4.rng b/xml/nodes-3.4.rng +new file mode 100644 +index 0000000..0132c72 +--- /dev/null ++++ b/xml/nodes-3.4.rng +@@ -0,0 +1,44 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ member ++ ping ++ remote ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/xml/nvset-3.4.rng b/xml/nvset-3.4.rng +new file mode 100644 +index 0000000..91a7d23 +--- /dev/null ++++ b/xml/nvset-3.4.rng +@@ -0,0 +1,63 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/xml/options-3.4.rng b/xml/options-3.4.rng +new file mode 100644 +index 0000000..22330d8 +--- /dev/null ++++ b/xml/options-3.4.rng +@@ -0,0 +1,111 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ cluster-infrastructure ++ ++ ++ ++ ++ ++ heartbeat ++ openais ++ classic openais ++ classic openais (with plugin) ++ cman ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ cluster-infrastructure ++ cluster_recheck_interval ++ dc_deadtime ++ default-action-timeout ++ default_action_timeout ++ default-migration-threshold ++ default_migration_threshold ++ default-resource-failure-stickiness ++ default_resource_failure_stickiness ++ default-resource-stickiness ++ default_resource_stickiness ++ election_timeout ++ expected-quorum-votes ++ is-managed-default ++ is_managed_default ++ no_quorum_policy ++ notification-agent ++ notification-recipient ++ remove_after_stop ++ shutdown_escalation ++ startup_fencing ++ stonith_action ++ stonith_enabled ++ stop_orphan_actions ++ stop_orphan_resources ++ symmetric_cluster ++ transition_idle_timeout ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/xml/resources-3.4.rng b/xml/resources-3.4.rng +new file mode 100644 +index 0000000..fbb4b65 +--- /dev/null ++++ b/xml/resources-3.4.rng +@@ -0,0 +1,425 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ isolation ++ isolation-host ++ isolation-instance ++ isolation-wrapper ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ([0-9\-]+) ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ requires ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ Stopped ++ Started ++ Slave 
++ Master ++ ++ ++ ++ ++ ++ ++ ignore ++ block ++ stop ++ restart ++ standby ++ fence ++ restart-container ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ocf ++ ++ ++ ++ ++ lsb ++ heartbeat ++ stonith ++ upstart ++ service ++ systemd ++ nagios ++ ++ ++ ++ ++ +diff --git a/xml/rule-3.4.rng b/xml/rule-3.4.rng +new file mode 100644 +index 0000000..5d1daf0 +--- /dev/null ++++ b/xml/rule-3.4.rng +@@ -0,0 +1,165 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ or ++ and ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ lt ++ gt ++ lte ++ gte ++ eq ++ ne ++ defined ++ not_defined ++ ++ ++ ++ ++ ++ ++ ++ ++ string ++ number ++ version ++ ++ ++ ++ ++ ++ ++ literal ++ param ++ meta ++ ++ ++ ++ ++ ++ ++ ++ ++ in_range ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ gt ++ ++ ++ ++ lt ++ ++ ++ ++ ++ ++ date_spec ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From b0e2345d92fb7cf42c133b24457eeb07126db8a0 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 27 Apr 2020 16:24:22 -0400 +Subject: [PATCH 11/17] Fix: scheduler: Change trace output in populate_hash. + +Only show the "Setting attribute:" text when it comes time to actually +set the attribute. Also show the value being set. This makes it +clearer that an attribute is actually being set, not just that the +function is processing something. +--- + lib/pengine/rules.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c +index 7575011..b0fca55 100644 +--- a/lib/pengine/rules.c ++++ b/lib/pengine/rules.c +@@ -463,7 +463,6 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN + name = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_NAME); + } + +- crm_trace("Setting attribute: %s", name); + value = crm_element_value(an_attr, XML_NVPAIR_ATTR_VALUE); + if (value == NULL) { + value = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_VALUE); +@@ -471,7 +470,6 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN + + if (name == NULL || value == NULL) { + continue; +- + } + + old_value = g_hash_table_lookup(hash, name); +@@ -484,6 +482,7 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN + continue; + + } else if (old_value == NULL) { ++ crm_trace("Setting attribute: %s = %s", name, value); + g_hash_table_insert(hash, strdup(name), strdup(value)); + + } else if (overwrite) { +-- +1.8.3.1 + + +From d35854384b231c79b8aba1ce4c5caf5dd51ec982 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 1 May 2020 15:45:31 -0400 +Subject: [PATCH 12/17] Test: scheduler: Add a regression test for op_defaults. 
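[Editor's note] The populate_hash() change above only relocates the trace message, but the surrounding logic it clarifies is worth spelling out: the first value seen for a name wins unless the caller asked to overwrite. A toy, glib-free sketch of that insert-versus-overwrite behaviour, with an array-backed map standing in for the GHashTable:

[source,C]
----
#include <stdio.h>
#include <string.h>

struct entry { const char *name; const char *value; };
static struct entry table[8];
static int n_entries;

static void set_attr(const char *name, const char *value, int overwrite)
{
    for (int i = 0; i < n_entries; i++) {
        if (strcmp(table[i].name, name) == 0) {
            if (overwrite) {
                printf("overwriting %s: %s -> %s\n",
                       name, table[i].value, value);
                table[i].value = value;
            } /* else: first value wins, as in populate_hash() */
            return;
        }
    }
    printf("setting attribute: %s = %s\n", name, value); /* traced here only */
    table[n_entries++] = (struct entry){ name, value };
}

int main(void)
{
    set_attr("timeout", "20s", 0); /* setting attribute: timeout = 20s  */
    set_attr("timeout", "30s", 0); /* silently ignored: first value wins */
    set_attr("timeout", "30s", 1); /* overwriting timeout: 20s -> 30s    */
    return 0;
}
----

With the trace emitted only on the insert path, "Setting attribute" now appears exactly when the hash actually changes, and shows the value being set.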
+ +--- + cts/cts-scheduler.in | 3 + + cts/scheduler/op-defaults.dot | 33 ++++++ + cts/scheduler/op-defaults.exp | 211 ++++++++++++++++++++++++++++++++++++++ + cts/scheduler/op-defaults.scores | 11 ++ + cts/scheduler/op-defaults.summary | 46 +++++++++ + cts/scheduler/op-defaults.xml | 87 ++++++++++++++++ + 6 files changed, 391 insertions(+) + create mode 100644 cts/scheduler/op-defaults.dot + create mode 100644 cts/scheduler/op-defaults.exp + create mode 100644 cts/scheduler/op-defaults.scores + create mode 100644 cts/scheduler/op-defaults.summary + create mode 100644 cts/scheduler/op-defaults.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index 5d72205..b83f812 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -962,6 +962,9 @@ TESTS = [ + [ "shutdown-lock", "Ensure shutdown lock works properly" ], + [ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ], + ], ++ [ ++ [ "op-defaults", "Test op_defaults conditional expressions " ], ++ ], + + # @TODO: If pacemaker implements versioned attributes, uncomment these tests + #[ +diff --git a/cts/scheduler/op-defaults.dot b/cts/scheduler/op-defaults.dot +new file mode 100644 +index 0000000..5536c15 +--- /dev/null ++++ b/cts/scheduler/op-defaults.dot +@@ -0,0 +1,33 @@ ++ digraph "g" { ++"dummy-rsc_monitor_0 cluster01" -> "dummy-rsc_start_0 cluster02" [ style = bold] ++"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_0 cluster02" -> "dummy-rsc_start_0 cluster02" [ style = bold] ++"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_60000 cluster02" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_start_0 cluster02" -> "dummy-rsc_monitor_60000 cluster02" [ style = bold] ++"dummy-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc2_monitor_0 cluster01" -> "ip-rsc2_start_0 cluster01" [ style = bold] ++"ip-rsc2_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc2_monitor_0 cluster02" -> "ip-rsc2_start_0 cluster01" [ style = bold] ++"ip-rsc2_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ip-rsc2_monitor_10000 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc2_start_0 cluster01" -> "ip-rsc2_monitor_10000 cluster01" [ style = bold] ++"ip-rsc2_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_0 cluster01" -> "ip-rsc_start_0 cluster02" [ style = bold] ++"ip-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_0 cluster02" -> "ip-rsc_start_0 cluster02" [ style = bold] ++"ip-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_20000 cluster02" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_start_0 cluster02" -> "ip-rsc_monitor_20000 cluster02" [ style = bold] ++"ip-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_monitor_0 cluster01" -> "ping-rsc-ping_start_0 cluster01" [ style = bold] ++"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] 
++"ping-rsc-ping_monitor_0 cluster02" -> "ping-rsc-ping_start_0 cluster01" [ style = bold] ++"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/op-defaults.exp b/cts/scheduler/op-defaults.exp +new file mode 100644 +index 0000000..b81eacb +--- /dev/null ++++ b/cts/scheduler/op-defaults.exp +@@ -0,0 +1,211 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/op-defaults.scores b/cts/scheduler/op-defaults.scores +new file mode 100644 +index 0000000..1c622f0 +--- /dev/null ++++ b/cts/scheduler/op-defaults.scores +@@ -0,0 +1,11 @@ ++Allocation scores: ++pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 ++pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 ++pcmk__native_allocate: fencing allocation score on cluster01: 0 ++pcmk__native_allocate: fencing allocation score on cluster02: 0 ++pcmk__native_allocate: ip-rsc allocation score on cluster01: 0 ++pcmk__native_allocate: ip-rsc allocation score on cluster02: 0 ++pcmk__native_allocate: ip-rsc2 allocation score on cluster01: 0 ++pcmk__native_allocate: ip-rsc2 allocation score on cluster02: 0 ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0 ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0 +diff --git a/cts/scheduler/op-defaults.summary b/cts/scheduler/op-defaults.summary +new file mode 100644 +index 0000000..b580939 +--- /dev/null ++++ b/cts/scheduler/op-defaults.summary +@@ -0,0 +1,46 @@ ++ ++Current cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Stopped ++ ip-rsc (ocf::heartbeat:IPaddr2): Stopped ++ ip-rsc2 (ocf::heartbeat:IPaddr2): Stopped ++ dummy-rsc (ocf::pacemaker:Dummy): Stopped ++ ping-rsc-ping (ocf::pacemaker:ping): Stopped ++ ++Transition Summary: ++ * Start fencing ( cluster01 ) ++ * Start ip-rsc ( cluster02 ) ++ * Start ip-rsc2 ( cluster01 ) ++ * Start dummy-rsc ( cluster02 ) ++ * Start ping-rsc-ping ( cluster01 ) ++ ++Executing cluster transition: ++ * Resource action: fencing monitor on cluster02 ++ * Resource action: fencing monitor on cluster01 ++ * Resource action: ip-rsc monitor on cluster02 ++ * Resource action: ip-rsc monitor on cluster01 ++ * Resource action: ip-rsc2 monitor on cluster02 ++ * Resource action: ip-rsc2 monitor on cluster01 ++ * Resource action: dummy-rsc monitor on cluster02 ++ * Resource action: dummy-rsc monitor on cluster01 ++ * Resource action: ping-rsc-ping monitor on cluster02 ++ * Resource action: ping-rsc-ping monitor on cluster01 ++ * Resource action: fencing start on cluster01 ++ * Resource action: ip-rsc start on cluster02 ++ * Resource action: ip-rsc2 start on cluster01 ++ * Resource action: dummy-rsc start on cluster02 ++ * Resource action: ping-rsc-ping start on cluster01 ++ * Resource action: ip-rsc monitor=20000 on cluster02 ++ * Resource 
action: ip-rsc2 monitor=10000 on cluster01 ++ * Resource action: dummy-rsc monitor=60000 on cluster02 ++ ++Revised cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Started cluster01 ++ ip-rsc (ocf::heartbeat:IPaddr2): Started cluster02 ++ ip-rsc2 (ocf::heartbeat:IPaddr2): Started cluster01 ++ dummy-rsc (ocf::pacemaker:Dummy): Started cluster02 ++ ping-rsc-ping (ocf::pacemaker:ping): Started cluster01 ++ +diff --git a/cts/scheduler/op-defaults.xml b/cts/scheduler/op-defaults.xml +new file mode 100644 +index 0000000..ae3b248 +--- /dev/null ++++ b/cts/scheduler/op-defaults.xml +@@ -0,0 +1,87 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From 67067927bc1b8e000c06d2b5a4ae6b9223ca13c7 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 13 May 2020 10:40:34 -0400 +Subject: [PATCH 13/17] Test: scheduler: Add a regression test for + rsc_defaults. + +--- + cts/cts-scheduler.in | 3 +- + cts/scheduler/rsc-defaults.dot | 18 ++++++ + cts/scheduler/rsc-defaults.exp | 124 +++++++++++++++++++++++++++++++++++++ + cts/scheduler/rsc-defaults.scores | 11 ++++ + cts/scheduler/rsc-defaults.summary | 38 ++++++++++++ + cts/scheduler/rsc-defaults.xml | 78 +++++++++++++++++++++++ + 6 files changed, 271 insertions(+), 1 deletion(-) + create mode 100644 cts/scheduler/rsc-defaults.dot + create mode 100644 cts/scheduler/rsc-defaults.exp + create mode 100644 cts/scheduler/rsc-defaults.scores + create mode 100644 cts/scheduler/rsc-defaults.summary + create mode 100644 cts/scheduler/rsc-defaults.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index b83f812..9022ce9 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -963,7 +963,8 @@ TESTS = [ + [ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ], + ], + [ +- [ "op-defaults", "Test op_defaults conditional expressions " ], ++ [ "op-defaults", "Test op_defaults conditional expressions" ], ++ [ "rsc-defaults", "Test rsc_defaults conditional expressions" ], + ], + + # @TODO: If pacemaker implements versioned attributes, uncomment these tests +diff --git a/cts/scheduler/rsc-defaults.dot b/cts/scheduler/rsc-defaults.dot +new file mode 100644 +index 0000000..d776614 +--- /dev/null ++++ b/cts/scheduler/rsc-defaults.dot +@@ -0,0 +1,18 @@ ++ digraph "g" { ++"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc2_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc2_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_monitor_0 cluster01" -> "ping-rsc-ping_start_0 cluster02" [ style = bold] ++"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" 
fontcolor="black"] ++"ping-rsc-ping_monitor_0 cluster02" -> "ping-rsc-ping_start_0 cluster02" [ style = bold] ++"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_start_0 cluster02" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/rsc-defaults.exp b/cts/scheduler/rsc-defaults.exp +new file mode 100644 +index 0000000..4aec360 +--- /dev/null ++++ b/cts/scheduler/rsc-defaults.exp +@@ -0,0 +1,124 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/rsc-defaults.scores b/cts/scheduler/rsc-defaults.scores +new file mode 100644 +index 0000000..e7f1bab +--- /dev/null ++++ b/cts/scheduler/rsc-defaults.scores +@@ -0,0 +1,11 @@ ++Allocation scores: ++pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 ++pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 ++pcmk__native_allocate: fencing allocation score on cluster01: 0 ++pcmk__native_allocate: fencing allocation score on cluster02: 0 ++pcmk__native_allocate: ip-rsc allocation score on cluster01: -INFINITY ++pcmk__native_allocate: ip-rsc allocation score on cluster02: -INFINITY ++pcmk__native_allocate: ip-rsc2 allocation score on cluster01: -INFINITY ++pcmk__native_allocate: ip-rsc2 allocation score on cluster02: -INFINITY ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0 ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0 +diff --git a/cts/scheduler/rsc-defaults.summary b/cts/scheduler/rsc-defaults.summary +new file mode 100644 +index 0000000..0066f2e +--- /dev/null ++++ b/cts/scheduler/rsc-defaults.summary +@@ -0,0 +1,38 @@ ++2 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure ++ ++Current cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Stopped ++ ip-rsc (ocf::heartbeat:IPaddr2): Stopped (disabled) ++ ip-rsc2 (ocf::heartbeat:IPaddr2): Stopped (disabled) ++ dummy-rsc (ocf::pacemaker:Dummy): Stopped (unmanaged) ++ ping-rsc-ping (ocf::pacemaker:ping): Stopped ++ ++Transition Summary: ++ * Start fencing ( cluster01 ) ++ * Start ping-rsc-ping ( cluster02 ) ++ ++Executing cluster transition: ++ * Resource action: fencing monitor on cluster02 ++ * Resource action: fencing monitor on cluster01 ++ * Resource action: ip-rsc monitor on cluster02 ++ * Resource action: ip-rsc monitor on cluster01 ++ * Resource action: ip-rsc2 monitor on cluster02 ++ * Resource action: ip-rsc2 monitor on cluster01 ++ * Resource action: dummy-rsc monitor on cluster02 ++ * Resource action: dummy-rsc monitor on cluster01 ++ * Resource action: ping-rsc-ping monitor on cluster02 ++ * Resource action: ping-rsc-ping monitor on cluster01 ++ * Resource action: fencing start on cluster01 ++ * Resource action: ping-rsc-ping start on cluster02 ++ ++Revised cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Started cluster01 ++ ip-rsc (ocf::heartbeat:IPaddr2): Stopped (disabled) ++ ip-rsc2 (ocf::heartbeat:IPaddr2): Stopped (disabled) ++ dummy-rsc (ocf::pacemaker:Dummy): Stopped (unmanaged) ++ ping-rsc-ping (ocf::pacemaker:ping): Started cluster02 ++ +diff --git a/cts/scheduler/rsc-defaults.xml 
b/cts/scheduler/rsc-defaults.xml +new file mode 100644 +index 0000000..38cae8b +--- /dev/null ++++ b/cts/scheduler/rsc-defaults.xml +@@ -0,0 +1,78 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From bcfe068ccb3f3cb6cc3509257fbc4a59bc2b1a41 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 13 May 2020 12:47:35 -0400 +Subject: [PATCH 14/17] Test: scheduler: Add a regression test for op_defaults + with an AND expr. + +--- + cts/cts-scheduler.in | 1 + + cts/scheduler/op-defaults-2.dot | 33 ++++++ + cts/scheduler/op-defaults-2.exp | 211 ++++++++++++++++++++++++++++++++++++ + cts/scheduler/op-defaults-2.scores | 11 ++ + cts/scheduler/op-defaults-2.summary | 46 ++++++++ + cts/scheduler/op-defaults-2.xml | 73 +++++++++++++ + 6 files changed, 375 insertions(+) + create mode 100644 cts/scheduler/op-defaults-2.dot + create mode 100644 cts/scheduler/op-defaults-2.exp + create mode 100644 cts/scheduler/op-defaults-2.scores + create mode 100644 cts/scheduler/op-defaults-2.summary + create mode 100644 cts/scheduler/op-defaults-2.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index 9022ce9..669b344 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -964,6 +964,7 @@ TESTS = [ + ], + [ + [ "op-defaults", "Test op_defaults conditional expressions" ], ++ [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ], + [ "rsc-defaults", "Test rsc_defaults conditional expressions" ], + ], + +diff --git a/cts/scheduler/op-defaults-2.dot b/cts/scheduler/op-defaults-2.dot +new file mode 100644 +index 0000000..5c67bd8 +--- /dev/null ++++ b/cts/scheduler/op-defaults-2.dot +@@ -0,0 +1,33 @@ ++ digraph "g" { ++"dummy-rsc_monitor_0 cluster01" -> "dummy-rsc_start_0 cluster02" [ style = bold] ++"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_0 cluster02" -> "dummy-rsc_start_0 cluster02" [ style = bold] ++"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_10000 cluster02" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_start_0 cluster02" -> "dummy-rsc_monitor_10000 cluster02" [ style = bold] ++"dummy-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_0 cluster01" -> "ip-rsc_start_0 cluster02" [ style = bold] ++"ip-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_0 cluster02" -> "ip-rsc_start_0 cluster02" [ style = bold] ++"ip-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_20000 cluster02" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_start_0 cluster02" -> "ip-rsc_monitor_20000 cluster02" [ style = bold] ++"ip-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_monitor_0 cluster01" -> "ping-rsc-ping_start_0 cluster01" [ style = bold] ++"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] 
++"ping-rsc-ping_monitor_0 cluster02" -> "ping-rsc-ping_start_0 cluster01" [ style = bold] ++"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"rsc-passes_monitor_0 cluster01" -> "rsc-passes_start_0 cluster01" [ style = bold] ++"rsc-passes_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"rsc-passes_monitor_0 cluster02" -> "rsc-passes_start_0 cluster01" [ style = bold] ++"rsc-passes_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"rsc-passes_monitor_10000 cluster01" [ style=bold color="green" fontcolor="black"] ++"rsc-passes_start_0 cluster01" -> "rsc-passes_monitor_10000 cluster01" [ style = bold] ++"rsc-passes_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/op-defaults-2.exp b/cts/scheduler/op-defaults-2.exp +new file mode 100644 +index 0000000..4324fde +--- /dev/null ++++ b/cts/scheduler/op-defaults-2.exp +@@ -0,0 +1,211 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/op-defaults-2.scores b/cts/scheduler/op-defaults-2.scores +new file mode 100644 +index 0000000..180c8b4 +--- /dev/null ++++ b/cts/scheduler/op-defaults-2.scores +@@ -0,0 +1,11 @@ ++Allocation scores: ++pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 ++pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 ++pcmk__native_allocate: fencing allocation score on cluster01: 0 ++pcmk__native_allocate: fencing allocation score on cluster02: 0 ++pcmk__native_allocate: ip-rsc allocation score on cluster01: 0 ++pcmk__native_allocate: ip-rsc allocation score on cluster02: 0 ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0 ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0 ++pcmk__native_allocate: rsc-passes allocation score on cluster01: 0 ++pcmk__native_allocate: rsc-passes allocation score on cluster02: 0 +diff --git a/cts/scheduler/op-defaults-2.summary b/cts/scheduler/op-defaults-2.summary +new file mode 100644 +index 0000000..16a68be +--- /dev/null ++++ b/cts/scheduler/op-defaults-2.summary +@@ -0,0 +1,46 @@ ++ ++Current cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Stopped ++ ip-rsc (ocf::heartbeat:IPaddr2): Stopped ++ rsc-passes (ocf::heartbeat:IPaddr2): Stopped ++ dummy-rsc (ocf::pacemaker:Dummy): Stopped ++ ping-rsc-ping (ocf::pacemaker:ping): Stopped ++ ++Transition Summary: ++ * Start fencing ( cluster01 ) ++ * Start ip-rsc ( cluster02 ) ++ * Start rsc-passes ( cluster01 ) ++ * Start dummy-rsc ( cluster02 ) ++ * Start ping-rsc-ping ( cluster01 ) ++ ++Executing cluster transition: ++ * Resource action: fencing monitor on cluster02 ++ * Resource action: fencing monitor on cluster01 ++ * Resource action: ip-rsc monitor on cluster02 ++ * Resource action: ip-rsc monitor on cluster01 ++ * Resource action: 
rsc-passes monitor on cluster02 ++ * Resource action: rsc-passes monitor on cluster01 ++ * Resource action: dummy-rsc monitor on cluster02 ++ * Resource action: dummy-rsc monitor on cluster01 ++ * Resource action: ping-rsc-ping monitor on cluster02 ++ * Resource action: ping-rsc-ping monitor on cluster01 ++ * Resource action: fencing start on cluster01 ++ * Resource action: ip-rsc start on cluster02 ++ * Resource action: rsc-passes start on cluster01 ++ * Resource action: dummy-rsc start on cluster02 ++ * Resource action: ping-rsc-ping start on cluster01 ++ * Resource action: ip-rsc monitor=20000 on cluster02 ++ * Resource action: rsc-passes monitor=10000 on cluster01 ++ * Resource action: dummy-rsc monitor=10000 on cluster02 ++ ++Revised cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Started cluster01 ++ ip-rsc (ocf::heartbeat:IPaddr2): Started cluster02 ++ rsc-passes (ocf::heartbeat:IPaddr2): Started cluster01 ++ dummy-rsc (ocf::pacemaker:Dummy): Started cluster02 ++ ping-rsc-ping (ocf::pacemaker:ping): Started cluster01 ++ +diff --git a/cts/scheduler/op-defaults-2.xml b/cts/scheduler/op-defaults-2.xml +new file mode 100644 +index 0000000..9f3c288 +--- /dev/null ++++ b/cts/scheduler/op-defaults-2.xml +@@ -0,0 +1,73 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From 017b783c2037d641c40a39dd7ec3a9eba0aaa6df Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 13 May 2020 15:18:28 -0400 +Subject: [PATCH 15/17] Doc: Pacemaker Explained: Add documentation for + rsc_expr and op_expr. + +--- + doc/Pacemaker_Explained/en-US/Ch-Rules.txt | 174 +++++++++++++++++++++++++++++ + 1 file changed, 174 insertions(+) + +diff --git a/doc/Pacemaker_Explained/en-US/Ch-Rules.txt b/doc/Pacemaker_Explained/en-US/Ch-Rules.txt +index 9d617f6..5df5f82 100644 +--- a/doc/Pacemaker_Explained/en-US/Ch-Rules.txt ++++ b/doc/Pacemaker_Explained/en-US/Ch-Rules.txt +@@ -522,6 +522,124 @@ You may wish to write +end="2005-03-31T23:59:59"+ to avoid confusion. + ------- + ===== + ++== Resource Expressions == ++ ++An +rsc_expression+ is a rule condition based on a resource agent's properties. ++This rule is only valid within an +rsc_defaults+ or +op_defaults+ context. None ++of the matching attributes of +class+, +provider+, and +type+ are required. If ++one is omitted, all values of that attribute will match. For instance, omitting +++type+ means every type will match. ++ ++.Attributes of an rsc_expression Element ++[width="95%",cols="2m,<5",options="header",align="center"] ++|========================================================= ++ ++|Field ++|Description ++ ++|id ++|A unique name for the expression (required) ++ indexterm:[XML attribute,id attribute,rsc_expression element] ++ indexterm:[XML element,rsc_expression element,id attribute] ++ ++|class ++|The standard name to be matched against resource agents ++ indexterm:[XML attribute,class attribute,rsc_expression element] ++ indexterm:[XML element,rsc_expression element,class attribute] ++ ++|provider ++|If given, the vendor to be matched against resource agents. This ++ only makes sense for agents using the OCF spec. 
++ indexterm:[XML attribute,provider attribute,rsc_expression element] ++ indexterm:[XML element,rsc_expression element,provider attribute] ++ ++|type ++|The name of the resource agent to be matched ++ indexterm:[XML attribute,type attribute,rsc_expression element] ++ indexterm:[XML element,rsc_expression element,type attribute] ++ ++|========================================================= ++ ++=== Example Resource-Based Expressions === ++ ++A small sample of how resource-based expressions can be used: ++ ++.True for all ocf:heartbeat:IPaddr2 resources ++==== ++[source,XML] ++---- ++ ++ ++ ++---- ++==== ++ ++.Provider doesn't apply to non-OCF resources ++==== ++[source,XML] ++---- ++ ++ ++ ++---- ++==== ++ ++== Operation Expressions == ++ ++An +op_expression+ is a rule condition based on an action of some resource ++agent. This rule is only valid within an +op_defaults+ context. ++ ++.Attributes of an op_expression Element ++[width="95%",cols="2m,<5",options="header",align="center"] ++|========================================================= ++ ++|Field ++|Description ++ ++|id ++|A unique name for the expression (required) ++ indexterm:[XML attribute,id attribute,op_expression element] ++ indexterm:[XML element,op_expression element,id attribute] ++ ++|name ++|The action name to match against. This can be any action supported by ++ the resource agent; common values include +monitor+, +start+, and +stop+ ++ (required). ++ indexterm:[XML attribute,name attribute,op_expression element] ++ indexterm:[XML element,op_expression element,name attribute] ++ ++|interval ++|The interval of the action to match against. If not given, only ++ the name attribute will be used to match. ++ indexterm:[XML attribute,interval attribute,op_expression element] ++ indexterm:[XML element,op_expression element,interval attribute] ++ ++|========================================================= ++ ++=== Example Operation-Based Expressions === ++ ++A small sample of how operation-based expressions can be used: ++ ++.True for all monitor actions ++==== ++[source,XML] ++---- ++ ++ ++ ++---- ++==== ++ ++.True for all monitor actions with a 10 second interval ++==== ++[source,XML] ++---- ++ ++ ++ ++---- ++==== ++ + == Using Rules to Determine Resource Location == + indexterm:[Rule,Determine Resource Location] + indexterm:[Resource,Location,Determine by Rules] +@@ -710,6 +828,62 @@ Rules may be used similarly in +instance_attributes+ or +utilization+ blocks. + Any single block may directly contain only a single rule, but that rule may + itself contain any number of rules. + +++rsc_expression+ and +op_expression+ blocks may additionally be used to set defaults ++on either a single resource or across an entire class of resources with a single ++rule. +rsc_expression+ may be used to select resource agents within both +rsc_defaults+ ++and +op_defaults+, while +op_expression+ may only be used within +op_defaults+. If ++multiple rules succeed for a given resource agent, the last one specified will be ++the one that takes effect. As with any other rule, boolean operations may be used ++to make more complicated expressions. 
++ ++.Set all IPaddr2 resources to stopped ++===== ++[source,XML] ++------- ++ ++ ++ ++ ++ ++ ++ ++ ++------- ++===== ++ ++.Set all monitor action timeouts to 7 seconds ++===== ++[source,XML] ++------- ++ ++ ++ ++ ++ ++ ++ ++ ++------- ++===== ++ ++.Set the monitor action timeout on all IPaddr2 resources with a given monitor interval to 8 seconds ++===== ++[source,XML] ++------- ++ ++ ++ ++ ++ ++ ++ ++ ++ ++------- ++===== ++ + === Using Rules to Control Cluster Options === + indexterm:[Rule,Controlling Cluster Options] + indexterm:[Cluster,Setting Options with Rules] +-- +1.8.3.1 + + +From b8dd16c5e454445f73416ae8b74649545ee1b472 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 13 May 2020 16:26:21 -0400 +Subject: [PATCH 16/17] Test: scheduler: Add a test for multiple rules applying + to the same resource. + +--- + cts/cts-scheduler.in | 1 + + cts/scheduler/op-defaults-3.dot | 14 +++++++ + cts/scheduler/op-defaults-3.exp | 83 +++++++++++++++++++++++++++++++++++++ + cts/scheduler/op-defaults-3.scores | 5 +++ + cts/scheduler/op-defaults-3.summary | 26 ++++++++++++ + cts/scheduler/op-defaults-3.xml | 54 ++++++++++++++++++++++++ + 6 files changed, 183 insertions(+) + create mode 100644 cts/scheduler/op-defaults-3.dot + create mode 100644 cts/scheduler/op-defaults-3.exp + create mode 100644 cts/scheduler/op-defaults-3.scores + create mode 100644 cts/scheduler/op-defaults-3.summary + create mode 100644 cts/scheduler/op-defaults-3.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index 669b344..2c2d14f 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -965,6 +965,7 @@ TESTS = [ + [ + [ "op-defaults", "Test op_defaults conditional expressions" ], + [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ], ++ [ "op-defaults-3", "Test op_defaults precedence" ], + [ "rsc-defaults", "Test rsc_defaults conditional expressions" ], + ], + +diff --git a/cts/scheduler/op-defaults-3.dot b/cts/scheduler/op-defaults-3.dot +new file mode 100644 +index 0000000..382f630 +--- /dev/null ++++ b/cts/scheduler/op-defaults-3.dot +@@ -0,0 +1,14 @@ ++ digraph "g" { ++"dummy-rsc_monitor_0 cluster01" -> "dummy-rsc_start_0 cluster02" [ style = bold] ++"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_0 cluster02" -> "dummy-rsc_start_0 cluster02" [ style = bold] ++"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_10000 cluster02" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_start_0 cluster02" -> "dummy-rsc_monitor_10000 cluster02" [ style = bold] ++"dummy-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/op-defaults-3.exp b/cts/scheduler/op-defaults-3.exp +new file mode 100644 +index 0000000..6d567dc +--- /dev/null ++++ b/cts/scheduler/op-defaults-3.exp +@@ -0,0 +1,83 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git 
a/cts/scheduler/op-defaults-3.scores b/cts/scheduler/op-defaults-3.scores +new file mode 100644 +index 0000000..0a5190a +--- /dev/null ++++ b/cts/scheduler/op-defaults-3.scores +@@ -0,0 +1,5 @@ ++Allocation scores: ++pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 ++pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 ++pcmk__native_allocate: fencing allocation score on cluster01: 0 ++pcmk__native_allocate: fencing allocation score on cluster02: 0 +diff --git a/cts/scheduler/op-defaults-3.summary b/cts/scheduler/op-defaults-3.summary +new file mode 100644 +index 0000000..a83eb15 +--- /dev/null ++++ b/cts/scheduler/op-defaults-3.summary +@@ -0,0 +1,26 @@ ++ ++Current cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Stopped ++ dummy-rsc (ocf::pacemaker:Dummy): Stopped ++ ++Transition Summary: ++ * Start fencing ( cluster01 ) ++ * Start dummy-rsc ( cluster02 ) ++ ++Executing cluster transition: ++ * Resource action: fencing monitor on cluster02 ++ * Resource action: fencing monitor on cluster01 ++ * Resource action: dummy-rsc monitor on cluster02 ++ * Resource action: dummy-rsc monitor on cluster01 ++ * Resource action: fencing start on cluster01 ++ * Resource action: dummy-rsc start on cluster02 ++ * Resource action: dummy-rsc monitor=10000 on cluster02 ++ ++Revised cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Started cluster01 ++ dummy-rsc (ocf::pacemaker:Dummy): Started cluster02 ++ +diff --git a/cts/scheduler/op-defaults-3.xml b/cts/scheduler/op-defaults-3.xml +new file mode 100644 +index 0000000..4a8912e +--- /dev/null ++++ b/cts/scheduler/op-defaults-3.xml +@@ -0,0 +1,54 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From b9ccde16609e7d005ac0578a603da97a1808704a Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 15 May 2020 13:48:47 -0400 +Subject: [PATCH 17/17] Test: scheduler: Add a test for rsc_defaults not + specifying type. 
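[Editor's note] The op-defaults-3 test exercises the precedence rule stated in the patch 15 documentation: when several defaults blocks match the same resource or operation, the one specified last takes effect. Sketched below with hypothetical timeout values — the blocks are applied in document order and a later match simply overwrites an earlier one:

[source,C]
----
#include <stdio.h>
#include <string.h>

struct op_defaults { const char *op_name; int timeout_ms; };

int main(void)
{
    /* Two op_defaults blocks in document order, both matching "monitor" */
    struct op_defaults blocks[] = {
        { "monitor", 20000 },
        { "monitor", 8000 },
    };
    int timeout_ms = 0;

    for (size_t i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
        if (strcmp(blocks[i].op_name, "monitor") == 0) {
            timeout_ms = blocks[i].timeout_ms; /* later match overwrites */
        }
    }
    printf("effective monitor timeout: %d ms\n", timeout_ms); /* 8000 */
    return 0;
}
----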
+ +--- + cts/cts-scheduler.in | 1 + + cts/scheduler/rsc-defaults-2.dot | 11 ++++++ + cts/scheduler/rsc-defaults-2.exp | 72 ++++++++++++++++++++++++++++++++++++ + cts/scheduler/rsc-defaults-2.scores | 7 ++++ + cts/scheduler/rsc-defaults-2.summary | 27 ++++++++++++++ + cts/scheduler/rsc-defaults-2.xml | 52 ++++++++++++++++++++++++++ + 6 files changed, 170 insertions(+) + create mode 100644 cts/scheduler/rsc-defaults-2.dot + create mode 100644 cts/scheduler/rsc-defaults-2.exp + create mode 100644 cts/scheduler/rsc-defaults-2.scores + create mode 100644 cts/scheduler/rsc-defaults-2.summary + create mode 100644 cts/scheduler/rsc-defaults-2.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index 2c2d14f..346ada2 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -967,6 +967,7 @@ TESTS = [ + [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ], + [ "op-defaults-3", "Test op_defaults precedence" ], + [ "rsc-defaults", "Test rsc_defaults conditional expressions" ], ++ [ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ], + ], + + # @TODO: If pacemaker implements versioned attributes, uncomment these tests +diff --git a/cts/scheduler/rsc-defaults-2.dot b/cts/scheduler/rsc-defaults-2.dot +new file mode 100644 +index 0000000..b43c5e6 +--- /dev/null ++++ b/cts/scheduler/rsc-defaults-2.dot +@@ -0,0 +1,11 @@ ++ digraph "g" { ++"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/rsc-defaults-2.exp b/cts/scheduler/rsc-defaults-2.exp +new file mode 100644 +index 0000000..e9e1b5f +--- /dev/null ++++ b/cts/scheduler/rsc-defaults-2.exp +@@ -0,0 +1,72 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/rsc-defaults-2.scores b/cts/scheduler/rsc-defaults-2.scores +new file mode 100644 +index 0000000..4b70f54 +--- /dev/null ++++ b/cts/scheduler/rsc-defaults-2.scores +@@ -0,0 +1,7 @@ ++Allocation scores: ++pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 ++pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 ++pcmk__native_allocate: fencing allocation score on cluster01: 0 ++pcmk__native_allocate: fencing allocation score on cluster02: 0 ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0 ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0 +diff --git a/cts/scheduler/rsc-defaults-2.summary b/cts/scheduler/rsc-defaults-2.summary +new file mode 100644 +index 0000000..46a2a2d +--- /dev/null ++++ b/cts/scheduler/rsc-defaults-2.summary +@@ -0,0 +1,27 @@ ++ ++Current cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Stopped ++ dummy-rsc (ocf::pacemaker:Dummy): Stopped 
(unmanaged) ++ ping-rsc-ping (ocf::pacemaker:ping): Stopped (unmanaged) ++ ++Transition Summary: ++ * Start fencing ( cluster01 ) ++ ++Executing cluster transition: ++ * Resource action: fencing monitor on cluster02 ++ * Resource action: fencing monitor on cluster01 ++ * Resource action: dummy-rsc monitor on cluster02 ++ * Resource action: dummy-rsc monitor on cluster01 ++ * Resource action: ping-rsc-ping monitor on cluster02 ++ * Resource action: ping-rsc-ping monitor on cluster01 ++ * Resource action: fencing start on cluster01 ++ ++Revised cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Started cluster01 ++ dummy-rsc (ocf::pacemaker:Dummy): Stopped (unmanaged) ++ ping-rsc-ping (ocf::pacemaker:ping): Stopped (unmanaged) ++ +diff --git a/cts/scheduler/rsc-defaults-2.xml b/cts/scheduler/rsc-defaults-2.xml +new file mode 100644 +index 0000000..a160fae +--- /dev/null ++++ b/cts/scheduler/rsc-defaults-2.xml +@@ -0,0 +1,52 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + diff --git a/SOURCES/002-demote.patch b/SOURCES/002-demote.patch new file mode 100644 index 0000000..5da2515 --- /dev/null +++ b/SOURCES/002-demote.patch @@ -0,0 +1,8664 @@ +From f1f71b3f3c342987db0058e7db0030417f3f83fa Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 28 May 2020 08:22:00 -0500 +Subject: [PATCH 01/20] Refactor: scheduler: functionize comparing on-fail + values + +The action_fail_response enum values used for the "on-fail" operation +meta-attribute were initially intended to be in order of severity. +However as new values were added, they were added to the end (out of severity +order) to preserve API backward compatibility. + +This resulted in a convoluted comparison of values that will only get worse as +more values are added. + +This commit adds a comparison function to isolate that complexity. +--- + include/crm/pengine/common.h | 32 ++++++++++++------ + lib/pengine/unpack.c | 80 +++++++++++++++++++++++++++++++++++++++++--- + 2 files changed, 97 insertions(+), 15 deletions(-) + +diff --git a/include/crm/pengine/common.h b/include/crm/pengine/common.h +index 3a770b7..2737b2e 100644 +--- a/include/crm/pengine/common.h ++++ b/include/crm/pengine/common.h +@@ -22,18 +22,29 @@ extern "C" { + extern gboolean was_processing_error; + extern gboolean was_processing_warning; + +-/* order is significant here +- * items listed in order of accending severeness +- * more severe actions take precedent over lower ones ++/* The order is (partially) significant here; the values from action_fail_ignore ++ * through action_fail_fence are in order of increasing severity. ++ * ++ * @COMPAT The values should be ordered and numbered per the "TODO" comments ++ * below, so all values are in order of severity and there is room for ++ * future additions, but that would break API compatibility. ++ * @TODO For now, we just use a function to compare the values specially, but ++ * at the next compatibility break, we should arrange things properly. 
+ */ + enum action_fail_response { +- action_fail_ignore, +- action_fail_recover, +- action_fail_migrate, /* recover by moving it somewhere else */ +- action_fail_block, +- action_fail_stop, +- action_fail_standby, +- action_fail_fence, ++ action_fail_ignore, // @TODO = 10 ++ // @TODO action_fail_demote = 20, ++ action_fail_recover, // @TODO = 30 ++ // @TODO action_fail_reset_remote = 40, ++ // @TODO action_fail_restart_container = 50, ++ action_fail_migrate, // @TODO = 60 ++ action_fail_block, // @TODO = 70 ++ action_fail_stop, // @TODO = 80 ++ action_fail_standby, // @TODO = 90 ++ action_fail_fence, // @TODO = 100 ++ ++ // @COMPAT Values below here are out of order for API compatibility ++ + action_fail_restart_container, + + /* This is reserved for internal use for remote node connection resources. +@@ -44,6 +55,7 @@ enum action_fail_response { + */ + action_fail_reset_remote, + ++ action_fail_demote, + }; + + /* the "done" action must be the "pre" action +1 */ +diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c +index 3c6606b..f688881 100644 +--- a/lib/pengine/unpack.c ++++ b/lib/pengine/unpack.c +@@ -2770,6 +2770,78 @@ last_change_str(xmlNode *xml_op) + return ((when_s && *when_s)? when_s : "unknown time"); + } + ++/*! ++ * \internal ++ * \brief Compare two on-fail values ++ * ++ * \param[in] first One on-fail value to compare ++ * \param[in] second The other on-fail value to compare ++ * ++ * \return A negative number if second is more severe than first, zero if they ++ * are equal, or a positive number if first is more severe than second. ++ * \note This is only needed until the action_fail_response values can be ++ * renumbered at the next API compatibility break. ++ */ ++static int ++cmp_on_fail(enum action_fail_response first, enum action_fail_response second) ++{ ++ switch (first) { ++ case action_fail_reset_remote: ++ switch (second) { ++ case action_fail_ignore: ++ case action_fail_recover: ++ return 1; ++ case action_fail_reset_remote: ++ return 0; ++ default: ++ return -1; ++ } ++ break; ++ ++ case action_fail_restart_container: ++ switch (second) { ++ case action_fail_ignore: ++ case action_fail_recover: ++ case action_fail_reset_remote: ++ return 1; ++ case action_fail_restart_container: ++ return 0; ++ default: ++ return -1; ++ } ++ break; ++ ++ default: ++ break; ++ } ++ switch (second) { ++ case action_fail_reset_remote: ++ switch (first) { ++ case action_fail_ignore: ++ case action_fail_recover: ++ return -1; ++ default: ++ return 1; ++ } ++ break; ++ ++ case action_fail_restart_container: ++ switch (first) { ++ case action_fail_ignore: ++ case action_fail_recover: ++ case action_fail_reset_remote: ++ return -1; ++ default: ++ return 1; ++ } ++ break; ++ ++ default: ++ break; ++ } ++ return first - second; ++} ++ + static void + unpack_rsc_op_failure(pe_resource_t * rsc, pe_node_t * node, int rc, xmlNode * xml_op, xmlNode ** last_failure, + enum action_fail_response * on_fail, pe_working_set_t * data_set) +@@ -2829,10 +2901,7 @@ unpack_rsc_op_failure(pe_resource_t * rsc, pe_node_t * node, int rc, xmlNode * x + } + + action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set); +- if ((action->on_fail <= action_fail_fence && *on_fail < action->on_fail) || +- (action->on_fail == action_fail_reset_remote && *on_fail <= action_fail_recover) || +- (action->on_fail == action_fail_restart_container && *on_fail <= action_fail_recover) || +- (*on_fail == action_fail_restart_container && action->on_fail >= action_fail_migrate)) { ++ if (cmp_on_fail(*on_fail, 
action->on_fail) < 0) { + pe_rsc_trace(rsc, "on-fail %s -> %s for %s (%s)", fail2text(*on_fail), + fail2text(action->on_fail), action->uuid, key); + *on_fail = action->on_fail; +@@ -3675,7 +3744,8 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, + + record_failed_op(xml_op, node, rsc, data_set); + +- if (failure_strategy == action_fail_restart_container && *on_fail <= action_fail_recover) { ++ if ((failure_strategy == action_fail_restart_container) ++ && cmp_on_fail(*on_fail, action_fail_recover) <= 0) { + *on_fail = failure_strategy; + } + +-- +1.8.3.1 + + +From ef246ff05d7459f9672b10ac1873e3191a3b46e9 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 28 May 2020 08:27:47 -0500 +Subject: [PATCH 02/20] Fix: scheduler: disallow on-fail=stop for stop + operations + +because it would loop infinitely as long as the stop continued to fail +--- + lib/pengine/utils.c | 15 ++++++++++++--- + 1 file changed, 12 insertions(+), 3 deletions(-) + +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index 20a8db5..3fb7e62 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -716,16 +716,25 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, + return action; + } + ++static bool ++valid_stop_on_fail(const char *value) ++{ ++ return safe_str_neq(value, "standby") ++ && safe_str_neq(value, "stop"); ++} ++ + static const char * + unpack_operation_on_fail(pe_action_t * action) + { + + const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL); + +- if (safe_str_eq(action->task, CRMD_ACTION_STOP) && safe_str_eq(value, "standby")) { ++ if (safe_str_eq(action->task, CRMD_ACTION_STOP) ++ && !valid_stop_on_fail(value)) { ++ + pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop " +- "action to default value because 'standby' is not " +- "allowed for stop", action->rsc->id); ++ "action to default value because '%s' is not " ++ "allowed for stop", action->rsc->id, value); + return NULL; + } else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) { + /* demote on_fail defaults to master monitor value if present */ +-- +1.8.3.1 + + +From 8dceba792ffe65cd77c3aae430067638dbba63f9 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 28 May 2020 08:50:33 -0500 +Subject: [PATCH 03/20] Refactor: scheduler: use more appropriate types in a + couple places + +--- + lib/pengine/unpack.c | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c +index f688881..6a350e5 100644 +--- a/lib/pengine/unpack.c ++++ b/lib/pengine/unpack.c +@@ -2244,7 +2244,7 @@ unpack_lrm_rsc_state(pe_node_t * node, xmlNode * rsc_entry, pe_working_set_t * d + xmlNode *rsc_op = NULL; + xmlNode *last_failure = NULL; + +- enum action_fail_response on_fail = FALSE; ++ enum action_fail_response on_fail = action_fail_ignore; + enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN; + + crm_trace("[%s] Processing %s on %s", +@@ -2287,7 +2287,6 @@ unpack_lrm_rsc_state(pe_node_t * node, xmlNode * rsc_entry, pe_working_set_t * d + + /* process operations */ + saved_role = rsc->role; +- on_fail = action_fail_ignore; + rsc->role = RSC_ROLE_UNKNOWN; + sorted_op_list = g_list_sort(op_list, sort_op_by_callid); + +@@ -3376,7 +3375,7 @@ int pe__target_rc_from_xml(xmlNode *xml_op) + static enum action_fail_response + get_action_on_fail(pe_resource_t *rsc, const char *key, const char *task, pe_working_set_t * data_set) + { +- int result = action_fail_recover; ++ enum action_fail_response result = action_fail_recover; + 
pe_action_t *action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set); + + result = action->on_fail; +-- +1.8.3.1 + + +From a4d6a20a990d1461184f888e21aa61cddff8996d Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 2 Jun 2020 12:05:57 -0500 +Subject: [PATCH 04/20] Low: libpacemaker: don't force stop when skipping + reload of failed resource + +Normal failure recovery will apply, which will stop if needed. + +(The stop was forced as of 2558d76f.) +--- + lib/pacemaker/pcmk_sched_native.c | 16 +++++++++++++--- + 1 file changed, 13 insertions(+), 3 deletions(-) + +diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c +index bd8a0b5..ff2fb92 100644 +--- a/lib/pacemaker/pcmk_sched_native.c ++++ b/lib/pacemaker/pcmk_sched_native.c +@@ -3362,9 +3362,19 @@ ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set) + pe_rsc_trace(rsc, "%s: unmanaged", rsc->id); + return; + +- } else if (is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending)) { +- pe_rsc_trace(rsc, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags); +- stop_action(rsc, node, FALSE); /* Force a full restart, overkill? */ ++ } else if (is_set(rsc->flags, pe_rsc_failed)) { ++ /* We don't need to specify any particular actions here, normal failure ++ * recovery will apply. ++ */ ++ pe_rsc_trace(rsc, "%s: preventing reload because failed", rsc->id); ++ return; ++ ++ } else if (is_set(rsc->flags, pe_rsc_start_pending)) { ++ /* If a resource's configuration changed while a start was pending, ++ * force a full restart. ++ */ ++ pe_rsc_trace(rsc, "%s: preventing reload because start pending", rsc->id); ++ stop_action(rsc, node, FALSE); + return; + + } else if (node == NULL) { +-- +1.8.3.1 + + +From f2d244bc4306297d5960c0ba54e0a85a68e864ee Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 2 Jun 2020 12:16:33 -0500 +Subject: [PATCH 05/20] Test: scheduler: test forcing a restart instead of + reload when start is pending + +--- + cts/cts-scheduler.in | 1 + + cts/scheduler/params-3.dot | 28 ++++++ + cts/scheduler/params-3.exp | 208 +++++++++++++++++++++++++++++++++++++++++ + cts/scheduler/params-3.scores | 21 +++++ + cts/scheduler/params-3.summary | 45 +++++++++ + cts/scheduler/params-3.xml | 154 ++++++++++++++++++++++++++++++ + 6 files changed, 457 insertions(+) + create mode 100644 cts/scheduler/params-3.dot + create mode 100644 cts/scheduler/params-3.exp + create mode 100644 cts/scheduler/params-3.scores + create mode 100644 cts/scheduler/params-3.summary + create mode 100644 cts/scheduler/params-3.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index 346ada2..ae8247e 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -84,6 +84,7 @@ TESTS = [ + [ "params-0", "Params: No change" ], + [ "params-1", "Params: Changed" ], + [ "params-2", "Params: Resource definition" ], ++ [ "params-3", "Params: Restart instead of reload if start pending" ], + [ "params-4", "Params: Reload" ], + [ "params-5", "Params: Restart based on probe digest" ], + [ "novell-251689", "Resource definition change + target_role=stopped" ], +diff --git a/cts/scheduler/params-3.dot b/cts/scheduler/params-3.dot +new file mode 100644 +index 0000000..d681ee5 +--- /dev/null ++++ b/cts/scheduler/params-3.dot +@@ -0,0 +1,28 @@ ++ digraph "g" { ++"Cancel rsc_c001n02_monitor_5000 c001n02" [ style=bold color="green" fontcolor="black"] ++"DcIPaddr_monitor_0 c001n01" -> "DcIPaddr_start_0 c001n02" [ style = bold] ++"DcIPaddr_monitor_0 c001n01" [ 
style=bold color="green" fontcolor="black"] ++"DcIPaddr_monitor_0 c001n03" -> "DcIPaddr_start_0 c001n02" [ style = bold] ++"DcIPaddr_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"] ++"DcIPaddr_monitor_0 c001n08" -> "DcIPaddr_start_0 c001n02" [ style = bold] ++"DcIPaddr_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"] ++"DcIPaddr_monitor_5000 c001n02" [ style=bold color="green" fontcolor="black"] ++"DcIPaddr_start_0 c001n02" -> "DcIPaddr_monitor_5000 c001n02" [ style = bold] ++"DcIPaddr_start_0 c001n02" [ style=bold color="green" fontcolor="black"] ++"DcIPaddr_stop_0 c001n02" -> "DcIPaddr_start_0 c001n02" [ style = bold] ++"DcIPaddr_stop_0 c001n02" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n01_monitor_0 c001n02" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n01_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n01_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n02_monitor_0 c001n01" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n02_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n02_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n02_monitor_6000 c001n02" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n03_monitor_0 c001n01" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n03_monitor_0 c001n02" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n03_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n08_monitor_0 c001n01" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n08_monitor_0 c001n02" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n08_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n08_monitor_5000 c001n08" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/params-3.exp b/cts/scheduler/params-3.exp +new file mode 100644 +index 0000000..5cccdec +--- /dev/null ++++ b/cts/scheduler/params-3.exp +@@ -0,0 +1,208 @@ ++ [208 lines of transition-graph XML omitted: the markup was stripped in extraction and cannot be reconstructed] +diff --git a/cts/scheduler/params-3.scores b/cts/scheduler/params-3.scores +new file mode 100644 +index 0000000..00417ea +--- /dev/null ++++ b/cts/scheduler/params-3.scores +@@ -0,0 +1,21 @@ ++Allocation scores: ++pcmk__native_allocate: DcIPaddr allocation score on c001n01: -INFINITY ++pcmk__native_allocate: DcIPaddr allocation score on c001n02: 0 ++pcmk__native_allocate: DcIPaddr allocation score on c001n03: -INFINITY ++pcmk__native_allocate: DcIPaddr allocation score on c001n08: -INFINITY ++pcmk__native_allocate: rsc_c001n01 allocation score on c001n01: 100 ++pcmk__native_allocate: rsc_c001n01 allocation score on c001n02: 0 ++pcmk__native_allocate: rsc_c001n01 allocation score on c001n03: 0 ++pcmk__native_allocate: rsc_c001n01 allocation score on c001n08: 0 ++pcmk__native_allocate: rsc_c001n02 allocation score on c001n01: 0 ++pcmk__native_allocate: rsc_c001n02
allocation score on c001n02: 100 ++pcmk__native_allocate: rsc_c001n02 allocation score on c001n03: 0 ++pcmk__native_allocate: rsc_c001n02 allocation score on c001n08: 0 ++pcmk__native_allocate: rsc_c001n03 allocation score on c001n01: 0 ++pcmk__native_allocate: rsc_c001n03 allocation score on c001n02: 0 ++pcmk__native_allocate: rsc_c001n03 allocation score on c001n03: 100 ++pcmk__native_allocate: rsc_c001n03 allocation score on c001n08: 0 ++pcmk__native_allocate: rsc_c001n08 allocation score on c001n01: 0 ++pcmk__native_allocate: rsc_c001n08 allocation score on c001n02: 0 ++pcmk__native_allocate: rsc_c001n08 allocation score on c001n03: 0 ++pcmk__native_allocate: rsc_c001n08 allocation score on c001n08: 100 +diff --git a/cts/scheduler/params-3.summary b/cts/scheduler/params-3.summary +new file mode 100644 +index 0000000..257f8ba +--- /dev/null ++++ b/cts/scheduler/params-3.summary +@@ -0,0 +1,45 @@ ++ ++Current cluster status: ++Online: [ c001n01 c001n02 c001n03 c001n08 ] ++ ++ DcIPaddr (ocf::heartbeat:IPaddr): Starting c001n02 ++ rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 ++ rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 ++ rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 ++ rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 ++ ++Transition Summary: ++ * Restart DcIPaddr ( c001n02 ) ++ ++Executing cluster transition: ++ * Resource action: DcIPaddr monitor on c001n08 ++ * Resource action: DcIPaddr monitor on c001n03 ++ * Resource action: DcIPaddr monitor on c001n01 ++ * Resource action: DcIPaddr stop on c001n02 ++ * Resource action: rsc_c001n08 monitor on c001n03 ++ * Resource action: rsc_c001n08 monitor on c001n02 ++ * Resource action: rsc_c001n08 monitor on c001n01 ++ * Resource action: rsc_c001n08 monitor=5000 on c001n08 ++ * Resource action: rsc_c001n02 monitor=6000 on c001n02 ++ * Resource action: rsc_c001n02 monitor on c001n08 ++ * Resource action: rsc_c001n02 monitor on c001n03 ++ * Resource action: rsc_c001n02 monitor on c001n01 ++ * Resource action: rsc_c001n02 cancel=5000 on c001n02 ++ * Resource action: rsc_c001n03 monitor on c001n08 ++ * Resource action: rsc_c001n03 monitor on c001n02 ++ * Resource action: rsc_c001n03 monitor on c001n01 ++ * Resource action: rsc_c001n01 monitor on c001n08 ++ * Resource action: rsc_c001n01 monitor on c001n03 ++ * Resource action: rsc_c001n01 monitor on c001n02 ++ * Resource action: DcIPaddr start on c001n02 ++ * Resource action: DcIPaddr monitor=5000 on c001n02 ++ ++Revised cluster status: ++Online: [ c001n01 c001n02 c001n03 c001n08 ] ++ ++ DcIPaddr (ocf::heartbeat:IPaddr): Started c001n02 ++ rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 ++ rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 ++ rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 ++ rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 ++ +diff --git a/cts/scheduler/params-3.xml b/cts/scheduler/params-3.xml +new file mode 100644 +index 0000000..ee6e157 +--- /dev/null ++++ b/cts/scheduler/params-3.xml +@@ -0,0 +1,154 @@ ++ [154 lines of CIB test-input XML omitted: the markup was stripped in extraction and cannot be reconstructed] +-- +1.8.3.1 + + +From ff6aebecf8b40b882bddbd0d78e3f8702f97147e Mon Sep 17
00:00:00 2001 +From: Ken Gaillot +Date: Mon, 13 Apr 2020 12:22:35 -0500 +Subject: [PATCH 06/20] Doc: libpacemaker: improve comments when logging + actions + +... with slight refactoring for consistency +--- + lib/pacemaker/pcmk_sched_native.c | 41 ++++++++++++++++++++++----------------- + 1 file changed, 23 insertions(+), 18 deletions(-) + +diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c +index ff2fb92..f14e690 100644 +--- a/lib/pacemaker/pcmk_sched_native.c ++++ b/lib/pacemaker/pcmk_sched_native.c +@@ -2348,8 +2348,6 @@ native_expand(pe_resource_t * rsc, pe_working_set_t * data_set) + } \ + } while(0) + +-static int rsc_width = 5; +-static int detail_width = 5; + static void + LogAction(const char *change, pe_resource_t *rsc, pe_node_t *origin, pe_node_t *destination, pe_action_t *action, pe_action_t *source, gboolean terminal) + { +@@ -2360,6 +2358,9 @@ LogAction(const char *change, pe_resource_t *rsc, pe_node_t *origin, pe_node_t * + bool same_role = FALSE; + bool need_role = FALSE; + ++ static int rsc_width = 5; ++ static int detail_width = 5; ++ + CRM_ASSERT(action); + CRM_ASSERT(destination != NULL || origin != NULL); + +@@ -2384,36 +2385,40 @@ LogAction(const char *change, pe_resource_t *rsc, pe_node_t *origin, pe_node_t * + same_role = TRUE; + } + +- if(need_role && origin == NULL) { +- /* Promoting from Stopped */ ++ if (need_role && (origin == NULL)) { ++ /* Starting and promoting a promotable clone instance */ + details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), destination->details->uname); + +- } else if(need_role && destination == NULL) { +- /* Demoting a Master or Stopping a Slave */ ++ } else if (origin == NULL) { ++ /* Starting a resource */ ++ details = crm_strdup_printf("%s", destination->details->uname); ++ ++ } else if (need_role && (destination == NULL)) { ++ /* Stopping a promotable clone instance */ + details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname); + +- } else if(origin == NULL || destination == NULL) { +- /* Starting or stopping a resource */ +- details = crm_strdup_printf("%s", origin?origin->details->uname:destination->details->uname); ++ } else if (destination == NULL) { ++ /* Stopping a resource */ ++ details = crm_strdup_printf("%s", origin->details->uname); + +- } else if(need_role && same_role && same_host) { +- /* Recovering or restarting a promotable clone resource */ ++ } else if (need_role && same_role && same_host) { ++ /* Recovering, restarting or re-promoting a promotable clone instance */ + details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname); + +- } else if(same_role && same_host) { ++ } else if (same_role && same_host) { + /* Recovering or Restarting a normal resource */ + details = crm_strdup_printf("%s", origin->details->uname); + +- } else if(same_role && need_role) { +- /* Moving a promotable clone resource */ ++ } else if (need_role && same_role) { ++ /* Moving a promotable clone instance */ + details = crm_strdup_printf("%s -> %s %s", origin->details->uname, destination->details->uname, role2text(rsc->role)); + +- } else if(same_role) { ++ } else if (same_role) { + /* Moving a normal resource */ + details = crm_strdup_printf("%s -> %s", origin->details->uname, destination->details->uname); + +- } else if(same_host) { +- /* Promoting or demoting a promotable clone resource */ ++ } else if (same_host) { ++ /* Promoting or demoting a promotable clone instance */ + details = crm_strdup_printf("%s -> %s %s", 
role2text(rsc->role), role2text(rsc->next_role), origin->details->uname); + + } else { +@@ -2560,7 +2565,7 @@ LogActions(pe_resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) + pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role), + next->details->uname); + +- } else if (start && is_set(start->flags, pe_action_runnable) == FALSE) { ++ } else if (is_not_set(start->flags, pe_action_runnable)) { + LogAction("Stop", rsc, current, NULL, stop, + (stop && stop->reason)? stop : start, terminal); + STOP_SANITY_ASSERT(__LINE__); +-- +1.8.3.1 + + +From 98c3b649fa065b7e7a59029cc2f887bc462d170a Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 13 Apr 2020 12:23:22 -0500 +Subject: [PATCH 07/20] Log: libpacemaker: check for re-promotes specifically + +If a promotable clone instance is being demoted and promoted on its current +node, without also stopping and starting, it previously would be logged as +"Leave" indicating unchanged, because the current and next role are the same. + +Now, check for this situation specifically, and log it as "Re-promote". + +Currently, the scheduler is not capable of generating this situation, but +upcoming changes will. +--- + lib/pacemaker/pcmk_sched_native.c | 12 ++++++++++-- + 1 file changed, 10 insertions(+), 2 deletions(-) + +diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c +index f14e690..89952bf 100644 +--- a/lib/pacemaker/pcmk_sched_native.c ++++ b/lib/pacemaker/pcmk_sched_native.c +@@ -2561,9 +2561,17 @@ LogActions(pe_resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) + } else if (is_set(rsc->flags, pe_rsc_reload)) { + LogAction("Reload", rsc, current, next, start, NULL, terminal); + ++ + } else if (start == NULL || is_set(start->flags, pe_action_optional)) { +- pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role), +- next->details->uname); ++ if ((demote != NULL) && (promote != NULL) ++ && is_not_set(demote->flags, pe_action_optional) ++ && is_not_set(promote->flags, pe_action_optional)) { ++ LogAction("Re-promote", rsc, current, next, promote, demote, ++ terminal); ++ } else { ++ pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, ++ role2text(rsc->role), next->details->uname); ++ } + + } else if (is_not_set(start->flags, pe_action_runnable)) { + LogAction("Stop", rsc, current, NULL, stop, +-- +1.8.3.1 + + +From fd55a6660574c0bca517fd519377340712fb443a Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 13 Apr 2020 12:51:03 -0500 +Subject: [PATCH 08/20] Doc: libpacemaker: improve comments for resource state + and action matrices + +Also, make them static, for linker efficiency. 
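
[Editorial illustration, not part of the quoted patch series.] The two matrices this commit documents are easiest to understand by seeing the walk they drive. Below is a minimal, self-contained C sketch under simplified assumptions: the enum, array, and function names are hypothetical stand-ins, not the actual Pacemaker declarations (those are rsc_state_matrix and rsc_action_matrix, shown in the hunk that follows). Each lookup in the state table yields the next intermediate role, and each hop is the point where the real code would index the action table to schedule StartRsc, PromoteRsc, and so on.

#include <stdio.h>

enum role { UNKNOWN, STOPPED, STARTED, SLAVE, MASTER, ROLE_MAX };

static const char *role_name[ROLE_MAX] = {
    "Unknown", "Stopped", "Started", "Slave", "Master"
};

/* Mirror of rsc_state_matrix: entry [current][target] is the next
 * intermediate role on the way from the current role to the target. */
static const enum role next_role[ROLE_MAX][ROLE_MAX] = {
    /* from Unknown */ { UNKNOWN, STOPPED, STOPPED, STOPPED, STOPPED },
    /* from Stopped */ { STOPPED, STOPPED, STARTED, SLAVE,   SLAVE   },
    /* from Started */ { STOPPED, STOPPED, STARTED, SLAVE,   MASTER  },
    /* from Slave   */ { STOPPED, STOPPED, STOPPED, SLAVE,   MASTER  },
    /* from Master  */ { STOPPED, SLAVE,   SLAVE,   SLAVE,   MASTER  },
};

static void walk(enum role current, enum role target)
{
    /* A target of Unknown maps to RoleError in the real action table;
     * this sketch does not handle it. */
    while (current != target) {
        enum role next = next_role[current][target];
        printf("transition: %s -> %s\n", role_name[current], role_name[next]);
        current = next;
    }
}

int main(void)
{
    walk(STOPPED, MASTER); /* prints Stopped -> Slave, then Slave -> Master */
    return 0;
}

Feeding the intermediate role back into the table is what lets a single Stopped-to-Master request expand into start-then-promote; that is exactly the loop native_create_actions() runs in patch 10/20 further below.
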
+--- + lib/pacemaker/pcmk_sched_native.c | 39 ++++++++++++++++++++++++--------------- + 1 file changed, 24 insertions(+), 15 deletions(-) + +diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c +index 89952bf..b9bca80 100644 +--- a/lib/pacemaker/pcmk_sched_native.c ++++ b/lib/pacemaker/pcmk_sched_native.c +@@ -41,27 +41,36 @@ gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, + gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set); + gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set); + +-/* *INDENT-OFF* */ +-enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { +-/* Current State */ +-/* Next State: Unknown Stopped Started Slave Master */ ++/* This array says what the *next* role should be when transitioning from one ++ * role to another. For example going from Stopped to Master, the next role is ++ * RSC_ROLE_SLAVE, because the resource must be started before being promoted. ++ * The current state then becomes Started, which is fed into this array again, ++ * giving a next role of RSC_ROLE_MASTER. ++ */ ++static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { ++ /* Current state Next state*/ ++ /* Unknown Stopped Started Slave Master */ + /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, }, + /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, }, + /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, +- /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, +- /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, ++ /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, ++ /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, + }; + +-gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(pe_resource_t*,pe_node_t*,gboolean,pe_working_set_t*) = { +-/* Current State */ +-/* Next State: Unknown Stopped Started Slave Master */ +- /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, +- /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, +- /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, +- /* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, }, +- /* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, }, ++typedef gboolean (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next, ++ gboolean optional, ++ pe_working_set_t *data_set); ++ ++// This array picks the function needed to transition from one role to another ++static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { ++ /* Current state Next state */ ++ /* Unknown Stopped Started Slave Master */ ++ /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, ++ /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, ++ /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, ++ /* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, }, ++ /* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp , }, + }; +-/* *INDENT-ON* */ + + static gboolean + native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set) +-- +1.8.3.1 + + +From 
2f1e2df1f5ec67591cddf14f9dda1c52919dd53a Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 26 May 2020 17:50:48 -0500 +Subject: [PATCH 09/20] Feature: xml: add on-fail="demote" option to resources + schema + +We don't need an XML schema version bump because it was already bumped since +the last release, for the rsc_expression/op_expression feature. +--- + xml/resources-3.4.rng | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/xml/resources-3.4.rng b/xml/resources-3.4.rng +index fbb4b65..887dc1c 100644 +--- a/xml/resources-3.4.rng ++++ b/xml/resources-3.4.rng +@@ -388,6 +388,7 @@ + + ignore + block ++ demote + stop + restart + standby +-- +1.8.3.1 + + +From 874f75e0faad91c634860221d727e51e95d97f19 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 28 May 2020 08:29:37 -0500 +Subject: [PATCH 10/20] Feature: scheduler: new on-fail="demote" recovery + policy for promoted resources + +--- + include/crm/pengine/pe_types.h | 1 + + lib/pacemaker/pcmk_sched_native.c | 25 +++++++++++++++---- + lib/pengine/common.c | 3 +++ + lib/pengine/unpack.c | 51 ++++++++++++++++++++++++++++++++++++--- + lib/pengine/utils.c | 35 +++++++++++++++++++++++---- + 5 files changed, 102 insertions(+), 13 deletions(-) + +diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h +index ba88491..ed5eb12 100644 +--- a/include/crm/pengine/pe_types.h ++++ b/include/crm/pengine/pe_types.h +@@ -246,6 +246,7 @@ struct pe_node_s { + # define pe_rsc_allocating 0x00000200ULL + # define pe_rsc_merging 0x00000400ULL + ++# define pe_rsc_stop 0x00001000ULL + # define pe_rsc_reload 0x00002000ULL + # define pe_rsc_allow_remote_remotes 0x00004000ULL + +diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c +index b9bca80..4e3bd7c 100644 +--- a/lib/pacemaker/pcmk_sched_native.c ++++ b/lib/pacemaker/pcmk_sched_native.c +@@ -1205,6 +1205,7 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set) + pe_node_t *chosen = NULL; + pe_node_t *current = NULL; + gboolean need_stop = FALSE; ++ bool need_promote = FALSE; + gboolean is_moving = FALSE; + gboolean allow_migrate = is_set(rsc->flags, pe_rsc_allow_migrate) ? TRUE : FALSE; + +@@ -1309,8 +1310,15 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set) + need_stop = TRUE; + + } else if (is_set(rsc->flags, pe_rsc_failed)) { +- pe_rsc_trace(rsc, "Recovering %s", rsc->id); +- need_stop = TRUE; ++ if (is_set(rsc->flags, pe_rsc_stop)) { ++ need_stop = TRUE; ++ pe_rsc_trace(rsc, "Recovering %s", rsc->id); ++ } else { ++ pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id); ++ if (rsc->next_role == RSC_ROLE_MASTER) { ++ need_promote = TRUE; ++ } ++ } + + } else if (is_set(rsc->flags, pe_rsc_block)) { + pe_rsc_trace(rsc, "Block %s", rsc->id); +@@ -1344,10 +1352,16 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set) + + + while (rsc->role <= rsc->next_role && role != rsc->role && is_not_set(rsc->flags, pe_rsc_block)) { ++ bool required = need_stop; ++ + next_role = rsc_state_matrix[role][rsc->role]; ++ if ((next_role == RSC_ROLE_MASTER) && need_promote) { ++ required = true; ++ } + pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role), +- rsc->id, need_stop ? " required" : ""); +- if (rsc_action_matrix[role][next_role] (rsc, chosen, !need_stop, data_set) == FALSE) { ++ rsc->id, (required? 
" required" : "")); ++ if (rsc_action_matrix[role][next_role](rsc, chosen, !required, ++ data_set) == FALSE) { + break; + } + role = next_role; +@@ -2631,7 +2645,8 @@ LogActions(pe_resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) + + free(key); + +- } else if (stop && is_set(rsc->flags, pe_rsc_failed)) { ++ } else if (stop && is_set(rsc->flags, pe_rsc_failed) ++ && is_set(rsc->flags, pe_rsc_stop)) { + /* 'stop' may be NULL if the failure was ignored */ + LogAction("Recover", rsc, current, next, stop, start, terminal); + STOP_SANITY_ASSERT(__LINE__); +diff --git a/lib/pengine/common.c b/lib/pengine/common.c +index ded6df8..f4f2106 100644 +--- a/lib/pengine/common.c ++++ b/lib/pengine/common.c +@@ -326,6 +326,9 @@ fail2text(enum action_fail_response fail) + case action_fail_ignore: + result = "ignore"; + break; ++ case action_fail_demote: ++ result = "demote"; ++ break; + case action_fail_block: + result = "block"; + break; +diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c +index 6a350e5..a219805 100644 +--- a/lib/pengine/unpack.c ++++ b/lib/pengine/unpack.c +@@ -108,6 +108,7 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, + */ + node->details->remote_requires_reset = TRUE; + set_bit(rsc->flags, pe_rsc_failed); ++ set_bit(rsc->flags, pe_rsc_stop); + } + } + +@@ -117,6 +118,7 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, + "and guest resource no longer exists", + node->details->uname, reason); + set_bit(node->details->remote_rsc->flags, pe_rsc_failed); ++ set_bit(node->details->remote_rsc->flags, pe_rsc_stop); + + } else if (pe__is_remote_node(node)) { + pe_resource_t *rsc = node->details->remote_rsc; +@@ -1914,6 +1916,7 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, + */ + if (pe__is_guest_node(node)) { + set_bit(rsc->flags, pe_rsc_failed); ++ set_bit(rsc->flags, pe_rsc_stop); + should_fence = TRUE; + + } else if (is_set(data_set->flags, pe_flag_stonith_enabled)) { +@@ -1956,6 +1959,11 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, + /* nothing to do */ + break; + ++ case action_fail_demote: ++ set_bit(rsc->flags, pe_rsc_failed); ++ demote_action(rsc, node, FALSE); ++ break; ++ + case action_fail_fence: + /* treat it as if it is still running + * but also mark the node as unclean +@@ -1992,12 +2000,14 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, + case action_fail_recover: + if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { + set_bit(rsc->flags, pe_rsc_failed); ++ set_bit(rsc->flags, pe_rsc_stop); + stop_action(rsc, node, FALSE); + } + break; + + case action_fail_restart_container: + set_bit(rsc->flags, pe_rsc_failed); ++ set_bit(rsc->flags, pe_rsc_stop); + + if (rsc->container && pe_rsc_is_bundled(rsc)) { + /* A bundle's remote connection can run on a different node than +@@ -2016,6 +2026,7 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, + + case action_fail_reset_remote: + set_bit(rsc->flags, pe_rsc_failed); ++ set_bit(rsc->flags, pe_rsc_stop); + if (is_set(data_set->flags, pe_flag_stonith_enabled)) { + tmpnode = NULL; + if (rsc->is_remote_node) { +@@ -2071,8 +2082,17 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, + } + + native_add_running(rsc, node, data_set); +- if (on_fail != action_fail_ignore) { +- set_bit(rsc->flags, pe_rsc_failed); ++ switch (on_fail) { ++ case action_fail_ignore: ++ break; ++ case action_fail_demote: ++ case action_fail_block: ++ set_bit(rsc->flags, pe_rsc_failed); ++ break; ++ default: ++ set_bit(rsc->flags, 
pe_rsc_failed); ++ set_bit(rsc->flags, pe_rsc_stop); ++ break; + } + + } else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) { +@@ -2595,6 +2615,7 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, + } else { + /* Consider it failed here - forces a restart, prevents migration */ + set_bit(rsc->flags, pe_rsc_failed); ++ set_bit(rsc->flags, pe_rsc_stop); + clear_bit(rsc->flags, pe_rsc_allow_migrate); + } + } +@@ -2785,9 +2806,21 @@ static int + cmp_on_fail(enum action_fail_response first, enum action_fail_response second) + { + switch (first) { ++ case action_fail_demote: ++ switch (second) { ++ case action_fail_ignore: ++ return 1; ++ case action_fail_demote: ++ return 0; ++ default: ++ return -1; ++ } ++ break; ++ + case action_fail_reset_remote: + switch (second) { + case action_fail_ignore: ++ case action_fail_demote: + case action_fail_recover: + return 1; + case action_fail_reset_remote: +@@ -2800,6 +2833,7 @@ cmp_on_fail(enum action_fail_response first, enum action_fail_response second) + case action_fail_restart_container: + switch (second) { + case action_fail_ignore: ++ case action_fail_demote: + case action_fail_recover: + case action_fail_reset_remote: + return 1; +@@ -2814,9 +2848,13 @@ cmp_on_fail(enum action_fail_response first, enum action_fail_response second) + break; + } + switch (second) { ++ case action_fail_demote: ++ return (first == action_fail_ignore)? -1 : 1; ++ + case action_fail_reset_remote: + switch (first) { + case action_fail_ignore: ++ case action_fail_demote: + case action_fail_recover: + return -1; + default: +@@ -2827,6 +2865,7 @@ cmp_on_fail(enum action_fail_response first, enum action_fail_response second) + case action_fail_restart_container: + switch (first) { + case action_fail_ignore: ++ case action_fail_demote: + case action_fail_recover: + case action_fail_reset_remote: + return -1; +@@ -3426,7 +3465,11 @@ update_resource_state(pe_resource_t * rsc, pe_node_t * node, xmlNode * xml_op, c + clear_past_failure = TRUE; + + } else if (safe_str_eq(task, CRMD_ACTION_DEMOTE)) { +- /* Demote from Master does not clear an error */ ++ ++ if (*on_fail == action_fail_demote) { ++ // Demote clears an error only if on-fail=demote ++ clear_past_failure = TRUE; ++ } + rsc->role = RSC_ROLE_SLAVE; + + } else if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) { +@@ -3454,6 +3497,7 @@ update_resource_state(pe_resource_t * rsc, pe_node_t * node, xmlNode * xml_op, c + + case action_fail_block: + case action_fail_ignore: ++ case action_fail_demote: + case action_fail_recover: + case action_fail_restart_container: + *on_fail = action_fail_ignore; +@@ -3714,6 +3758,7 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, + * that, ensure the remote connection is considered failed. 
+ */ + set_bit(node->details->remote_rsc->flags, pe_rsc_failed); ++ set_bit(node->details->remote_rsc->flags, pe_rsc_stop); + } + + // fall through +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index 3fb7e62..fee9efb 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -720,6 +720,7 @@ static bool + valid_stop_on_fail(const char *value) + { + return safe_str_neq(value, "standby") ++ && safe_str_neq(value, "demote") + && safe_str_neq(value, "stop"); + } + +@@ -727,6 +728,11 @@ static const char * + unpack_operation_on_fail(pe_action_t * action) + { + ++ const char *name = NULL; ++ const char *role = NULL; ++ const char *on_fail = NULL; ++ const char *interval_spec = NULL; ++ const char *enabled = NULL; + const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL); + + if (safe_str_eq(action->task, CRMD_ACTION_STOP) +@@ -736,14 +742,10 @@ unpack_operation_on_fail(pe_action_t * action) + "action to default value because '%s' is not " + "allowed for stop", action->rsc->id, value); + return NULL; ++ + } else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) { + /* demote on_fail defaults to master monitor value if present */ + xmlNode *operation = NULL; +- const char *name = NULL; +- const char *role = NULL; +- const char *on_fail = NULL; +- const char *interval_spec = NULL; +- const char *enabled = NULL; + + CRM_CHECK(action->rsc != NULL, return NULL); + +@@ -766,12 +768,31 @@ unpack_operation_on_fail(pe_action_t * action) + continue; + } else if (crm_parse_interval_spec(interval_spec) == 0) { + continue; ++ } else if (safe_str_eq(on_fail, "demote")) { ++ continue; + } + + value = on_fail; + } + } else if (safe_str_eq(action->task, CRM_OP_LRM_DELETE)) { + value = "ignore"; ++ ++ } else if (safe_str_eq(value, "demote")) { ++ name = crm_element_value(action->op_entry, "name"); ++ role = crm_element_value(action->op_entry, "role"); ++ on_fail = crm_element_value(action->op_entry, XML_OP_ATTR_ON_FAIL); ++ interval_spec = crm_element_value(action->op_entry, ++ XML_LRM_ATTR_INTERVAL); ++ ++ if (safe_str_neq(name, CRMD_ACTION_PROMOTE) ++ && (safe_str_neq(name, CRMD_ACTION_STATUS) ++ || safe_str_neq(role, "Master") ++ || (crm_parse_interval_spec(interval_spec) == 0))) { ++ pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s " ++ "action to default value because 'demote' is not " ++ "allowed for it", action->rsc->id, name); ++ return NULL; ++ } + } + + return value; +@@ -1170,6 +1191,10 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai + value = NULL; + } + ++ } else if (safe_str_eq(value, "demote")) { ++ action->on_fail = action_fail_demote; ++ value = "demote instance"; ++ + } else { + pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value); + value = NULL; +-- +1.8.3.1 + + +From d29433ea57796de000f4fea8c60f8da1d903108b Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 16 Jun 2020 16:03:14 -0500 +Subject: [PATCH 11/20] Test: scheduler: add regression tests for + on-fail="demote" + +--- + cts/cts-scheduler.in | 4 + + cts/scheduler/on_fail_demote1.dot | 64 ++ + cts/scheduler/on_fail_demote1.exp | 360 +++++++ + cts/scheduler/on_fail_demote1.scores | 470 +++++++++ + cts/scheduler/on_fail_demote1.summary | 86 ++ + cts/scheduler/on_fail_demote1.xml | 616 +++++++++++ + cts/scheduler/on_fail_demote2.dot | 22 + + cts/scheduler/on_fail_demote2.exp | 125 +++ + cts/scheduler/on_fail_demote2.scores | 127 +++ + cts/scheduler/on_fail_demote2.summary | 41 + + cts/scheduler/on_fail_demote2.xml | 221 
++++ + cts/scheduler/on_fail_demote3.dot | 12 + + cts/scheduler/on_fail_demote3.exp | 63 ++ + cts/scheduler/on_fail_demote3.scores | 127 +++ + cts/scheduler/on_fail_demote3.summary | 34 + + cts/scheduler/on_fail_demote3.xml | 221 ++++ + cts/scheduler/on_fail_demote4.dot | 383 +++++++ + cts/scheduler/on_fail_demote4.exp | 1818 +++++++++++++++++++++++++++++++++ + cts/scheduler/on_fail_demote4.scores | 470 +++++++++ + cts/scheduler/on_fail_demote4.summary | 187 ++++ + cts/scheduler/on_fail_demote4.xml | 625 ++++++++++++ + 21 files changed, 6076 insertions(+) + create mode 100644 cts/scheduler/on_fail_demote1.dot + create mode 100644 cts/scheduler/on_fail_demote1.exp + create mode 100644 cts/scheduler/on_fail_demote1.scores + create mode 100644 cts/scheduler/on_fail_demote1.summary + create mode 100644 cts/scheduler/on_fail_demote1.xml + create mode 100644 cts/scheduler/on_fail_demote2.dot + create mode 100644 cts/scheduler/on_fail_demote2.exp + create mode 100644 cts/scheduler/on_fail_demote2.scores + create mode 100644 cts/scheduler/on_fail_demote2.summary + create mode 100644 cts/scheduler/on_fail_demote2.xml + create mode 100644 cts/scheduler/on_fail_demote3.dot + create mode 100644 cts/scheduler/on_fail_demote3.exp + create mode 100644 cts/scheduler/on_fail_demote3.scores + create mode 100644 cts/scheduler/on_fail_demote3.summary + create mode 100644 cts/scheduler/on_fail_demote3.xml + create mode 100644 cts/scheduler/on_fail_demote4.dot + create mode 100644 cts/scheduler/on_fail_demote4.exp + create mode 100644 cts/scheduler/on_fail_demote4.scores + create mode 100644 cts/scheduler/on_fail_demote4.summary + create mode 100644 cts/scheduler/on_fail_demote4.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index ae8247e..0e68e73 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -478,6 +478,10 @@ TESTS = [ + [ "master-score-startup", "Use permanent master scores without LRM history" ], + [ "failed-demote-recovery", "Recover resource in slave role after demote fails" ], + [ "failed-demote-recovery-master", "Recover resource in master role after demote fails" ], ++ [ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ], ++ [ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ], ++ [ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ], ++ [ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ], + ], + [ + [ "history-1", "Correctly parse stateful-1 resource state" ], +diff --git a/cts/scheduler/on_fail_demote1.dot b/cts/scheduler/on_fail_demote1.dot +new file mode 100644 +index 0000000..d11c1c1 +--- /dev/null ++++ b/cts/scheduler/on_fail_demote1.dot +@@ -0,0 +1,64 @@ ++ digraph "g" { ++"bundled_demote_0 stateful-bundle-0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"bundled_demote_0 stateful-bundle-0" -> "stateful-bundle-master_demoted_0" [ style = bold] ++"bundled_demote_0 stateful-bundle-0" [ style=bold color="green" fontcolor="black"] ++"bundled_promote_0 stateful-bundle-0" -> "stateful-bundle-master_promoted_0" [ style = bold] ++"bundled_promote_0 stateful-bundle-0" [ style=bold color="green" fontcolor="black"] ++"lxc-ms-master_demote_0" -> "lxc-ms-master_demoted_0" [ style = bold] ++"lxc-ms-master_demote_0" -> "lxc-ms_demote_0 lxc2" [ style = bold] ++"lxc-ms-master_demote_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_demoted_0" -> "lxc-ms-master_promote_0" [ 
style = bold] ++"lxc-ms-master_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_promote_0" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"lxc-ms-master_promote_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms_demote_0 lxc2" -> "lxc-ms-master_demoted_0" [ style = bold] ++"lxc-ms_demote_0 lxc2" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"lxc-ms_demote_0 lxc2" [ style=bold color="green" fontcolor="black"] ++"lxc-ms_promote_0 lxc2" -> "lxc-ms-master_promoted_0" [ style = bold] ++"lxc-ms_promote_0 lxc2" [ style=bold color="green" fontcolor="black"] ++"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1-clone_demote_0" -> "rsc1_demote_0 rhel7-4" [ style = bold] ++"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_demoted_0" -> "rsc1-clone_promote_0" [ style = bold] ++"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_promote_0" -> "rsc1_promote_0 rhel7-4" [ style = bold] ++"rsc1-clone_promote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1_demote_0 rhel7-4" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1_demote_0 rhel7-4" -> "rsc1_promote_0 rhel7-4" [ style = bold] ++"rsc1_demote_0 rhel7-4" [ style=bold color="green" fontcolor="black"] ++"rsc1_promote_0 rhel7-4" -> "rsc1-clone_promoted_0" [ style = bold] ++"rsc1_promote_0 rhel7-4" [ style=bold color="green" fontcolor="black"] ++"rsc2-master_demote_0" -> "rsc2-master_demoted_0" [ style = bold] ++"rsc2-master_demote_0" -> "rsc2_demote_0 remote-rhel7-2" [ style = bold] ++"rsc2-master_demote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_demoted_0" -> "rsc2-master_promote_0" [ style = bold] ++"rsc2-master_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_promote_0" -> "rsc2_promote_0 remote-rhel7-2" [ style = bold] ++"rsc2-master_promote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2_demote_0 remote-rhel7-2" -> "rsc2-master_demoted_0" [ style = bold] ++"rsc2_demote_0 remote-rhel7-2" -> "rsc2_promote_0 remote-rhel7-2" [ style = bold] ++"rsc2_demote_0 remote-rhel7-2" [ style=bold color="green" fontcolor="black"] ++"rsc2_promote_0 remote-rhel7-2" -> "rsc2-master_promoted_0" [ style = bold] ++"rsc2_promote_0 remote-rhel7-2" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-master_demote_0" -> "bundled_demote_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-master_demote_0" -> "stateful-bundle-master_demoted_0" [ style = bold] ++"stateful-bundle-master_demote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_demoted_0" -> "stateful-bundle-master_promote_0" [ style = bold] ++"stateful-bundle-master_demoted_0" -> "stateful-bundle_demoted_0" [ style = bold] ++"stateful-bundle-master_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_promote_0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_promoted_0" -> "stateful-bundle_promoted_0" [ style = bold] ++"stateful-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_demote_0" -> "stateful-bundle-master_demote_0" [ style = bold] ++"stateful-bundle_demote_0" -> 
"stateful-bundle_demoted_0" [ style = bold] ++"stateful-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_demoted_0" -> "stateful-bundle_promote_0" [ style = bold] ++"stateful-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_promote_0" -> "stateful-bundle-master_promote_0" [ style = bold] ++"stateful-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] ++} +diff --git a/cts/scheduler/on_fail_demote1.exp b/cts/scheduler/on_fail_demote1.exp +new file mode 100644 +index 0000000..ebe1dd5 +--- /dev/null ++++ b/cts/scheduler/on_fail_demote1.exp +@@ -0,0 +1,360 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/on_fail_demote1.scores b/cts/scheduler/on_fail_demote1.scores +new file mode 100644 +index 0000000..7df582f +--- /dev/null ++++ b/cts/scheduler/on_fail_demote1.scores +@@ -0,0 +1,470 @@ ++Allocation scores: ++Using the original execution date of: 2020-06-16 19:23:21Z ++bundled:0 promotion score on stateful-bundle-0: 10 ++bundled:1 promotion score on stateful-bundle-1: 5 ++bundled:2 promotion score on stateful-bundle-2: 5 ++lxc-ms:0 promotion score on lxc2: INFINITY ++lxc-ms:1 promotion score on lxc1: INFINITY ++pcmk__bundle_allocate: bundled:0 allocation score on stateful-bundle-0: 501 ++pcmk__bundle_allocate: bundled:1 allocation score on stateful-bundle-1: 501 ++pcmk__bundle_allocate: bundled:2 allocation score on stateful-bundle-2: 501 ++pcmk__bundle_allocate: stateful-bundle allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-4: 0 
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: 
stateful-bundle-ip-192.168.122.132 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-0: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-1: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-2: -INFINITY ++pcmk__clone_allocate: bundled:0 allocation score on stateful-bundle-0: INFINITY ++pcmk__clone_allocate: bundled:1 allocation score on stateful-bundle-1: INFINITY ++pcmk__clone_allocate: bundled:2 allocation score on stateful-bundle-2: INFINITY ++pcmk__clone_allocate: lxc-ms-master allocation score on lxc1: INFINITY ++pcmk__clone_allocate: lxc-ms-master allocation score on lxc2: INFINITY ++pcmk__clone_allocate: lxc-ms-master allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-1: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-3: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-4: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-5: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on lxc1: INFINITY ++pcmk__clone_allocate: lxc-ms:0 allocation score on lxc2: INFINITY ++pcmk__clone_allocate: lxc-ms:0 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on lxc1: INFINITY ++pcmk__clone_allocate: lxc-ms:1 allocation score on lxc2: INFINITY ++pcmk__clone_allocate: lxc-ms:1 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation 
score on rhel7-1: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 11 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on remote-rhel7-2: 6 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on lxc2: 6 ++pcmk__clone_allocate: rsc1:5 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on lxc1: 6 ++pcmk__clone_allocate: rsc1:6 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-1: 0 
++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2-master allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2-master allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2-master allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-4: 11 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on remote-rhel7-2: 11 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on lxc2: 6 ++pcmk__clone_allocate: rsc2:5 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on lxc1: 6 ++pcmk__clone_allocate: rsc2:6 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:6 
allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: stateful-bundle-master allocation score on lxc1: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on lxc2: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on remote-rhel7-2: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-1: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-3: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-4: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-5: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-0: 0 ++pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-1: 0 ++pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-2: 0 ++pcmk__native_allocate: Fencing allocation score on lxc1: -INFINITY ++pcmk__native_allocate: Fencing allocation score on lxc2: -INFINITY ++pcmk__native_allocate: Fencing allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: Fencing allocation score on rhel7-1: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-3: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-4: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-5: 0 ++pcmk__native_allocate: bundled:0 allocation score on stateful-bundle-0: INFINITY ++pcmk__native_allocate: bundled:1 allocation score on stateful-bundle-1: INFINITY ++pcmk__native_allocate: bundled:2 allocation score on stateful-bundle-2: INFINITY ++pcmk__native_allocate: container1 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: container1 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: container1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: container1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: container1 allocation score on rhel7-3: INFINITY ++pcmk__native_allocate: container1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: container1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: container2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: container2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: container2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: container2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: container2 allocation score on rhel7-3: INFINITY ++pcmk__native_allocate: container2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: container2 allocation score on rhel7-5: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: lxc-ms:0 allocation score on lxc2: INFINITY ++pcmk__native_allocate: lxc-ms:0 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-4: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-5: 0 ++pcmk__native_allocate: lxc-ms:1 allocation score on lxc1: INFINITY ++pcmk__native_allocate: lxc-ms:1 allocation score on lxc2: INFINITY ++pcmk__native_allocate: lxc-ms:1 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-3: 
0 ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: lxc1 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on rhel7-3: 0 ++pcmk__native_allocate: lxc1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: lxc2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:0 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: 11 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:1 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc1:1 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:2 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:2 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc1:2 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc1:3 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:3 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc1:3 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:4 allocation 
score on lxc2: 0 ++pcmk__native_allocate: rsc1:4 allocation score on remote-rhel7-2: 6 ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:5 allocation score on lxc2: 6 ++pcmk__native_allocate: rsc1:5 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on lxc1: 6 ++pcmk__native_allocate: rsc1:6 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:0 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:0 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-4: 11 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc2:1 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:1 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:1 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc2:2 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:2 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:2 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc2:3 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:3 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:3 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on remote-rhel7-2: 11 ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-4: 
-INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:5 allocation score on lxc2: 6 ++pcmk__native_allocate: rsc2:5 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on lxc1: 6 ++pcmk__native_allocate: rsc2:6 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-5: 10000 ++pcmk__native_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-1: 10000 ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-4: 10000 ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on lxc1: -10000 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on lxc2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on lxc1: -10000 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on lxc2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: -10000 ++pcmk__native_allocate: 
stateful-bundle-docker-1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on lxc1: -10000 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on lxc2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: -INFINITY ++rsc1:0 promotion score on rhel7-4: 10 ++rsc1:1 promotion score on rhel7-3: 5 ++rsc1:2 promotion score on rhel7-5: 5 ++rsc1:3 promotion score on rhel7-1: 5 ++rsc1:4 promotion score on remote-rhel7-2: 5 ++rsc1:5 promotion score on lxc2: 5 ++rsc1:6 promotion score on lxc1: 5 ++rsc2:0 promotion score on rhel7-4: 10 ++rsc2:1 promotion score on rhel7-3: 5 ++rsc2:2 promotion score on rhel7-5: 5 ++rsc2:3 promotion score on rhel7-1: 5 ++rsc2:4 promotion score on remote-rhel7-2: 110 ++rsc2:5 promotion score on lxc2: 5 ++rsc2:6 promotion score on lxc1: 5 +diff --git 
a/cts/scheduler/on_fail_demote1.summary b/cts/scheduler/on_fail_demote1.summary +new file mode 100644 +index 0000000..b173582 +--- /dev/null ++++ b/cts/scheduler/on_fail_demote1.summary +@@ -0,0 +1,86 @@ ++Using the original execution date of: 2020-06-16 19:23:21Z ++ ++Current cluster status: ++Online: [ rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] ++RemoteOnline: [ remote-rhel7-2 ] ++GuestOnline: [ lxc1:container1 lxc2:container2 stateful-bundle-0:stateful-bundle-docker-0 stateful-bundle-1:stateful-bundle-docker-1 stateful-bundle-2:stateful-bundle-docker-2 ] ++ ++ Fencing (stonith:fence_xvm): Started rhel7-4 ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ rsc1 (ocf::pacemaker:Stateful): FAILED Master rhel7-4 ++ Slaves: [ lxc1 lxc2 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ] ++ Clone Set: rsc2-master [rsc2] (promotable) ++ rsc2 (ocf::pacemaker:Stateful): FAILED Master remote-rhel7-2 ++ Slaves: [ lxc1 lxc2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] ++ remote-rhel7-2 (ocf::pacemaker:remote): Started rhel7-1 ++ container1 (ocf::heartbeat:VirtualDomain): Started rhel7-3 ++ container2 (ocf::heartbeat:VirtualDomain): Started rhel7-3 ++ Clone Set: lxc-ms-master [lxc-ms] (promotable) ++ lxc-ms (ocf::pacemaker:Stateful): FAILED Master lxc2 ++ Slaves: [ lxc1 ] ++ Stopped: [ remote-rhel7-2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] ++ Container bundle set: stateful-bundle [pcmktest:http] ++ stateful-bundle-0 (192.168.122.131) (ocf::pacemaker:Stateful): FAILED Master rhel7-5 ++ stateful-bundle-1 (192.168.122.132) (ocf::pacemaker:Stateful): Slave rhel7-1 ++ stateful-bundle-2 (192.168.122.133) (ocf::pacemaker:Stateful): Slave rhel7-4 ++ ++Transition Summary: ++ * Re-promote rsc1:0 ( Master rhel7-4 ) ++ * Re-promote rsc2:4 ( Master remote-rhel7-2 ) ++ * Re-promote lxc-ms:0 ( Master lxc2 ) ++ * Re-promote bundled:0 ( Master stateful-bundle-0 ) ++ ++Executing cluster transition: ++ * Pseudo action: rsc1-clone_demote_0 ++ * Pseudo action: rsc2-master_demote_0 ++ * Pseudo action: lxc-ms-master_demote_0 ++ * Pseudo action: stateful-bundle_demote_0 ++ * Resource action: rsc1 demote on rhel7-4 ++ * Pseudo action: rsc1-clone_demoted_0 ++ * Pseudo action: rsc1-clone_promote_0 ++ * Resource action: rsc2 demote on remote-rhel7-2 ++ * Pseudo action: rsc2-master_demoted_0 ++ * Pseudo action: rsc2-master_promote_0 ++ * Resource action: lxc-ms demote on lxc2 ++ * Pseudo action: lxc-ms-master_demoted_0 ++ * Pseudo action: lxc-ms-master_promote_0 ++ * Pseudo action: stateful-bundle-master_demote_0 ++ * Resource action: rsc1 promote on rhel7-4 ++ * Pseudo action: rsc1-clone_promoted_0 ++ * Resource action: rsc2 promote on remote-rhel7-2 ++ * Pseudo action: rsc2-master_promoted_0 ++ * Resource action: lxc-ms promote on lxc2 ++ * Pseudo action: lxc-ms-master_promoted_0 ++ * Resource action: bundled demote on stateful-bundle-0 ++ * Pseudo action: stateful-bundle-master_demoted_0 ++ * Pseudo action: stateful-bundle_demoted_0 ++ * Pseudo action: stateful-bundle_promote_0 ++ * Pseudo action: stateful-bundle-master_promote_0 ++ * Resource action: bundled promote on stateful-bundle-0 ++ * Pseudo action: stateful-bundle-master_promoted_0 ++ * Pseudo action: stateful-bundle_promoted_0 ++Using the original execution date of: 2020-06-16 19:23:21Z ++ ++Revised cluster status: ++Online: [ rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] ++RemoteOnline: [ remote-rhel7-2 ] ++GuestOnline: [ lxc1:container1 lxc2:container2 stateful-bundle-0:stateful-bundle-docker-0 stateful-bundle-1:stateful-bundle-docker-1 stateful-bundle-2:stateful-bundle-docker-2 ] ++ ++ Fencing (stonith:fence_xvm): 
Started rhel7-4 ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ Masters: [ rhel7-4 ] ++ Slaves: [ lxc1 lxc2 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ] ++ Clone Set: rsc2-master [rsc2] (promotable) ++ Masters: [ remote-rhel7-2 ] ++ Slaves: [ lxc1 lxc2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] ++ remote-rhel7-2 (ocf::pacemaker:remote): Started rhel7-1 ++ container1 (ocf::heartbeat:VirtualDomain): Started rhel7-3 ++ container2 (ocf::heartbeat:VirtualDomain): Started rhel7-3 ++ Clone Set: lxc-ms-master [lxc-ms] (promotable) ++ Masters: [ lxc2 ] ++ Slaves: [ lxc1 ] ++ Container bundle set: stateful-bundle [pcmktest:http] ++ stateful-bundle-0 (192.168.122.131) (ocf::pacemaker:Stateful): Master rhel7-5 ++ stateful-bundle-1 (192.168.122.132) (ocf::pacemaker:Stateful): Slave rhel7-1 ++ stateful-bundle-2 (192.168.122.133) (ocf::pacemaker:Stateful): Slave rhel7-4 ++ +diff --git a/cts/scheduler/on_fail_demote1.xml b/cts/scheduler/on_fail_demote1.xml +new file mode 100644 +index 0000000..9f3ff20 +--- /dev/null ++++ b/cts/scheduler/on_fail_demote1.xml +@@ -0,0 +1,616 @@ +diff --git a/cts/scheduler/on_fail_demote2.dot b/cts/scheduler/on_fail_demote2.dot +new file mode 100644 +index 0000000..06193cb +--- /dev/null ++++ b/cts/scheduler/on_fail_demote2.dot +@@ -0,0 +1,22 @@ ++ digraph "g" { ++"Cancel rsc1_monitor_10000 rhel7-4" -> "rsc1_demote_0 rhel7-4" [ style = bold] ++"Cancel rsc1_monitor_10000 rhel7-4" [ style=bold color="green" fontcolor="black"] ++"Cancel rsc1_monitor_11000 rhel7-3" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"Cancel rsc1_monitor_11000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1-clone_demote_0"
-> "rsc1_demote_0 rhel7-4" [ style = bold] ++"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_demoted_0" -> "rsc1-clone_promote_0" [ style = bold] ++"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_promote_0" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"rsc1-clone_promote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1_demote_0 rhel7-4" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1_demote_0 rhel7-4" -> "rsc1_monitor_11000 rhel7-4" [ style = bold] ++"rsc1_demote_0 rhel7-4" [ style=bold color="green" fontcolor="black"] ++"rsc1_monitor_10000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"rsc1_monitor_11000 rhel7-4" [ style=bold color="green" fontcolor="black"] ++"rsc1_promote_0 rhel7-3" -> "rsc1-clone_promoted_0" [ style = bold] ++"rsc1_promote_0 rhel7-3" -> "rsc1_monitor_10000 rhel7-3" [ style = bold] ++"rsc1_promote_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/on_fail_demote2.exp b/cts/scheduler/on_fail_demote2.exp +new file mode 100644 +index 0000000..492e86f +--- /dev/null ++++ b/cts/scheduler/on_fail_demote2.exp +@@ -0,0 +1,125 @@ +diff --git a/cts/scheduler/on_fail_demote2.scores b/cts/scheduler/on_fail_demote2.scores +new file mode 100644 +index 0000000..25aea90 +--- /dev/null ++++ b/cts/scheduler/on_fail_demote2.scores +@@ -0,0 +1,127 @@ ++Allocation scores: ++Using the original execution date of: 2020-06-16 19:23:21Z ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 11 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on
rhel7-1: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-2: 6 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-4: 11 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-2: 6 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-5: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-1: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-2: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-3: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-4: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: 11 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc1:3 
allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-2: 6 ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-4: 11 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-2: 6 ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-5: -INFINITY ++rsc1:0 promotion score on rhel7-4: -INFINITY ++rsc1:1 promotion score on rhel7-3: 5 ++rsc1:2 promotion score on rhel7-5: 5 ++rsc1:3 promotion score on rhel7-1: 5 ++rsc1:4 promotion score on rhel7-2: 5 ++rsc2:0 promotion score on rhel7-4: 10 ++rsc2:1 promotion score on rhel7-3: 5 ++rsc2:2 promotion score on rhel7-5: 5 ++rsc2:3 promotion score on rhel7-1: 5 ++rsc2:4 promotion score on rhel7-2: 5 +diff --git a/cts/scheduler/on_fail_demote2.summary b/cts/scheduler/on_fail_demote2.summary +new file mode 100644 +index 0000000..795a11d +--- /dev/null ++++ b/cts/scheduler/on_fail_demote2.summary +@@ -0,0 +1,41 @@ ++Using the original execution date of: 2020-06-16 19:23:21Z ++ ++Current cluster status: ++Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] ++ ++ Fencing (stonith:fence_xvm): Started rhel7-1 ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ rsc1 (ocf::pacemaker:Stateful): FAILED Master rhel7-4 ++ Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] ++ Clone Set: rsc2-master [rsc2] (promotable) ++ Masters: [ rhel7-4 ] ++ Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] ++ ++Transition Summary: ++ * Demote rsc1:0 ( Master -> Slave rhel7-4 ) ++ * Promote rsc1:1 ( Slave -> Master rhel7-3 ) ++ ++Executing cluster transition: ++ * Resource action: rsc1 cancel=10000 on rhel7-4 ++ * Resource action: rsc1 cancel=11000 
on rhel7-3 ++ * Pseudo action: rsc1-clone_demote_0 ++ * Resource action: rsc1 demote on rhel7-4 ++ * Pseudo action: rsc1-clone_demoted_0 ++ * Pseudo action: rsc1-clone_promote_0 ++ * Resource action: rsc1 monitor=11000 on rhel7-4 ++ * Resource action: rsc1 promote on rhel7-3 ++ * Pseudo action: rsc1-clone_promoted_0 ++ * Resource action: rsc1 monitor=10000 on rhel7-3 ++Using the original execution date of: 2020-06-16 19:23:21Z ++ ++Revised cluster status: ++Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] ++ ++ Fencing (stonith:fence_xvm): Started rhel7-1 ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ Masters: [ rhel7-3 ] ++ Slaves: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] ++ Clone Set: rsc2-master [rsc2] (promotable) ++ Masters: [ rhel7-4 ] ++ Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] ++ +diff --git a/cts/scheduler/on_fail_demote2.xml b/cts/scheduler/on_fail_demote2.xml +new file mode 100644 +index 0000000..ae91633 +--- /dev/null ++++ b/cts/scheduler/on_fail_demote2.xml +@@ -0,0 +1,221 @@ +diff --git a/cts/scheduler/on_fail_demote3.dot b/cts/scheduler/on_fail_demote3.dot +new file mode 100644 +index 0000000..e78325b +--- /dev/null ++++ b/cts/scheduler/on_fail_demote3.dot +@@ -0,0 +1,12 @@ ++ digraph "g" { ++"Cancel rsc1_monitor_10000 rhel7-4" -> "rsc1_demote_0 rhel7-4" [ style = bold] ++"Cancel rsc1_monitor_10000 rhel7-4" [ style=bold color="green" fontcolor="black"] ++"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1-clone_demote_0" -> "rsc1_demote_0 rhel7-4" [ style = bold] ++"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1_demote_0 rhel7-4" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1_demote_0 rhel7-4" -> "rsc1_monitor_11000 rhel7-4" [ style = bold] ++"rsc1_demote_0 rhel7-4" [ style=bold color="green" fontcolor="black"] ++"rsc1_monitor_11000 rhel7-4" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/on_fail_demote3.exp b/cts/scheduler/on_fail_demote3.exp +new file mode 100644 +index 0000000..ed6bd6d +--- /dev/null ++++ b/cts/scheduler/on_fail_demote3.exp +@@ -0,0 +1,63 @@ +diff --git a/cts/scheduler/on_fail_demote3.scores b/cts/scheduler/on_fail_demote3.scores +new file mode 100644 +index 0000000..a85639a +--- /dev/null ++++ b/cts/scheduler/on_fail_demote3.scores +@@ -0,0 +1,127 @@ ++Allocation scores: ++Using the original execution date of: 2020-06-16 19:23:21Z ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1-clone
allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 11 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-2: 6 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-4: 11 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-2: 6 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-5: 0 
++pcmk__native_allocate: Fencing allocation score on rhel7-1: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-2: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-3: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-4: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: 11 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-2: 6 ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-4: 11 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-2: 6 ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-4: 
-INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-5: -INFINITY ++rsc1:0 promotion score on rhel7-4: -INFINITY ++rsc1:1 promotion score on rhel7-3: -INFINITY ++rsc1:2 promotion score on rhel7-5: -INFINITY ++rsc1:3 promotion score on rhel7-1: -INFINITY ++rsc1:4 promotion score on rhel7-2: -INFINITY ++rsc2:0 promotion score on rhel7-4: 10 ++rsc2:1 promotion score on rhel7-3: 5 ++rsc2:2 promotion score on rhel7-5: 5 ++rsc2:3 promotion score on rhel7-1: 5 ++rsc2:4 promotion score on rhel7-2: 5 +diff --git a/cts/scheduler/on_fail_demote3.summary b/cts/scheduler/on_fail_demote3.summary +new file mode 100644 +index 0000000..f1173fd +--- /dev/null ++++ b/cts/scheduler/on_fail_demote3.summary +@@ -0,0 +1,34 @@ ++Using the original execution date of: 2020-06-16 19:23:21Z ++ ++Current cluster status: ++Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] ++ ++ Fencing (stonith:fence_xvm): Started rhel7-1 ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ rsc1 (ocf::pacemaker:Stateful): FAILED Master rhel7-4 ++ Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] ++ Clone Set: rsc2-master [rsc2] (promotable) ++ Masters: [ rhel7-4 ] ++ Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] ++ ++Transition Summary: ++ * Demote rsc1:0 ( Master -> Slave rhel7-4 ) ++ ++Executing cluster transition: ++ * Resource action: rsc1 cancel=10000 on rhel7-4 ++ * Pseudo action: rsc1-clone_demote_0 ++ * Resource action: rsc1 demote on rhel7-4 ++ * Pseudo action: rsc1-clone_demoted_0 ++ * Resource action: rsc1 monitor=11000 on rhel7-4 ++Using the original execution date of: 2020-06-16 19:23:21Z ++ ++Revised cluster status: ++Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] ++ ++ Fencing (stonith:fence_xvm): Started rhel7-1 ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] ++ Clone Set: rsc2-master [rsc2] (promotable) ++ Masters: [ rhel7-4 ] ++ Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] ++ +diff --git a/cts/scheduler/on_fail_demote3.xml b/cts/scheduler/on_fail_demote3.xml +new file mode 100644 +index 0000000..a7b6806 +--- /dev/null ++++ b/cts/scheduler/on_fail_demote3.xml +@@ -0,0 +1,221 @@ +diff --git a/cts/scheduler/on_fail_demote4.dot b/cts/scheduler/on_fail_demote4.dot +new file mode 100644 +index 0000000..4715cd3 +--- /dev/null ++++ b/cts/scheduler/on_fail_demote4.dot +@@ -0,0 +1,383 @@ ++ digraph "g" { ++"Cancel rsc1_monitor_11000 rhel7-3" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"Cancel rsc1_monitor_11000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"Cancel rsc2_monitor_11000 rhel7-3" -> "rsc2_promote_0 rhel7-3" [ style = bold] ++"Cancel rsc2_monitor_11000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"Fencing_monitor_120000 rhel7-5" [ style=bold color="green" fontcolor="black"] ++"Fencing_start_0 rhel7-5" -> "Fencing_monitor_120000 rhel7-5" [ style = bold] ++"Fencing_start_0 rhel7-5" [
style=bold color="green" fontcolor="black"] ++"Fencing_stop_0 rhel7-4" -> "Fencing_start_0 rhel7-5" [ style = bold] ++"Fencing_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] ++"bundled_demote_0 stateful-bundle-0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"bundled_demote_0 stateful-bundle-0" -> "bundled_stop_0 stateful-bundle-0" [ style = bold] ++"bundled_demote_0 stateful-bundle-0" -> "stateful-bundle-master_demoted_0" [ style = bold] ++"bundled_demote_0 stateful-bundle-0" [ style=bold color="green" fontcolor="orange"] ++"bundled_monitor_10000 stateful-bundle-0" [ style=bold color="green" fontcolor="black"] ++"bundled_monitor_11000 stateful-bundle-2" [ style=bold color="green" fontcolor="black"] ++"bundled_promote_0 stateful-bundle-0" -> "bundled_monitor_10000 stateful-bundle-0" [ style = bold] ++"bundled_promote_0 stateful-bundle-0" -> "stateful-bundle-master_promoted_0" [ style = bold] ++"bundled_promote_0 stateful-bundle-0" [ style=bold color="green" fontcolor="black"] ++"bundled_start_0 stateful-bundle-0" -> "bundled_monitor_10000 stateful-bundle-0" [ style = bold] ++"bundled_start_0 stateful-bundle-0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"bundled_start_0 stateful-bundle-0" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"bundled_start_0 stateful-bundle-0" -> "stateful-bundle-master_running_0" [ style = bold] ++"bundled_start_0 stateful-bundle-0" [ style=bold color="green" fontcolor="black"] ++"bundled_start_0 stateful-bundle-2" -> "bundled_monitor_11000 stateful-bundle-2" [ style = bold] ++"bundled_start_0 stateful-bundle-2" -> "stateful-bundle-master_running_0" [ style = bold] ++"bundled_start_0 stateful-bundle-2" [ style=bold color="green" fontcolor="black"] ++"bundled_stop_0 stateful-bundle-0" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"bundled_stop_0 stateful-bundle-0" -> "stateful-bundle-master_stopped_0" [ style = bold] ++"bundled_stop_0 stateful-bundle-0" [ style=bold color="green" fontcolor="orange"] ++"bundled_stop_0 stateful-bundle-2" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"bundled_stop_0 stateful-bundle-2" -> "bundled_stop_0 stateful-bundle-0" [ style = bold] ++"bundled_stop_0 stateful-bundle-2" -> "stateful-bundle-master_stopped_0" [ style = bold] ++"bundled_stop_0 stateful-bundle-2" [ style=bold color="green" fontcolor="orange"] ++"container2_monitor_20000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"container2_start_0 rhel7-3" -> "container2_monitor_20000 rhel7-3" [ style = bold] ++"container2_start_0 rhel7-3" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"container2_start_0 rhel7-3" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"container2_start_0 rhel7-3" -> "lxc2_start_0 rhel7-3" [ style = bold] ++"container2_start_0 rhel7-3" -> "rsc1_start_0 lxc2" [ style = bold] ++"container2_start_0 rhel7-3" -> "rsc2_start_0 lxc2" [ style = bold] ++"container2_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"container2_stop_0 rhel7-3" -> "container2_start_0 rhel7-3" [ style = bold] ++"container2_stop_0 rhel7-3" -> "stonith 'reboot' lxc2" [ style = bold] ++"container2_stop_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"lxc-ms-master_demote_0" -> "lxc-ms-master_demoted_0" [ style = bold] ++"lxc-ms-master_demote_0" -> "lxc-ms_demote_0 lxc2" [ style = bold] ++"lxc-ms-master_demote_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_demoted_0" -> "lxc-ms-master_promote_0" [ style = bold] ++"lxc-ms-master_demoted_0" -> 
"lxc-ms-master_start_0" [ style = bold] ++"lxc-ms-master_demoted_0" -> "lxc-ms-master_stop_0" [ style = bold] ++"lxc-ms-master_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_promote_0" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"lxc-ms-master_promote_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_running_0" -> "lxc-ms-master_promote_0" [ style = bold] ++"lxc-ms-master_running_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_start_0" -> "lxc-ms-master_running_0" [ style = bold] ++"lxc-ms-master_start_0" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"lxc-ms-master_start_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_stop_0" -> "lxc-ms-master_stopped_0" [ style = bold] ++"lxc-ms-master_stop_0" -> "lxc-ms_stop_0 lxc2" [ style = bold] ++"lxc-ms-master_stop_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_stopped_0" -> "lxc-ms-master_promote_0" [ style = bold] ++"lxc-ms-master_stopped_0" -> "lxc-ms-master_start_0" [ style = bold] ++"lxc-ms-master_stopped_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms_demote_0 lxc2" -> "lxc-ms-master_demoted_0" [ style = bold] ++"lxc-ms_demote_0 lxc2" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"lxc-ms_demote_0 lxc2" -> "lxc-ms_stop_0 lxc2" [ style = bold] ++"lxc-ms_demote_0 lxc2" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms_monitor_10000 lxc2" [ style=bold color="green" fontcolor="black"] ++"lxc-ms_promote_0 lxc2" -> "lxc-ms-master_promoted_0" [ style = bold] ++"lxc-ms_promote_0 lxc2" -> "lxc-ms_monitor_10000 lxc2" [ style = bold] ++"lxc-ms_promote_0 lxc2" [ style=bold color="green" fontcolor="black"] ++"lxc-ms_start_0 lxc2" -> "lxc-ms-master_running_0" [ style = bold] ++"lxc-ms_start_0 lxc2" -> "lxc-ms_monitor_10000 lxc2" [ style = bold] ++"lxc-ms_start_0 lxc2" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"lxc-ms_start_0 lxc2" [ style=bold color="green" fontcolor="black"] ++"lxc-ms_stop_0 lxc2" -> "lxc-ms-master_stopped_0" [ style = bold] ++"lxc-ms_stop_0 lxc2" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"lxc-ms_stop_0 lxc2" [ style=bold color="green" fontcolor="orange"] ++"lxc2_monitor_30000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"lxc2_start_0 rhel7-3" -> "lxc-ms_monitor_10000 lxc2" [ style = bold] ++"lxc2_start_0 rhel7-3" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"lxc2_start_0 rhel7-3" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"lxc2_start_0 rhel7-3" -> "lxc2_monitor_30000 rhel7-3" [ style = bold] ++"lxc2_start_0 rhel7-3" -> "rsc1_monitor_11000 lxc2" [ style = bold] ++"lxc2_start_0 rhel7-3" -> "rsc1_start_0 lxc2" [ style = bold] ++"lxc2_start_0 rhel7-3" -> "rsc2_monitor_11000 lxc2" [ style = bold] ++"lxc2_start_0 rhel7-3" -> "rsc2_start_0 lxc2" [ style = bold] ++"lxc2_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"lxc2_stop_0 rhel7-3" -> "container2_stop_0 rhel7-3" [ style = bold] ++"lxc2_stop_0 rhel7-3" -> "lxc2_start_0 rhel7-3" [ style = bold] ++"lxc2_stop_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"remote-rhel7-2_monitor_60000 rhel7-1" [ style=bold color="green" fontcolor="black"] ++"remote-rhel7-2_start_0 rhel7-1" -> "remote-rhel7-2_monitor_60000 rhel7-1" [ style = bold] ++"remote-rhel7-2_start_0 rhel7-1" [ style=bold color="green" fontcolor="black"] ++"remote-rhel7-2_stop_0 rhel7-1" -> "remote-rhel7-2_start_0 rhel7-1" [ style = bold] ++"remote-rhel7-2_stop_0 rhel7-1" [ style=bold color="green" 
fontcolor="black"] ++"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1-clone_demote_0" -> "rsc1_demote_0 rhel7-4" [ style = bold] ++"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_demoted_0" -> "rsc1-clone_promote_0" [ style = bold] ++"rsc1-clone_demoted_0" -> "rsc1-clone_start_0" [ style = bold] ++"rsc1-clone_demoted_0" -> "rsc1-clone_stop_0" [ style = bold] ++"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_promote_0" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"rsc1-clone_promote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_running_0" -> "rsc1-clone_promote_0" [ style = bold] ++"rsc1-clone_running_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_start_0" -> "rsc1-clone_running_0" [ style = bold] ++"rsc1-clone_start_0" -> "rsc1_start_0 lxc2" [ style = bold] ++"rsc1-clone_start_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_stop_0" -> "rsc1-clone_stopped_0" [ style = bold] ++"rsc1-clone_stop_0" -> "rsc1_stop_0 lxc2" [ style = bold] ++"rsc1-clone_stop_0" -> "rsc1_stop_0 remote-rhel7-2" [ style = bold] ++"rsc1-clone_stop_0" -> "rsc1_stop_0 rhel7-4" [ style = bold] ++"rsc1-clone_stop_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_stopped_0" -> "rsc1-clone_promote_0" [ style = bold] ++"rsc1-clone_stopped_0" -> "rsc1-clone_start_0" [ style = bold] ++"rsc1-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1_demote_0 rhel7-4" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1_demote_0 rhel7-4" -> "rsc1_stop_0 rhel7-4" [ style = bold] ++"rsc1_demote_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] ++"rsc1_monitor_10000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"rsc1_monitor_11000 lxc2" [ style=bold color="green" fontcolor="black"] ++"rsc1_promote_0 rhel7-3" -> "rsc1-clone_promoted_0" [ style = bold] ++"rsc1_promote_0 rhel7-3" -> "rsc1_monitor_10000 rhel7-3" [ style = bold] ++"rsc1_promote_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"rsc1_start_0 lxc2" -> "rsc1-clone_running_0" [ style = bold] ++"rsc1_start_0 lxc2" -> "rsc1_monitor_11000 lxc2" [ style = bold] ++"rsc1_start_0 lxc2" [ style=bold color="green" fontcolor="black"] ++"rsc1_stop_0 lxc2" -> "rsc1-clone_stopped_0" [ style = bold] ++"rsc1_stop_0 lxc2" -> "rsc1_start_0 lxc2" [ style = bold] ++"rsc1_stop_0 lxc2" [ style=bold color="green" fontcolor="orange"] ++"rsc1_stop_0 remote-rhel7-2" -> "remote-rhel7-2_stop_0 rhel7-1" [ style = bold] ++"rsc1_stop_0 remote-rhel7-2" -> "rsc1-clone_stopped_0" [ style = bold] ++"rsc1_stop_0 remote-rhel7-2" [ style=bold color="green" fontcolor="orange"] ++"rsc1_stop_0 rhel7-4" -> "rsc1-clone_stopped_0" [ style = bold] ++"rsc1_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_demote_0" -> "rsc2-master_demoted_0" [ style = bold] ++"rsc2-master_demote_0" -> "rsc2_demote_0 remote-rhel7-2" [ style = bold] ++"rsc2-master_demote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_demoted_0" -> "rsc2-master_promote_0" [ style = bold] ++"rsc2-master_demoted_0" -> "rsc2-master_start_0" [ style = bold] ++"rsc2-master_demoted_0" -> "rsc2-master_stop_0" [ style = bold] ++"rsc2-master_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_promote_0" -> "rsc2_promote_0 rhel7-3" [ style = bold] ++"rsc2-master_promote_0" [ style=bold color="green" fontcolor="orange"] 
++"rsc2-master_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_running_0" -> "rsc2-master_promote_0" [ style = bold] ++"rsc2-master_running_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_start_0" -> "rsc2-master_running_0" [ style = bold] ++"rsc2-master_start_0" -> "rsc2_start_0 lxc2" [ style = bold] ++"rsc2-master_start_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_stop_0" -> "rsc2-master_stopped_0" [ style = bold] ++"rsc2-master_stop_0" -> "rsc2_stop_0 lxc2" [ style = bold] ++"rsc2-master_stop_0" -> "rsc2_stop_0 remote-rhel7-2" [ style = bold] ++"rsc2-master_stop_0" -> "rsc2_stop_0 rhel7-4" [ style = bold] ++"rsc2-master_stop_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_stopped_0" -> "rsc2-master_promote_0" [ style = bold] ++"rsc2-master_stopped_0" -> "rsc2-master_start_0" [ style = bold] ++"rsc2-master_stopped_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2_demote_0 remote-rhel7-2" -> "rsc2-master_demoted_0" [ style = bold] ++"rsc2_demote_0 remote-rhel7-2" -> "rsc2_stop_0 remote-rhel7-2" [ style = bold] ++"rsc2_demote_0 remote-rhel7-2" [ style=bold color="green" fontcolor="orange"] ++"rsc2_monitor_10000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"rsc2_monitor_11000 lxc2" [ style=bold color="green" fontcolor="black"] ++"rsc2_promote_0 rhel7-3" -> "rsc2-master_promoted_0" [ style = bold] ++"rsc2_promote_0 rhel7-3" -> "rsc2_monitor_10000 rhel7-3" [ style = bold] ++"rsc2_promote_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"rsc2_start_0 lxc2" -> "rsc2-master_running_0" [ style = bold] ++"rsc2_start_0 lxc2" -> "rsc2_monitor_11000 lxc2" [ style = bold] ++"rsc2_start_0 lxc2" [ style=bold color="green" fontcolor="black"] ++"rsc2_stop_0 lxc2" -> "rsc2-master_stopped_0" [ style = bold] ++"rsc2_stop_0 lxc2" -> "rsc2_start_0 lxc2" [ style = bold] ++"rsc2_stop_0 lxc2" [ style=bold color="green" fontcolor="orange"] ++"rsc2_stop_0 remote-rhel7-2" -> "remote-rhel7-2_stop_0 rhel7-1" [ style = bold] ++"rsc2_stop_0 remote-rhel7-2" -> "rsc2-master_stopped_0" [ style = bold] ++"rsc2_stop_0 remote-rhel7-2" [ style=bold color="green" fontcolor="orange"] ++"rsc2_stop_0 rhel7-4" -> "rsc2-master_stopped_0" [ style = bold] ++"rsc2_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-0_monitor_30000 rhel7-5" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-0_start_0 rhel7-5" -> "bundled_monitor_10000 stateful-bundle-0" [ style = bold] ++"stateful-bundle-0_start_0 rhel7-5" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-0_start_0 rhel7-5" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-0_start_0 rhel7-5" -> "stateful-bundle-0_monitor_30000 rhel7-5" [ style = bold] ++"stateful-bundle-0_start_0 rhel7-5" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-0_stop_0 rhel7-5" -> "stateful-bundle-0_start_0 rhel7-5" [ style = bold] ++"stateful-bundle-0_stop_0 rhel7-5" -> "stateful-bundle-docker-0_stop_0 rhel7-5" [ style = bold] ++"stateful-bundle-0_stop_0 rhel7-5" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-2_monitor_30000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-2_start_0 rhel7-3" -> "bundled_monitor_11000 stateful-bundle-2" [ style = bold] ++"stateful-bundle-2_start_0 rhel7-3" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"stateful-bundle-2_start_0 rhel7-3" -> "stateful-bundle-2_monitor_30000 rhel7-3" [ style = bold] 
++"stateful-bundle-2_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-2_stop_0 rhel7-4" -> "stateful-bundle-2_start_0 rhel7-3" [ style = bold] ++"stateful-bundle-2_stop_0 rhel7-4" -> "stateful-bundle-docker-2_stop_0 rhel7-4" [ style = bold] ++"stateful-bundle-2_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-docker-0_monitor_60000 rhel7-5" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-docker-0_start_0 rhel7-5" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-docker-0_start_0 rhel7-5" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-docker-0_start_0 rhel7-5" -> "stateful-bundle-0_start_0 rhel7-5" [ style = bold] ++"stateful-bundle-docker-0_start_0 rhel7-5" -> "stateful-bundle-docker-0_monitor_60000 rhel7-5" [ style = bold] ++"stateful-bundle-docker-0_start_0 rhel7-5" -> "stateful-bundle_running_0" [ style = bold] ++"stateful-bundle-docker-0_start_0 rhel7-5" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-docker-0_stop_0 rhel7-5" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] ++"stateful-bundle-docker-0_stop_0 rhel7-5" -> "stateful-bundle_stopped_0" [ style = bold] ++"stateful-bundle-docker-0_stop_0 rhel7-5" -> "stonith 'reboot' stateful-bundle-0" [ style = bold] ++"stateful-bundle-docker-0_stop_0 rhel7-5" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-docker-2_monitor_60000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-docker-2_start_0 rhel7-3" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"stateful-bundle-docker-2_start_0 rhel7-3" -> "stateful-bundle-2_start_0 rhel7-3" [ style = bold] ++"stateful-bundle-docker-2_start_0 rhel7-3" -> "stateful-bundle-docker-2_monitor_60000 rhel7-3" [ style = bold] ++"stateful-bundle-docker-2_start_0 rhel7-3" -> "stateful-bundle_running_0" [ style = bold] ++"stateful-bundle-docker-2_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-docker-2_stop_0 rhel7-4" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] ++"stateful-bundle-docker-2_stop_0 rhel7-4" -> "stateful-bundle-ip-192.168.122.133_stop_0 rhel7-4" [ style = bold] ++"stateful-bundle-docker-2_stop_0 rhel7-4" -> "stateful-bundle_stopped_0" [ style = bold] ++"stateful-bundle-docker-2_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-ip-192.168.122.133_monitor_60000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] ++"stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" -> "stateful-bundle-ip-192.168.122.133_monitor_60000 rhel7-3" [ style = bold] ++"stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-ip-192.168.122.133_stop_0 rhel7-4" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold] ++"stateful-bundle-ip-192.168.122.133_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_demote_0" -> "bundled_demote_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-master_demote_0" -> "stateful-bundle-master_demoted_0" [ style = bold] ++"stateful-bundle-master_demote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_demoted_0" -> "stateful-bundle-master_promote_0" [ style = bold] ++"stateful-bundle-master_demoted_0" -> "stateful-bundle-master_start_0" [ 
style = bold] ++"stateful-bundle-master_demoted_0" -> "stateful-bundle-master_stop_0" [ style = bold] ++"stateful-bundle-master_demoted_0" -> "stateful-bundle_demoted_0" [ style = bold] ++"stateful-bundle-master_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_promote_0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_promoted_0" -> "stateful-bundle_promoted_0" [ style = bold] ++"stateful-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_running_0" -> "stateful-bundle-master_promote_0" [ style = bold] ++"stateful-bundle-master_running_0" -> "stateful-bundle_running_0" [ style = bold] ++"stateful-bundle-master_running_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_start_0" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-master_start_0" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"stateful-bundle-master_start_0" -> "stateful-bundle-master_running_0" [ style = bold] ++"stateful-bundle-master_start_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_stop_0" -> "bundled_stop_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-master_stop_0" -> "bundled_stop_0 stateful-bundle-2" [ style = bold] ++"stateful-bundle-master_stop_0" -> "stateful-bundle-master_stopped_0" [ style = bold] ++"stateful-bundle-master_stop_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_stopped_0" -> "stateful-bundle-master_promote_0" [ style = bold] ++"stateful-bundle-master_stopped_0" -> "stateful-bundle-master_start_0" [ style = bold] ++"stateful-bundle-master_stopped_0" -> "stateful-bundle_stopped_0" [ style = bold] ++"stateful-bundle-master_stopped_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_demote_0" -> "stateful-bundle-master_demote_0" [ style = bold] ++"stateful-bundle_demote_0" -> "stateful-bundle_demoted_0" [ style = bold] ++"stateful-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_demoted_0" -> "stateful-bundle_promote_0" [ style = bold] ++"stateful-bundle_demoted_0" -> "stateful-bundle_start_0" [ style = bold] ++"stateful-bundle_demoted_0" -> "stateful-bundle_stop_0" [ style = bold] ++"stateful-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_promote_0" -> "stateful-bundle-master_promote_0" [ style = bold] ++"stateful-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_running_0" -> "stateful-bundle_promote_0" [ style = bold] ++"stateful-bundle_running_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_start_0" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] ++"stateful-bundle_start_0" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] ++"stateful-bundle_start_0" -> "stateful-bundle-master_start_0" [ style = bold] ++"stateful-bundle_start_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_stop_0" -> "bundled_stop_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle_stop_0" -> "bundled_stop_0 stateful-bundle-2" [ style = bold] ++"stateful-bundle_stop_0" -> "stateful-bundle-docker-0_stop_0 rhel7-5" [ style = bold] ++"stateful-bundle_stop_0" -> "stateful-bundle-docker-2_stop_0 rhel7-4" [ style = bold] ++"stateful-bundle_stop_0" -> 
"stateful-bundle-master_stop_0" [ style = bold] ++"stateful-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_stopped_0" -> "stateful-bundle_promote_0" [ style = bold] ++"stateful-bundle_stopped_0" -> "stateful-bundle_start_0" [ style = bold] ++"stateful-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] ++"stonith 'reboot' lxc2" -> "Fencing_start_0 rhel7-5" [ style = bold] ++"stonith 'reboot' lxc2" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' lxc2" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' lxc2" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"stonith 'reboot' lxc2" -> "container2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' lxc2" -> "lxc-ms-master_stop_0" [ style = bold] ++"stonith 'reboot' lxc2" -> "lxc-ms_demote_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "lxc-ms_stop_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "lxc2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' lxc2" -> "remote-rhel7-2_start_0 rhel7-1" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc1-clone_stop_0" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc1_start_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc1_stop_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc2-master_stop_0" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc2_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc2_start_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc2_stop_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "stateful-bundle-0_start_0 rhel7-5" [ style = bold] ++"stonith 'reboot' lxc2" -> "stateful-bundle-2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' lxc2" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] ++"stonith 'reboot' lxc2" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' lxc2" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' lxc2" [ style=bold color="green" fontcolor="orange"] ++"stonith 'reboot' remote-rhel7-2" -> "Fencing_start_0 rhel7-5" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "container2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "lxc2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "remote-rhel7-2_start_0 rhel7-1" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc1-clone_stop_0" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc1_start_0 lxc2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc1_stop_0 remote-rhel7-2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc2-master_stop_0" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc2_demote_0 remote-rhel7-2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> 
"rsc2_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc2_start_0 lxc2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc2_stop_0 remote-rhel7-2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-0_start_0 rhel7-5" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "stonith 'reboot' rhel7-4" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" [ style=bold color="green" fontcolor="black"] ++"stonith 'reboot' rhel7-4" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "container2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc1-clone_stop_0" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc1_demote_0 rhel7-4" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc1_start_0 lxc2" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc1_stop_0 rhel7-4" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc2-master_stop_0" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc2_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc2_start_0 lxc2" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc2_stop_0 rhel7-4" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "stateful-bundle-docker-2_stop_0 rhel7-4" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "stateful-bundle-ip-192.168.122.133_stop_0 rhel7-4" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "stonith 'reboot' stateful-bundle-2" [ style = bold] ++"stonith 'reboot' rhel7-4" [ style=bold color="green" fontcolor="black"] ++"stonith 'reboot' stateful-bundle-0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "container2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "rsc1_start_0 lxc2" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "rsc2_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "rsc2_start_0 lxc2" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] ++"stonith 
'reboot' stateful-bundle-0" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold]
++"stonith 'reboot' stateful-bundle-0" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold]
++"stonith 'reboot' stateful-bundle-0" -> "stateful-bundle-master_stop_0" [ style = bold]
++"stonith 'reboot' stateful-bundle-0" [ style=bold color="green" fontcolor="orange"]
++"stonith 'reboot' stateful-bundle-2" -> "bundled_promote_0 stateful-bundle-0" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "bundled_start_0 stateful-bundle-0" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "bundled_start_0 stateful-bundle-2" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "container2_start_0 rhel7-3" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "lxc-ms_promote_0 lxc2" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "lxc-ms_start_0 lxc2" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "rsc1_promote_0 rhel7-3" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "rsc1_start_0 lxc2" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "rsc2_promote_0 rhel7-3" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "rsc2_start_0 lxc2" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "stateful-bundle-master_stop_0" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" [ style=bold color="green" fontcolor="orange"]
++}
+diff --git a/cts/scheduler/on_fail_demote4.exp b/cts/scheduler/on_fail_demote4.exp
+new file mode 100644
+index 0000000..0789a12
+--- /dev/null
++++ b/cts/scheduler/on_fail_demote4.exp
+@@ -0,0 +1,1818 @@
++[on_fail_demote4.exp: 1818 lines of XML transition graph; the angle-bracket markup was stripped in extraction and is not recoverable]
+diff --git a/cts/scheduler/on_fail_demote4.scores b/cts/scheduler/on_fail_demote4.scores
+new file mode 100644
+index 0000000..cde3fec
+--- /dev/null
++++ b/cts/scheduler/on_fail_demote4.scores
+@@ -0,0 +1,470 @@
++Allocation scores:
++Using the original execution date of: 2020-06-16 19:23:21Z
++bundled:0 promotion score on stateful-bundle-0: 10
++bundled:1 promotion score on stateful-bundle-1: 5
++bundled:2 promotion score on stateful-bundle-2: 5
++lxc-ms:0 promotion score on lxc2: INFINITY
++lxc-ms:1 promotion score on lxc1: INFINITY
++pcmk__bundle_allocate: bundled:0 allocation score on stateful-bundle-0: 501
++pcmk__bundle_allocate: bundled:1 allocation score on stateful-bundle-1: 501
++pcmk__bundle_allocate: bundled:2 allocation score on stateful-bundle-2: 501
++pcmk__bundle_allocate: stateful-bundle allocation score on lxc1: 0
++pcmk__bundle_allocate: stateful-bundle allocation score on lxc2: 0
++pcmk__bundle_allocate: stateful-bundle allocation score on remote-rhel7-2: 0
++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-1: 0
++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-3: 0
++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-4: 0
++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-5: 0
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-1: 0
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-3: 0
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-4: 0
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-5: 0
++pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-1: 0
++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-3: 0
++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-4: 0
++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-5: 0
++pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-1: 0
++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-3: 0
++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-4: 0
++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-5: 0
++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc1: 0
++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc2: 0
++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: 0
++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: 0
++pcmk__bundle_allocate: 
stateful-bundle-docker-0 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-master 
allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-0: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-1: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-2: -INFINITY ++pcmk__clone_allocate: bundled:0 allocation score on stateful-bundle-0: INFINITY ++pcmk__clone_allocate: bundled:1 allocation score on stateful-bundle-1: INFINITY ++pcmk__clone_allocate: bundled:2 allocation score on stateful-bundle-2: INFINITY ++pcmk__clone_allocate: lxc-ms-master allocation score on lxc1: INFINITY ++pcmk__clone_allocate: lxc-ms-master allocation score on lxc2: INFINITY ++pcmk__clone_allocate: lxc-ms-master allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-1: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-3: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-4: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-5: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on lxc1: INFINITY ++pcmk__clone_allocate: lxc-ms:0 allocation score on lxc2: INFINITY ++pcmk__clone_allocate: lxc-ms:0 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on lxc1: INFINITY ++pcmk__clone_allocate: lxc-ms:1 allocation score on lxc2: INFINITY ++pcmk__clone_allocate: lxc-ms:1 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 1 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on 
rhel7-5: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on remote-rhel7-2: 1 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on lxc2: 6 ++pcmk__clone_allocate: rsc1:5 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on lxc1: 6 ++pcmk__clone_allocate: rsc1:6 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2-master allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2-master allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2-master allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-4: 1 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: 
rsc2:2 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on remote-rhel7-2: 1 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on lxc2: 6 ++pcmk__clone_allocate: rsc2:5 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on lxc1: 6 ++pcmk__clone_allocate: rsc2:6 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: stateful-bundle-master allocation score on lxc1: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on lxc2: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on remote-rhel7-2: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-1: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-3: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-4: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-5: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-0: 0 ++pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-1: 0 ++pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-2: 0 ++pcmk__native_allocate: Fencing allocation score on lxc1: -INFINITY ++pcmk__native_allocate: Fencing allocation score on lxc2: -INFINITY ++pcmk__native_allocate: Fencing allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: Fencing allocation score on rhel7-1: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-3: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-4: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-5: 0 ++pcmk__native_allocate: bundled:0 allocation score on 
stateful-bundle-0: INFINITY ++pcmk__native_allocate: bundled:1 allocation score on stateful-bundle-1: INFINITY ++pcmk__native_allocate: bundled:2 allocation score on stateful-bundle-2: INFINITY ++pcmk__native_allocate: container1 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: container1 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: container1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: container1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: container1 allocation score on rhel7-3: INFINITY ++pcmk__native_allocate: container1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: container1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: container2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: container2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: container2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: container2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: container2 allocation score on rhel7-3: INFINITY ++pcmk__native_allocate: container2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: container2 allocation score on rhel7-5: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: lxc-ms:0 allocation score on lxc2: INFINITY ++pcmk__native_allocate: lxc-ms:0 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-5: 0 ++pcmk__native_allocate: lxc-ms:1 allocation score on lxc1: INFINITY ++pcmk__native_allocate: lxc-ms:1 allocation score on lxc2: INFINITY ++pcmk__native_allocate: lxc-ms:1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-3: 0 ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: lxc1 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on rhel7-3: 0 ++pcmk__native_allocate: lxc1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: lxc2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: 
remote-rhel7-2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:0 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:1 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc1:1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:2 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:2 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc1:2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc1:3 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:3 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc1:3 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on lxc2: 6 ++pcmk__native_allocate: rsc1:5 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on lxc1: 6 ++pcmk__native_allocate: rsc1:6 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc1:6 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on lxc1: -INFINITY 
++pcmk__native_allocate: rsc2:0 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:1 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:1 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc2:2 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:2 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc2:3 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:3 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:3 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on lxc2: 6 ++pcmk__native_allocate: rsc2:5 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on lxc1: 6 ++pcmk__native_allocate: rsc2:6 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:6 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY 
++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-5: 10000 ++pcmk__native_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-1: 10000 ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-3: 10000 ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on lxc1: -10000 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on lxc2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on lxc1: -10000 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on lxc2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on lxc1: -10000 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on lxc2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: 
stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: -INFINITY ++rsc1:0 promotion score on none: 0 ++rsc1:1 promotion score on rhel7-3: 5 ++rsc1:2 promotion score on rhel7-5: 5 ++rsc1:3 promotion score on rhel7-1: 5 ++rsc1:4 promotion score on none: 0 ++rsc1:5 promotion score on lxc2: 5 ++rsc1:6 promotion score on lxc1: 5 ++rsc2:0 promotion score on none: 0 ++rsc2:1 promotion score on rhel7-3: 5 ++rsc2:2 promotion score on rhel7-5: 5 ++rsc2:3 promotion score on rhel7-1: 5 ++rsc2:4 promotion score on none: 0 ++rsc2:5 promotion score on lxc2: 5 ++rsc2:6 promotion score on lxc1: 5 +diff --git a/cts/scheduler/on_fail_demote4.summary b/cts/scheduler/on_fail_demote4.summary +new file mode 100644 +index 0000000..20520ff +--- /dev/null ++++ b/cts/scheduler/on_fail_demote4.summary +@@ -0,0 +1,187 @@ ++Using the original execution date of: 2020-06-16 19:23:21Z ++ ++Current cluster status: ++RemoteNode remote-rhel7-2: UNCLEAN (offline) ++Node rhel7-4 (4): UNCLEAN (offline) ++Online: [ rhel7-1 rhel7-3 rhel7-5 ] ++GuestOnline: [ lxc1:container1 stateful-bundle-1:stateful-bundle-docker-1 ] ++ ++ Fencing (stonith:fence_xvm): Started rhel7-4 (UNCLEAN) ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ rsc1 (ocf::pacemaker:Stateful): Master rhel7-4 (UNCLEAN) ++ rsc1 (ocf::pacemaker:Stateful): Slave remote-rhel7-2 (UNCLEAN) ++ Slaves: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ] ++ Clone Set: rsc2-master [rsc2] (promotable) ++ rsc2 (ocf::pacemaker:Stateful): Slave rhel7-4 (UNCLEAN) ++ rsc2 (ocf::pacemaker:Stateful): Master remote-rhel7-2 (UNCLEAN) ++ Slaves: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ] ++ remote-rhel7-2 (ocf::pacemaker:remote): FAILED rhel7-1 ++ container1 (ocf::heartbeat:VirtualDomain): Started rhel7-3 ++ container2 (ocf::heartbeat:VirtualDomain): FAILED rhel7-3 ++ Clone Set: lxc-ms-master [lxc-ms] (promotable) ++ Slaves: [ lxc1 ] ++ Stopped: [ remote-rhel7-2 rhel7-1 rhel7-3 
rhel7-4 rhel7-5 ] ++ Container bundle set: stateful-bundle [pcmktest:http] ++ stateful-bundle-0 (192.168.122.131) (ocf::pacemaker:Stateful): FAILED Master rhel7-5 ++ stateful-bundle-1 (192.168.122.132) (ocf::pacemaker:Stateful): Slave rhel7-1 ++ stateful-bundle-2 (192.168.122.133) (ocf::pacemaker:Stateful): FAILED rhel7-4 (UNCLEAN) ++ ++Transition Summary: ++ * Fence (reboot) stateful-bundle-2 (resource: stateful-bundle-docker-2) 'guest is unclean' ++ * Fence (reboot) stateful-bundle-0 (resource: stateful-bundle-docker-0) 'guest is unclean' ++ * Fence (reboot) lxc2 (resource: container2) 'guest is unclean' ++ * Fence (reboot) remote-rhel7-2 'remote connection is unrecoverable' ++ * Fence (reboot) rhel7-4 'peer is no longer part of the cluster' ++ * Move Fencing ( rhel7-4 -> rhel7-5 ) ++ * Stop rsc1:0 ( Master rhel7-4 ) due to node availability ++ * Promote rsc1:1 ( Slave -> Master rhel7-3 ) ++ * Stop rsc1:4 ( Slave remote-rhel7-2 ) due to node availability ++ * Recover rsc1:5 ( Slave lxc2 ) ++ * Stop rsc2:0 ( Slave rhel7-4 ) due to node availability ++ * Promote rsc2:1 ( Slave -> Master rhel7-3 ) ++ * Stop rsc2:4 ( Master remote-rhel7-2 ) due to node availability ++ * Recover rsc2:5 ( Slave lxc2 ) ++ * Recover remote-rhel7-2 ( rhel7-1 ) ++ * Recover container2 ( rhel7-3 ) ++ * Recover lxc-ms:0 ( Master lxc2 ) ++ * Recover stateful-bundle-docker-0 ( rhel7-5 ) ++ * Restart stateful-bundle-0 ( rhel7-5 ) due to required stateful-bundle-docker-0 start ++ * Recover bundled:0 ( Master stateful-bundle-0 ) ++ * Move stateful-bundle-ip-192.168.122.133 ( rhel7-4 -> rhel7-3 ) ++ * Recover stateful-bundle-docker-2 ( rhel7-4 -> rhel7-3 ) ++ * Move stateful-bundle-2 ( rhel7-4 -> rhel7-3 ) ++ * Recover bundled:2 ( Slave stateful-bundle-2 ) ++ * Restart lxc2 ( rhel7-3 ) due to required container2 start ++ ++Executing cluster transition: ++ * Pseudo action: Fencing_stop_0 ++ * Resource action: rsc1 cancel=11000 on rhel7-3 ++ * Pseudo action: rsc1-clone_demote_0 ++ * Resource action: rsc2 cancel=11000 on rhel7-3 ++ * Pseudo action: rsc2-master_demote_0 ++ * Pseudo action: lxc-ms-master_demote_0 ++ * Resource action: stateful-bundle-0 stop on rhel7-5 ++ * Pseudo action: stateful-bundle-2_stop_0 ++ * Resource action: lxc2 stop on rhel7-3 ++ * Pseudo action: stateful-bundle_demote_0 ++ * Fencing remote-rhel7-2 (reboot) ++ * Fencing rhel7-4 (reboot) ++ * Pseudo action: rsc1_demote_0 ++ * Pseudo action: rsc1-clone_demoted_0 ++ * Pseudo action: rsc2_demote_0 ++ * Pseudo action: rsc2-master_demoted_0 ++ * Resource action: container2 stop on rhel7-3 ++ * Pseudo action: stateful-bundle-master_demote_0 ++ * Pseudo action: stonith-stateful-bundle-2-reboot on stateful-bundle-2 ++ * Pseudo action: stonith-lxc2-reboot on lxc2 ++ * Resource action: Fencing start on rhel7-5 ++ * Pseudo action: rsc1-clone_stop_0 ++ * Pseudo action: rsc2-master_stop_0 ++ * Pseudo action: lxc-ms_demote_0 ++ * Pseudo action: lxc-ms-master_demoted_0 ++ * Pseudo action: lxc-ms-master_stop_0 ++ * Pseudo action: bundled_demote_0 ++ * Pseudo action: stateful-bundle-master_demoted_0 ++ * Pseudo action: stateful-bundle_demoted_0 ++ * Pseudo action: stateful-bundle_stop_0 ++ * Resource action: Fencing monitor=120000 on rhel7-5 ++ * Pseudo action: rsc1_stop_0 ++ * Pseudo action: rsc1_stop_0 ++ * Pseudo action: rsc1_stop_0 ++ * Pseudo action: rsc1-clone_stopped_0 ++ * Pseudo action: rsc1-clone_start_0 ++ * Pseudo action: rsc2_stop_0 ++ * Pseudo action: rsc2_stop_0 ++ * Pseudo action: rsc2_stop_0 ++ * Pseudo action: rsc2-master_stopped_0 ++ * Pseudo 
action: rsc2-master_start_0 ++ * Resource action: remote-rhel7-2 stop on rhel7-1 ++ * Pseudo action: lxc-ms_stop_0 ++ * Pseudo action: lxc-ms-master_stopped_0 ++ * Pseudo action: lxc-ms-master_start_0 ++ * Resource action: stateful-bundle-docker-0 stop on rhel7-5 ++ * Pseudo action: stateful-bundle-docker-2_stop_0 ++ * Pseudo action: stonith-stateful-bundle-0-reboot on stateful-bundle-0 ++ * Resource action: remote-rhel7-2 start on rhel7-1 ++ * Resource action: remote-rhel7-2 monitor=60000 on rhel7-1 ++ * Resource action: container2 start on rhel7-3 ++ * Resource action: container2 monitor=20000 on rhel7-3 ++ * Pseudo action: stateful-bundle-master_stop_0 ++ * Pseudo action: stateful-bundle-ip-192.168.122.133_stop_0 ++ * Resource action: lxc2 start on rhel7-3 ++ * Resource action: lxc2 monitor=30000 on rhel7-3 ++ * Resource action: rsc1 start on lxc2 ++ * Pseudo action: rsc1-clone_running_0 ++ * Resource action: rsc2 start on lxc2 ++ * Pseudo action: rsc2-master_running_0 ++ * Resource action: lxc-ms start on lxc2 ++ * Pseudo action: lxc-ms-master_running_0 ++ * Pseudo action: bundled_stop_0 ++ * Resource action: stateful-bundle-ip-192.168.122.133 start on rhel7-3 ++ * Resource action: rsc1 monitor=11000 on lxc2 ++ * Pseudo action: rsc1-clone_promote_0 ++ * Resource action: rsc2 monitor=11000 on lxc2 ++ * Pseudo action: rsc2-master_promote_0 ++ * Pseudo action: lxc-ms-master_promote_0 ++ * Pseudo action: bundled_stop_0 ++ * Pseudo action: stateful-bundle-master_stopped_0 ++ * Resource action: stateful-bundle-ip-192.168.122.133 monitor=60000 on rhel7-3 ++ * Pseudo action: stateful-bundle_stopped_0 ++ * Pseudo action: stateful-bundle_start_0 ++ * Resource action: rsc1 promote on rhel7-3 ++ * Pseudo action: rsc1-clone_promoted_0 ++ * Resource action: rsc2 promote on rhel7-3 ++ * Pseudo action: rsc2-master_promoted_0 ++ * Resource action: lxc-ms promote on lxc2 ++ * Pseudo action: lxc-ms-master_promoted_0 ++ * Pseudo action: stateful-bundle-master_start_0 ++ * Resource action: stateful-bundle-docker-0 start on rhel7-5 ++ * Resource action: stateful-bundle-docker-0 monitor=60000 on rhel7-5 ++ * Resource action: stateful-bundle-0 start on rhel7-5 ++ * Resource action: stateful-bundle-0 monitor=30000 on rhel7-5 ++ * Resource action: stateful-bundle-docker-2 start on rhel7-3 ++ * Resource action: stateful-bundle-2 start on rhel7-3 ++ * Resource action: rsc1 monitor=10000 on rhel7-3 ++ * Resource action: rsc2 monitor=10000 on rhel7-3 ++ * Resource action: lxc-ms monitor=10000 on lxc2 ++ * Resource action: bundled start on stateful-bundle-0 ++ * Resource action: bundled start on stateful-bundle-2 ++ * Pseudo action: stateful-bundle-master_running_0 ++ * Resource action: stateful-bundle-docker-2 monitor=60000 on rhel7-3 ++ * Resource action: stateful-bundle-2 monitor=30000 on rhel7-3 ++ * Pseudo action: stateful-bundle_running_0 ++ * Resource action: bundled monitor=11000 on stateful-bundle-2 ++ * Pseudo action: stateful-bundle_promote_0 ++ * Pseudo action: stateful-bundle-master_promote_0 ++ * Resource action: bundled promote on stateful-bundle-0 ++ * Pseudo action: stateful-bundle-master_promoted_0 ++ * Pseudo action: stateful-bundle_promoted_0 ++ * Resource action: bundled monitor=10000 on stateful-bundle-0 ++Using the original execution date of: 2020-06-16 19:23:21Z ++ ++Revised cluster status: ++Online: [ rhel7-1 rhel7-3 rhel7-5 ] ++OFFLINE: [ rhel7-4 ] ++RemoteOnline: [ remote-rhel7-2 ] ++GuestOnline: [ lxc1:container1 lxc2:container2 stateful-bundle-0:stateful-bundle-docker-0 
stateful-bundle-1:stateful-bundle-docker-1 stateful-bundle-2:stateful-bundle-docker-2 ] ++ ++ Fencing (stonith:fence_xvm): Started rhel7-5 ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ Masters: [ rhel7-3 ] ++ Slaves: [ lxc1 lxc2 rhel7-1 rhel7-5 ] ++ Stopped: [ remote-rhel7-2 rhel7-4 ] ++ Clone Set: rsc2-master [rsc2] (promotable) ++ Masters: [ rhel7-3 ] ++ Slaves: [ lxc1 lxc2 rhel7-1 rhel7-5 ] ++ Stopped: [ remote-rhel7-2 rhel7-4 ] ++ remote-rhel7-2 (ocf::pacemaker:remote): Started rhel7-1 ++ container1 (ocf::heartbeat:VirtualDomain): Started rhel7-3 ++ container2 (ocf::heartbeat:VirtualDomain): Started rhel7-3 ++ Clone Set: lxc-ms-master [lxc-ms] (promotable) ++ Masters: [ lxc2 ] ++ Slaves: [ lxc1 ] ++ Container bundle set: stateful-bundle [pcmktest:http] ++ stateful-bundle-0 (192.168.122.131) (ocf::pacemaker:Stateful): Master rhel7-5 ++ stateful-bundle-1 (192.168.122.132) (ocf::pacemaker:Stateful): Slave rhel7-1 ++ stateful-bundle-2 (192.168.122.133) (ocf::pacemaker:Stateful): Slave rhel7-3 ++ +diff --git a/cts/scheduler/on_fail_demote4.xml b/cts/scheduler/on_fail_demote4.xml +new file mode 100644 +index 0000000..eb4c4cc +--- /dev/null ++++ b/cts/scheduler/on_fail_demote4.xml +@@ -0,0 +1,625 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From 204961e95d9de140d998d71a0e53b5b9baa5d39e Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 26 May 2020 18:04:32 -0500 +Subject: [PATCH 12/20] Doc: Pacemaker Explained: document new on-fail="demote" + option + +--- + doc/Pacemaker_Explained/en-US/Ch-Resources.txt | 36 ++++++++++++++++++++++++++ + 1 file changed, 36 insertions(+) + +diff --git a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt 
b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt +index d8e7115..9df9243 100644 +--- a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt ++++ b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt +@@ -676,6 +676,10 @@ a|The action to take if this action ever fails. Allowed values: + * +ignore:+ Pretend the resource did not fail. + * +block:+ Don't perform any further operations on the resource. + * +stop:+ Stop the resource and do not start it elsewhere. ++* +demote:+ Demote the resource, without a full restart. This is valid only for ++ +promote+ actions, and for +monitor+ actions with both a nonzero +interval+ ++ and +role+ set to +Master+; for any other action, a configuration error will ++ be logged, and the default behavior will be used. + * +restart:+ Stop the resource and start it again (possibly on a different node). + * +fence:+ STONITH the node on which the resource failed. + * +standby:+ Move _all_ resources away from the node on which the resource failed. +@@ -714,6 +718,38 @@ indexterm:[Action,Property,on-fail] + + |========================================================= + ++[NOTE] ++==== ++When +on-fail+ is set to +demote+, recovery from failure by a successful demote ++causes the cluster to recalculate whether and where a new instance should be ++promoted. The node with the failure is eligible, so if master scores have not ++changed, it will be promoted again. ++ ++There is no direct equivalent of +migration-threshold+ for the master role, but ++the same effect can be achieved with a location constraint using a ++<> with a node attribute expression for the resource's fail ++count. ++ ++For example, to immediately ban the master role from a node with any failed ++promote or master monitor: ++[source,XML] ++---- ++ ++ ++ ++ ++ ++ ++---- ++ ++This example assumes that there is a promotable clone of the +my_primitive+ ++resource (note that the primitive name, not the clone name, is used in the ++rule), and that there is a recurring 10-second-interval monitor configured for ++the master role (fail count attributes specify the interval in milliseconds). ++==== ++ + [[s-resource-monitoring]] + === Monitoring Resources for Failure === + +-- +1.8.3.1 + + +From d4b9117e72b178bb6f4458cd89bee13060f78dcb Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 26 May 2020 18:10:33 -0500 +Subject: [PATCH 13/20] Doc: Pacemaker Explained: correct on-fail default + +--- + doc/Pacemaker_Explained/en-US/Ch-Resources.txt | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt +index 9df9243..88892db 100644 +--- a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt ++++ b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt +@@ -669,8 +669,13 @@ XML attributes take precedence over +nvpair+ elements if both are specified. + indexterm:[Action,Property,timeout] + + |on-fail +-|restart '(except for +stop+ operations, which default to' fence 'when +- STONITH is enabled and' block 'otherwise)' ++a|Varies by action: ++ ++* +stop+: +fence+ if +stonith-enabled+ is true or +block+ otherwise ++* +demote+: +on-fail+ of the +monitor+ action with +role+ set to +Master+, if ++ present, enabled, and configured to a value other than +demote+, or +restart+ ++ otherwise ++* all other actions: +restart+ + a|The action to take if this action ever fails. Allowed values: + + * +ignore:+ Pretend the resource did not fail. 
+-- +1.8.3.1 + + +From 0b683445318c783ecef8d6f023b35a6c056ee321 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 2 Jun 2020 15:05:56 -0500 +Subject: [PATCH 14/20] Refactor: scheduler: functionize checking quorum policy + in effect + +... for readability and ease of future changes +--- + lib/pengine/utils.c | 18 ++++++++++++++---- + 1 file changed, 14 insertions(+), 4 deletions(-) + +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index fee9efb..5d6b836 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -481,6 +481,17 @@ sort_rsc_priority(gconstpointer a, gconstpointer b) + return 0; + } + ++static enum pe_quorum_policy ++effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set) ++{ ++ enum pe_quorum_policy policy = data_set->no_quorum_policy; ++ ++ if (is_set(data_set->flags, pe_flag_have_quorum)) { ++ policy = no_quorum_ignore; ++ } ++ return policy; ++} ++ + pe_action_t * + custom_action(pe_resource_t * rsc, char *key, const char *task, + pe_node_t * on_node, gboolean optional, gboolean save_action, +@@ -593,6 +604,7 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, + + if (rsc != NULL) { + enum action_tasks a_task = text2task(action->task); ++ enum pe_quorum_policy quorum_policy = effective_quorum_policy(rsc, data_set); + int warn_level = LOG_TRACE; + + if (save_action) { +@@ -675,13 +687,11 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, + crm_trace("Action %s requires only stonith", action->uuid); + action->runnable = TRUE; + #endif +- } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE +- && data_set->no_quorum_policy == no_quorum_stop) { ++ } else if (quorum_policy == no_quorum_stop) { + pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "no quorum", pe_action_runnable, TRUE); + crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid); + +- } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE +- && data_set->no_quorum_policy == no_quorum_freeze) { ++ } else if (quorum_policy == no_quorum_freeze) { + pe_rsc_trace(rsc, "Check resource is already active: %s %s %s %s", rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role)); + if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) { + pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "quorum freeze", pe_action_runnable, TRUE); +-- +1.8.3.1 + + +From b1ae359382f15e28e90d9144ca7b1d5f04820c10 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 2 Jun 2020 15:06:32 -0500 +Subject: [PATCH 15/20] Feature: scheduler: support "demote" choice for + no-quorum-policy option + +If quorum is lost, promotable resources in the master role will be demoted but +left running, and all other resources will be stopped. 
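Condensed into a self-contained sketch, the decision this patch adds to effective_quorum_policy() in lib/pengine/utils.c (see the hunks below) looks like the following. The enums and structs here are simplified stand-ins for the real pe_types.h definitions, not pacemaker's actual types:

[source,C]
----
#include <stdio.h>

/* Simplified stand-ins for pe_working_set_t, pe_resource_t,
 * enum pe_quorum_policy, and enum rsc_role_e. */
enum quorum_policy { NQ_FREEZE, NQ_STOP, NQ_IGNORE, NQ_SUICIDE, NQ_DEMOTE };
enum role { ROLE_STOPPED, ROLE_STARTED, ROLE_SLAVE, ROLE_MASTER };

struct working_set { int have_quorum; enum quorum_policy no_quorum_policy; };
struct resource { enum role role; enum role next_role; };

/* With quorum lost and no-quorum-policy=demote, promotable instances are
 * capped at the slave role but otherwise left alone, while everything
 * else falls back to the "stop" policy. */
static enum quorum_policy
effective_policy(struct resource *rsc, struct working_set *ws)
{
    enum quorum_policy policy = ws->no_quorum_policy;

    if (ws->have_quorum) {
        policy = NQ_IGNORE;

    } else if (ws->no_quorum_policy == NQ_DEMOTE) {
        switch (rsc->role) {
            case ROLE_MASTER:
            case ROLE_SLAVE:
                if (rsc->next_role > ROLE_SLAVE) {
                    rsc->next_role = ROLE_SLAVE; /* demote, don't stop */
                }
                policy = NQ_IGNORE;
                break;
            default:
                policy = NQ_STOP; /* non-promotable resources stop */
                break;
        }
    }
    return policy;
}

int main(void)
{
    struct working_set ws = { 0 /* quorum lost */, NQ_DEMOTE };
    struct resource master = { ROLE_MASTER, ROLE_MASTER };
    struct resource plain  = { ROLE_STARTED, ROLE_STARTED };

    printf("master: policy=%d next_role=%d\n",
           effective_policy(&master, &ws), master.next_role);
    printf("plain:  policy=%d\n", effective_policy(&plain, &ws));
    return 0;
}
----

Run against a quorum-less working set, the master instance comes back with the "ignore" policy and a next role of slave, while the plain resource gets "stop", which is exactly the behavior the commit message above describes.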
+--- + daemons/controld/controld_control.c | 2 +- + include/crm/pengine/pe_types.h | 3 ++- + lib/common/options.c | 1 + + lib/pengine/common.c | 2 +- + lib/pengine/pe_output.c | 14 ++++++++++++++ + lib/pengine/unpack.c | 7 +++++++ + lib/pengine/utils.c | 14 ++++++++++++++ + 7 files changed, 40 insertions(+), 3 deletions(-) + +diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c +index 7d29205..059eb7b 100644 +--- a/daemons/controld/controld_control.c ++++ b/daemons/controld/controld_control.c +@@ -626,7 +626,7 @@ static pcmk__cluster_option_t crmd_opts[] = { + + // Already documented in libpe_status (other values must be kept identical) + { +- "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, suicide", ++ "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, demote, suicide", + "stop", pcmk__valid_quorum, NULL, NULL + }, + { +diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h +index ed5eb12..f3cb4ef 100644 +--- a/include/crm/pengine/pe_types.h ++++ b/include/crm/pengine/pe_types.h +@@ -61,7 +61,8 @@ enum pe_quorum_policy { + no_quorum_freeze, + no_quorum_stop, + no_quorum_ignore, +- no_quorum_suicide ++ no_quorum_suicide, ++ no_quorum_demote + }; + + enum node_type { +diff --git a/lib/common/options.c b/lib/common/options.c +index 9399642..9e041c9 100644 +--- a/lib/common/options.c ++++ b/lib/common/options.c +@@ -407,6 +407,7 @@ pcmk__valid_quorum(const char *value) + return safe_str_eq(value, "stop") + || safe_str_eq(value, "freeze") + || safe_str_eq(value, "ignore") ++ || safe_str_eq(value, "demote") + || safe_str_eq(value, "suicide"); + } + +diff --git a/lib/pengine/common.c b/lib/pengine/common.c +index f4f2106..37f287b 100644 +--- a/lib/pengine/common.c ++++ b/lib/pengine/common.c +@@ -54,7 +54,7 @@ static pcmk__cluster_option_t pe_opts[] = { + * long description + */ + { +- "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, suicide", ++ "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, demote, suicide", + "stop", pcmk__valid_quorum, + "What to do when the cluster does not have quorum", + NULL +diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c +index 75bf0d5..ad469ab 100644 +--- a/lib/pengine/pe_output.c ++++ b/lib/pengine/pe_output.c +@@ -729,6 +729,11 @@ pe__cluster_options_html(pcmk__output_t *out, va_list args) { + out->list_item(out, NULL, "No quorum policy: Stop ALL resources"); + break; + ++ case no_quorum_demote: ++ out->list_item(out, NULL, "No quorum policy: Demote promotable " ++ "resources and stop all other resources"); ++ break; ++ + case no_quorum_ignore: + out->list_item(out, NULL, "No quorum policy: Ignore"); + break; +@@ -785,6 +790,11 @@ pe__cluster_options_text(pcmk__output_t *out, va_list args) { + out->list_item(out, NULL, "No quorum policy: Stop ALL resources"); + break; + ++ case no_quorum_demote: ++ out->list_item(out, NULL, "No quorum policy: Demote promotable " ++ "resources and stop all other resources"); ++ break; ++ + case no_quorum_ignore: + out->list_item(out, NULL, "No quorum policy: Ignore"); + break; +@@ -817,6 +827,10 @@ pe__cluster_options_xml(pcmk__output_t *out, va_list args) { + xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "stop"); + break; + ++ case no_quorum_demote: ++ xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "demote"); ++ break; ++ + case no_quorum_ignore: + xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "ignore"); + break; +diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c +index 
a219805..a480680 100644 +--- a/lib/pengine/unpack.c ++++ b/lib/pengine/unpack.c +@@ -268,6 +268,9 @@ unpack_config(xmlNode * config, pe_working_set_t * data_set) + } else if (safe_str_eq(value, "freeze")) { + data_set->no_quorum_policy = no_quorum_freeze; + ++ } else if (safe_str_eq(value, "demote")) { ++ data_set->no_quorum_policy = no_quorum_demote; ++ + } else if (safe_str_eq(value, "suicide")) { + if (is_set(data_set->flags, pe_flag_stonith_enabled)) { + int do_panic = 0; +@@ -297,6 +300,10 @@ unpack_config(xmlNode * config, pe_working_set_t * data_set) + case no_quorum_stop: + crm_debug("On loss of quorum: Stop ALL resources"); + break; ++ case no_quorum_demote: ++ crm_debug("On loss of quorum: " ++ "Demote promotable resources and stop other resources"); ++ break; + case no_quorum_suicide: + crm_notice("On loss of quorum: Fence all remaining nodes"); + break; +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index 5d6b836..f8b631a 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -488,6 +488,20 @@ effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set) + + if (is_set(data_set->flags, pe_flag_have_quorum)) { + policy = no_quorum_ignore; ++ ++ } else if (data_set->no_quorum_policy == no_quorum_demote) { ++ switch (rsc->role) { ++ case RSC_ROLE_MASTER: ++ case RSC_ROLE_SLAVE: ++ if (rsc->next_role > RSC_ROLE_SLAVE) { ++ rsc->next_role = RSC_ROLE_SLAVE; ++ } ++ policy = no_quorum_ignore; ++ break; ++ default: ++ policy = no_quorum_stop; ++ break; ++ } + } + return policy; + } +-- +1.8.3.1 + + +From 5d809e136f2927259ad570e409e3bbb68f7ce7b4 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Wed, 17 Jun 2020 12:29:50 -0500 +Subject: [PATCH 16/20] Test: scheduler: add regression test for + no-quorum-policy="demote" + +--- + cts/cts-scheduler.in | 1 + + cts/scheduler/no_quorum_demote.dot | 22 ++++ + cts/scheduler/no_quorum_demote.exp | 81 ++++++++++++ + cts/scheduler/no_quorum_demote.scores | 72 +++++++++++ + cts/scheduler/no_quorum_demote.summary | 38 ++++++ + cts/scheduler/no_quorum_demote.xml | 224 +++++++++++++++++++++++++++++++++ + 6 files changed, 438 insertions(+) + create mode 100644 cts/scheduler/no_quorum_demote.dot + create mode 100644 cts/scheduler/no_quorum_demote.exp + create mode 100644 cts/scheduler/no_quorum_demote.scores + create mode 100644 cts/scheduler/no_quorum_demote.summary + create mode 100644 cts/scheduler/no_quorum_demote.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index 0e68e73..9e34379 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -482,6 +482,7 @@ TESTS = [ + [ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ], + [ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ], + [ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ], ++ [ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ], + ], + [ + [ "history-1", "Correctly parse stateful-1 resource state" ], +diff --git a/cts/scheduler/no_quorum_demote.dot b/cts/scheduler/no_quorum_demote.dot +new file mode 100644 +index 0000000..ea5b30c +--- /dev/null ++++ b/cts/scheduler/no_quorum_demote.dot +@@ -0,0 +1,22 @@ ++ digraph "g" { ++"Cancel rsc1_monitor_10000 rhel7-1" -> "rsc1_demote_0 rhel7-1" [ style = bold] ++"Cancel rsc1_monitor_10000 rhel7-1" [ style=bold color="green" fontcolor="black"] ++"Fencing_monitor_120000 rhel7-1" [ style=dashed color="red" fontcolor="black"] 
++"Fencing_start_0 rhel7-1" -> "Fencing_monitor_120000 rhel7-1" [ style = dashed] ++"Fencing_start_0 rhel7-1" [ style=dashed color="red" fontcolor="black"] ++"Fencing_stop_0 rhel7-1" -> "Fencing_start_0 rhel7-1" [ style = dashed] ++"Fencing_stop_0 rhel7-1" [ style=bold color="green" fontcolor="black"] ++"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1-clone_demote_0" -> "rsc1_demote_0 rhel7-1" [ style = bold] ++"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1_demote_0 rhel7-1" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1_demote_0 rhel7-1" -> "rsc1_monitor_11000 rhel7-1" [ style = bold] ++"rsc1_demote_0 rhel7-1" [ style=bold color="green" fontcolor="black"] ++"rsc1_monitor_11000 rhel7-1" [ style=bold color="green" fontcolor="black"] ++"rsc2_monitor_10000 rhel7-2" [ style=dashed color="red" fontcolor="black"] ++"rsc2_start_0 rhel7-2" -> "rsc2_monitor_10000 rhel7-2" [ style = dashed] ++"rsc2_start_0 rhel7-2" [ style=dashed color="red" fontcolor="black"] ++"rsc2_stop_0 rhel7-2" -> "rsc2_start_0 rhel7-2" [ style = dashed] ++"rsc2_stop_0 rhel7-2" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/no_quorum_demote.exp b/cts/scheduler/no_quorum_demote.exp +new file mode 100644 +index 0000000..245574c +--- /dev/null ++++ b/cts/scheduler/no_quorum_demote.exp +@@ -0,0 +1,81 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/no_quorum_demote.scores b/cts/scheduler/no_quorum_demote.scores +new file mode 100644 +index 0000000..dddc57b +--- /dev/null ++++ b/cts/scheduler/no_quorum_demote.scores +@@ -0,0 +1,72 @@ ++Allocation scores: ++Using the original execution date of: 2020-06-17 17:26:35Z ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 11 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-2: 6 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 10 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-2: 5 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 10 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-2: 5 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5: 
0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-1: 10 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-2: 5 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-1: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-2: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-3: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-4: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: 11 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-2: 6 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: rsc2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc2 allocation score on rhel7-5: 0 ++rsc1:0 promotion score on rhel7-1: 10 ++rsc1:1 promotion score on rhel7-2: 5 ++rsc1:2 promotion score on none: 0 ++rsc1:3 promotion score on none: 0 ++rsc1:4 promotion score on none: 0 +diff --git a/cts/scheduler/no_quorum_demote.summary b/cts/scheduler/no_quorum_demote.summary +new file mode 100644 +index 0000000..9b69ca1 +--- /dev/null ++++ b/cts/scheduler/no_quorum_demote.summary +@@ -0,0 +1,38 @@ ++Using the original execution date of: 2020-06-17 17:26:35Z ++ ++Current cluster status: ++Online: [ rhel7-1 rhel7-2 ] ++OFFLINE: [ rhel7-3 rhel7-4 rhel7-5 ] ++ ++ Fencing (stonith:fence_xvm): Started rhel7-1 ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ Masters: [ rhel7-1 ] ++ Slaves: [ rhel7-2 ] ++ Stopped: [ rhel7-3 rhel7-4 rhel7-5 ] ++ rsc2 (ocf::pacemaker:Dummy): Started rhel7-2 ++ ++Transition Summary: ++ * Stop Fencing ( rhel7-1 ) due to no 
quorum ++ * Demote rsc1:0 ( Master -> Slave rhel7-1 ) ++ * Stop rsc2 ( rhel7-2 ) due to no quorum ++ ++Executing cluster transition: ++ * Resource action: Fencing stop on rhel7-1 ++ * Resource action: rsc1 cancel=10000 on rhel7-1 ++ * Pseudo action: rsc1-clone_demote_0 ++ * Resource action: rsc2 stop on rhel7-2 ++ * Resource action: rsc1 demote on rhel7-1 ++ * Pseudo action: rsc1-clone_demoted_0 ++ * Resource action: rsc1 monitor=11000 on rhel7-1 ++Using the original execution date of: 2020-06-17 17:26:35Z ++ ++Revised cluster status: ++Online: [ rhel7-1 rhel7-2 ] ++OFFLINE: [ rhel7-3 rhel7-4 rhel7-5 ] ++ ++ Fencing (stonith:fence_xvm): Stopped ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ Slaves: [ rhel7-1 rhel7-2 ] ++ Stopped: [ rhel7-3 rhel7-4 rhel7-5 ] ++ rsc2 (ocf::pacemaker:Dummy): Stopped ++ +diff --git a/cts/scheduler/no_quorum_demote.xml b/cts/scheduler/no_quorum_demote.xml +new file mode 100644 +index 0000000..8497f0a +--- /dev/null ++++ b/cts/scheduler/no_quorum_demote.xml +@@ -0,0 +1,224 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From 015b5c012ce41a8035260522f67127135937baa2 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 28 May 2020 12:13:20 -0500 +Subject: [PATCH 17/20] Doc: Pacemaker Explained: document + no-quorum-policy=demote + +--- + doc/Pacemaker_Explained/en-US/Ch-Options.txt | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/doc/Pacemaker_Explained/en-US/Ch-Options.txt b/doc/Pacemaker_Explained/en-US/Ch-Options.txt +index faefe7c..b158f00 100644 +--- a/doc/Pacemaker_Explained/en-US/Ch-Options.txt ++++ b/doc/Pacemaker_Explained/en-US/Ch-Options.txt +@@ -181,6 +181,8 @@ What to do when the cluster does not have quorum. Allowed values: + * +ignore:+ continue all resource management + * +freeze:+ continue resource management, but don't recover resources from nodes not in the affected partition + * +stop:+ stop all resources in the affected cluster partition ++* +demote:+ demote promotable resources and stop all other resources in the ++ affected cluster partition + * +suicide:+ fence all nodes in the affected cluster partition + + | batch-limit | 0 | +-- +1.8.3.1 + + +From 01c5ec67e0a6ee1395d771f8fbaf619a44ab2ca2 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 2 Jun 2020 19:23:11 -0500 +Subject: [PATCH 18/20] Low: scheduler: match initial no-quorum-policy struct + value to actual default + +It doesn't matter in practice since the actual default is parsed from the +option definition via pe_pref(), but it's confusing to have them different. 
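The pattern at issue is easy to show in miniature. This sketch uses hypothetical names rather than pacemaker's real pcmk__cluster_option_t machinery: the option table's string default is what pe_pref() ultimately applies, so the struct initializer only documents intent, and the two should agree:

[source,C]
----
#include <stdio.h>

enum policy { POLICY_FREEZE, POLICY_STOP, POLICY_IGNORE };

/* Hypothetical option table consulted at parse time; its string default
 * is the one that actually takes effect. */
struct option_def { const char *name; const char *default_value; };
static const struct option_def options[] = {
    { "no-quorum-policy", "stop" },
};

struct working_set { enum policy no_quorum_policy; };

static void set_defaults(struct working_set *ws)
{
    /* Before the fix this was the equivalent of POLICY_FREEZE, even though
     * "stop" in the table above is what parsing would install anyway. */
    ws->no_quorum_policy = POLICY_STOP;
}

int main(void)
{
    struct working_set ws;
    set_defaults(&ws);
    printf("initial policy matches table default \"%s\": %s\n",
           options[0].default_value,
           (ws.no_quorum_policy == POLICY_STOP) ? "yes" : "no");
    return 0;
}
----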
+--- + lib/pengine/status.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lib/pengine/status.c b/lib/pengine/status.c +index 8dc5095..ca34639 100644 +--- a/lib/pengine/status.c ++++ b/lib/pengine/status.c +@@ -360,7 +360,7 @@ set_working_set_defaults(pe_working_set_t * data_set) + + data_set->order_id = 1; + data_set->action_id = 1; +- data_set->no_quorum_policy = no_quorum_freeze; ++ data_set->no_quorum_policy = no_quorum_stop; + + data_set->flags = 0x0ULL; + set_bit(data_set->flags, pe_flag_stop_rsc_orphans); +-- +1.8.3.1 + + +From 7eec572dbba3ade059e5206a2ba496f9da3a68bc Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 5 Jun 2020 10:02:05 -0500 +Subject: [PATCH 19/20] Build: libcrmcommon: bump CRM feature set + +... for op_expression/rsc_expression rules, on-fail=demote, and +no-quorum-policy=demote +--- + include/crm/crm.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/crm/crm.h b/include/crm/crm.h +index d2ffb61..dc2adc1 100644 +--- a/include/crm/crm.h ++++ b/include/crm/crm.h +@@ -51,7 +51,7 @@ extern "C" { + * >=3.0.13: Fail counts include operation name and interval + * >=3.2.0: DC supports PCMK_LRM_OP_INVALID and PCMK_LRM_OP_NOT_CONNECTED + */ +-# define CRM_FEATURE_SET "3.3.0" ++# define CRM_FEATURE_SET "3.4.0" + + # define EOS '\0' + # define DIMOF(a) ((int) (sizeof(a)/sizeof(a[0])) ) +-- +1.8.3.1 + + +From c4429d86ef00bb1749adc476f9c6874e3f5d95b9 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 16 Jun 2020 14:38:35 -0500 +Subject: [PATCH 20/20] Log: scheduler: downgrade "active on" messages to trace + +... now that they're logged more often via pcmk__rsc_is_filtered() +--- + lib/pengine/native.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/lib/pengine/native.c b/lib/pengine/native.c +index f0d83d7..20658a0 100644 +--- a/lib/pengine/native.c ++++ b/lib/pengine/native.c +@@ -359,22 +359,22 @@ native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const c + gboolean + native_active(pe_resource_t * rsc, gboolean all) + { +- GListPtr gIter = rsc->running_on; +- +- for (; gIter != NULL; gIter = gIter->next) { ++ for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { + pe_node_t *a_node = (pe_node_t *) gIter->data; + + if (a_node->details->unclean) { +- crm_debug("Resource %s: node %s is unclean", rsc->id, a_node->details->uname); ++ pe_rsc_trace(rsc, "Resource %s: node %s is unclean", ++ rsc->id, a_node->details->uname); + return TRUE; + } else if (a_node->details->online == FALSE) { +- crm_debug("Resource %s: node %s is offline", rsc->id, a_node->details->uname); ++ pe_rsc_trace(rsc, "Resource %s: node %s is offline", ++ rsc->id, a_node->details->uname); + } else { +- crm_debug("Resource %s active on %s", rsc->id, a_node->details->uname); ++ pe_rsc_trace(rsc, "Resource %s active on %s", ++ rsc->id, a_node->details->uname); + return TRUE; + } + } +- + return FALSE; + } + +-- +1.8.3.1 + diff --git a/SOURCES/003-trace.patch b/SOURCES/003-trace.patch new file mode 100644 index 0000000..e56e644 --- /dev/null +++ b/SOURCES/003-trace.patch @@ -0,0 +1,30 @@ +From 47c3e06b098c7e148c54675588d03b4d2bea40b5 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 22 Jun 2020 16:20:01 -0400 +Subject: [PATCH] Fix: libpacemaker: Don't allow a potential NULL in a format + string. + +This is only tripping up F32 s390x builds, but I don't suppose there's +any reason it couldn't come up elsewhere later. 
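The underlying portability issue is that passing a null pointer for a %s conversion is undefined behavior in C; glibc happens to print "(null)", but other toolchains and stricter platforms can misbehave or crash, which is presumably what the s390x builds hit. A minimal illustration of the guard idiom the patch applies (names invented for the example):

[source,C]
----
#include <stdio.h>

/* Guard every possibly-NULL string argument before handing it to a
 * printf-style function; "NA" mirrors the placeholder used in the patch. */
static void log_order(const char *lh_task, const char *rh_task)
{
    printf("Creating %s - %s\n",
           lh_task ? lh_task : "NA",
           rh_task ? rh_task : "NA");
}

int main(void)
{
    log_order("start", NULL); /* prints "Creating start - NA" */
    return 0;
}
----

The same ternary guard, applied to the two task-name arguments, is the entire substance of the one-hunk fix below.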
+--- + lib/pacemaker/pcmk_sched_constraints.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c +index 9c3a88d..d8c3e69 100644 +--- a/lib/pacemaker/pcmk_sched_constraints.c ++++ b/lib/pacemaker/pcmk_sched_constraints.c +@@ -1595,8 +1595,8 @@ custom_action_order(pe_resource_t * lh_rsc, char *lh_action_task, pe_action_t * + order = calloc(1, sizeof(pe__ordering_t)); + + crm_trace("Creating[%d] %s %s %s - %s %s %s", data_set->order_id, +- lh_rsc?lh_rsc->id:"NA", lh_action_task, lh_action?lh_action->uuid:"NA", +- rh_rsc?rh_rsc->id:"NA", rh_action_task, rh_action?rh_action->uuid:"NA"); ++ lh_rsc?lh_rsc->id:"NA", lh_action_task?lh_action_task:"NA", lh_action?lh_action->uuid:"NA", ++ rh_rsc?rh_rsc->id:"NA", rh_action_task?rh_action_task:"NA", rh_action?rh_action->uuid:"NA"); + + /* CRM_ASSERT(data_set->order_id != 291); */ + +-- +1.8.3.1 + diff --git a/SOURCES/004-test.patch b/SOURCES/004-test.patch new file mode 100644 index 0000000..e17850b --- /dev/null +++ b/SOURCES/004-test.patch @@ -0,0 +1,27 @@ +From 7ed7675615ada7d0be5654e0dcb26de60cf5b5e9 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 22 Jun 2020 20:03:56 -0500 +Subject: [PATCH] Test: scheduler: explicitly disable concurrent-fencing in + on_fail_demote4 + +... so the expected output is the same regardless of what default the build was +compiled with +--- + cts/scheduler/on_fail_demote4.xml | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/cts/scheduler/on_fail_demote4.xml b/cts/scheduler/on_fail_demote4.xml +index eb4c4cc..1082266 100644 +--- a/cts/scheduler/on_fail_demote4.xml ++++ b/cts/scheduler/on_fail_demote4.xml +@@ -8,6 +8,7 @@ + + + ++ + + + +-- +1.8.3.1 + diff --git a/SOURCES/005-sysconfig.patch b/SOURCES/005-sysconfig.patch new file mode 100644 index 0000000..4e49cab --- /dev/null +++ b/SOURCES/005-sysconfig.patch @@ -0,0 +1,32 @@ +From 85040eb19b9405464b01a7e67eb6769d2a03c611 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 19 Jun 2020 17:49:22 -0500 +Subject: [PATCH] Doc: sysconfig: remove outdated reference to wildcards in + PCMK_trace_files + +Wildcards stopped working when the log filtering implementation changed in +1.1.8 to support PCMK_trace_tags. It's not worth the effort to fix at this +point, so just update the comment in the sysconfig file. +--- + daemons/pacemakerd/pacemaker.sysconfig | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/daemons/pacemakerd/pacemaker.sysconfig b/daemons/pacemakerd/pacemaker.sysconfig +index c7745d8..e4a5c4d 100644 +--- a/daemons/pacemakerd/pacemaker.sysconfig ++++ b/daemons/pacemakerd/pacemaker.sysconfig +@@ -34,9 +34,8 @@ + # Log all messages from a comma-separated list of functions. + # PCMK_trace_functions=function1,function2,function3 + +-# Log all messages from a comma-separated list of files (no path). +-# Wildcards are supported, e.g. PCMK_trace_files=prefix*.c +-# PCMK_trace_files=file.c,other.h ++# Log all messages from a comma-separated list of file names (without path). ++# PCMK_trace_files=file1.c,file2.c + + # Log all messages matching comma-separated list of formats. 
+ # PCMK_trace_formats="Sent delete %d" +-- +1.8.3.1 + diff --git a/SPECS/pacemaker.spec b/SPECS/pacemaker.spec index 79cecce..707362a 100644 --- a/SPECS/pacemaker.spec +++ b/SPECS/pacemaker.spec @@ -1,4 +1,5 @@ -# Globals and defines to control package behavior (configure these as desired) +# User-configurable globals and defines to control package behavior +# (these should not test {with X} values, which are declared later) ## User and group to use for nonprivileged services %global uname hacluster @@ -21,12 +22,11 @@ ## Upstream pacemaker version, and its package version (specversion ## can be incremented to build packages reliably considered "newer" ## than previously built packages with the same pcmkversion) -%global pcmkversion 2.0.3 +%global pcmkversion 2.0.4 %global specversion 3 -## Upstream commit (or git tag, such as "Pacemaker-" plus the -## {pcmkversion} macro for an official release) to use for this package -%global commit 4b1f869f0f64ef0d248b6aa4781d38ecccf83318 +## Upstream commit (full commit ID, abbreviated commit ID, or tag) to build +%global commit 2deceaa3ae1fbadd844f5c5b47fd33129fa2c227 ## Since git v2.11, the extent of abbreviation is autoscaled by default ## (used to be constant of 7), so we need to convey it for non-tags, too. %global commit_abbrev 7 @@ -39,24 +39,58 @@ %global nagios_hash 105ab8a +# Define conditionals so that "rpmbuild --with " and +# "rpmbuild --without " can enable and disable specific features + +## Add option to enable support for stonith/external fencing agents +%bcond_with stonithd + +## Add option to disable support for storing sensitive information outside CIB +%bcond_without cibsecrets + +## Add option to create binaries suitable for use with profiling tools +%bcond_with profiling + +## Add option to create binaries with coverage analysis +%bcond_with coverage + +## Add option to generate documentation (requires Publican, Asciidoc and Inkscape) +%bcond_with doc + +## Add option to prefix package version with "0." +## (so later "official" packages will be considered updates) +%bcond_with pre_release + +## Add option to ship Upstart job files +%bcond_with upstart_job + +## Add option to turn off hardening of libraries and daemon executables +%bcond_without hardening + +## Add option to disable links for legacy daemon names +%bcond_without legacy_links + + # Define globals for convenient use later ## Workaround to use parentheses in other globals %global lparen ( %global rparen ) -## Short version of git commit -%define shortcommit %(c=%{commit}; case ${c} in - Pacemaker-*%{rparen} echo ${c:10};; - *%{rparen} echo ${c:0:%{commit_abbrev}};; esac) +## Whether this is a tagged release (final or release candidate) +%define tag_release %(c=%{commit}; case ${c} in Pacemaker-*%{rparen} echo 1 ;; + *%{rparen} echo 0 ;; esac) -## Whether this is a tagged release -%define tag_release %([ %{commit} != Pacemaker-%{shortcommit} ]; echo $?) - -## Whether this is a release candidate (in case of a tagged release) -%define pre_release %([ "%{tag_release}" -eq 0 ] || { - case "%{shortcommit}" in *-rc[[:digit:]]*%{rparen} false;; - esac; }; echo $?) 
+## Portion of export/dist tarball name after "pacemaker-", and release version +%if 0%{tag_release} +%define archive_version %(c=%{commit}; echo ${c:10}) +%define archive_github_url %{commit}#/%{name}-%{archive_version}.tar.gz +%else +%define archive_version %(c=%{commit}; echo ${c:0:%{commit_abbrev}}) +%define archive_github_url %{archive_version}#/%{name}-%{archive_version}.tar.gz +%endif +# RHEL always uses a simple release number +%define pcmk_release %{specversion} ## Heuristic used to infer bleeding-edge deployments that are ## less likely to have working versions of the documentation tools @@ -181,35 +215,6 @@ %endif -# Define conditionals so that "rpmbuild --with " and -# "rpmbuild --without " can enable and disable specific features - -## Add option to enable support for stonith/external fencing agents -%bcond_with stonithd - -## Add option to create binaries suitable for use with profiling tools -%bcond_with profiling - -## Add option to create binaries with coverage analysis -%bcond_with coverage - -## Add option to generate documentation (requires Publican, Asciidoc and Inkscape) -%bcond_with doc - -## Add option to prefix package version with "0." -## (so later "official" packages will be considered updates) -%bcond_with pre_release - -## Add option to ship Upstart job files -%bcond_with upstart_job - -## Add option to turn off hardening of libraries and daemon executables -%bcond_without hardening - -## Add option to disable links for legacy daemon names -%bcond_without legacy_links - - # Keep sane profiling data if requested %if %{with profiling} @@ -219,24 +224,6 @@ %endif -# Define the release version -# (do not look at externally enforced pre-release flag for tagged releases -# as only -rc tags, captured with the second condition, implies that then) -%if (!%{tag_release} && %{with pre_release}) || 0%{pre_release} -%if 0%{pre_release} -%define pcmk_release 0.%{specversion}.%(s=%{shortcommit}; echo ${s: -3}) -%else -%define pcmk_release 0.%{specversion}.%{shortcommit}.git -%endif -%else -%if 0%{tag_release} -%define pcmk_release %{specversion} -%else -# Never use the short commit in a RHEL release number -%define pcmk_release %{specversion} -%endif -%endif - Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: %{pcmkversion} @@ -250,13 +237,23 @@ License: GPLv2+ and LGPLv2+ and BSD Url: http://www.clusterlabs.org Group: System Environment/Daemons -# Hint: use "spectool -s 0 pacemaker.spec" (rpmdevtools) to check the final URL: -# https://github.com/ClusterLabs/pacemaker/archive/e91769e5a39f5cb2f7b097d3c612368f0530535e/pacemaker-e91769e.tar.gz -Source0: https://github.com/%{github_owner}/%{name}/archive/%{commit}/%{name}-%{shortcommit}.tar.gz +# Example: https://codeload.github.com/ClusterLabs/pacemaker/tar.gz/e91769e +# will download pacemaker-e91769e.tar.gz +# +# The ending part starting with '#' is ignored by github but necessary for +# rpmbuild to know what the tar archive name is. (The downloaded file will be +# named correctly only for commit IDs, not tagged releases.) +# +# You can use "spectool -s 0 pacemaker.spec" (rpmdevtools) to show final URL. 
+Source0: https://codeload.github.com/%{github_owner}/%{name}/tar.gz/%{archive_github_url} Source1: nagios-agents-metadata-%{nagios_hash}.tar.gz # upstream commits -#Patch1: 001-xxx.patch +Patch1: 001-rules.patch +Patch2: 002-demote.patch +Patch3: 003-trace.patch +Patch4: 004-test.patch +Patch5: 005-sysconfig.patch # downstream-only commits #Patch100: xxx.patch @@ -271,7 +268,11 @@ Requires: psmisc %endif %{?systemd_requires} +%if %{defined centos} +ExclusiveArch: aarch64 i686 ppc64le s390x x86_64 %{arm} +%else ExclusiveArch: aarch64 i686 ppc64le s390x x86_64 +%endif Requires: %{python_path} BuildRequires: %{python_name}-devel @@ -335,7 +336,8 @@ when related resources fail and can be configured to periodically check resource health. Available rpmbuild rebuild options: - --with(out) : coverage doc stonithd hardening pre_release profiling + --with(out) : cibsecrets coverage doc stonithd hardening pre_release + profiling %package cli License: GPLv2+ and LGPLv2+ @@ -509,7 +511,7 @@ The metadata files required for Pacemaker to execute the nagios plugin monitor resources. %prep -%autosetup -a 1 -n %{name}-%{commit} -S git_am -p 1 +%autosetup -a 1 -n %{name}-%{archive_version} -S git_am -p 1 %build @@ -541,6 +543,7 @@ export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}" %{!?with_legacy_links: --disable-legacy-links} \ %{?with_profiling: --with-profiling} \ %{?with_coverage: --with-coverage} \ + %{?with_cibsecrets: --with-cibsecrets} \ %{!?with_doc: --with-brand=} \ %{?gnutls_priorities: --with-gnutls-priorities="%{gnutls_priorities}"} \ --with-initdir=%{_initrddir} \ @@ -561,6 +564,7 @@ sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool make %{_smp_mflags} V=1 %check +make %{_smp_mflags} check { cts/cts-scheduler --run load-stopped-loop \ && cts/cts-cli \ && touch .CHECKED @@ -740,7 +744,7 @@ getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u %{hacluster_id} - exit 0 %if %{defined ldconfig_scriptlets} -%ldconfig_scriptlets libs +%ldconfig_scriptlets -n %{pkgname_pcmk_libs} %ldconfig_scriptlets cluster-libs %else %post -n %{pkgname_pcmk_libs} -p /sbin/ldconfig @@ -813,6 +817,9 @@ exit 0 %{_sbindir}/attrd_updater %{_sbindir}/cibadmin +%if %{with cibsecrets} +%{_sbindir}/cibsecret +%endif %{_sbindir}/crm_diff %{_sbindir}/crm_error %{_sbindir}/crm_failcount @@ -944,6 +951,51 @@ exit 0 %license %{nagios_name}-%{nagios_hash}/COPYING %changelog +* Thu Jun 25 2020 Ken Gaillot - 2.0.4-3 +- Allow resource and operation defaults per resource or operation type +- Rebase on upstream 2.0.4 final release +- Support on-fail="demote" and no-quorum-policy="demote" options +- Remove incorrect comment from sysconfig file +- Resolves: rhbz1628701 +- Resolves: rhbz1828488 +- Resolves: rhbz1837747 +- Resolves: rhbz1848789 + +* Wed Jun 10 2020 Ken Gaillot - 2.0.4-2 +- Improve cibsecret help and clean up code per static analysis +- Resolves: rhbz1793860 + +* Mon Jun 8 2020 Ken Gaillot - 2.0.4-1 +- Clear leaving node's attributes if there is no DC +- Add crm_mon --node option to limit display to particular node or tagged nodes +- Add crm_mon --include/--exclude options to select what sections are shown +- priority-fencing-delay option bases delay on where resources are active +- Pending DC fencing gets 'stuck' in status display +- crm_rule can now check rule expiration when "years" is specified +- crm_mon now formats error messages better +- Support for CIB secrets is enabled +- Rebase on latest upstream Pacemaker release +- Fix regression introduced in 8.2 so crm_node -n works on remote 
nodes +- Avoid infinite loop when topology is removed while unfencing is in progress +- Resolves: rhbz1300604 +- Resolves: rhbz1363907 +- Resolves: rhbz1784601 +- Resolves: rhbz1787751 +- Resolves: rhbz1790591 +- Resolves: rhbz1793653 +- Resolves: rhbz1793860 +- Resolves: rhbz1828488 +- Resolves: rhbz1830535 +- Resolves: rhbz1831775 + +* Mon Jan 27 2020 Ken Gaillot - 2.0.3-5 +- Clear leaving node's attributes if there is no DC +- Resolves: rhbz1791841 + +* Thu Jan 16 2020 Ken Gaillot - 2.0.3-4 +- Implement shutdown-lock feature +- Resolves: rhbz1712584 + * Wed Nov 27 2019 Ken Gaillot - 2.0.3-3 - Rebase on Pacemaker-2.0.3 final release - Resolves: rhbz1752538