pacemaker/SOURCES/001-rules.patch


From 2f10dde2f2a0ac7a3d74cb2f398be1deaba75615 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Mon, 6 Apr 2020 11:22:50 -0400
Subject: [PATCH 01/17] Feature: scheduler: Add new expression_type values.
---
include/crm/pengine/rules.h | 4 +++-
lib/pengine/rules.c | 6 ++++++
2 files changed, 9 insertions(+), 1 deletion(-)
diff --git a/include/crm/pengine/rules.h b/include/crm/pengine/rules.h
index ebd3148..37f092b 100644
--- a/include/crm/pengine/rules.h
+++ b/include/crm/pengine/rules.h
@@ -28,7 +28,9 @@ enum expression_type {
loc_expr,
role_expr,
time_expr,
- version_expr
+ version_expr,
+ rsc_expr,
+ op_expr
};
typedef struct pe_re_match_data {
diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
index fa9a222..130bada 100644
--- a/lib/pengine/rules.c
+++ b/lib/pengine/rules.c
@@ -189,6 +189,12 @@ find_expression_type(xmlNode * expr)
if (safe_str_eq(tag, "date_expression")) {
return time_expr;
+ } else if (safe_str_eq(tag, "rsc_expression")) {
+ return rsc_expr;
+
+ } else if (safe_str_eq(tag, "op_expression")) {
+ return op_expr;
+
} else if (safe_str_eq(tag, XML_TAG_RULE)) {
return nested_rule;
--
1.8.3.1
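Patch 01 only widens enum expression_type with rsc_expr and op_expr and teaches find_expression_type() to map the new <rsc_expression> and <op_expression> tags onto them. Below is a minimal standalone sketch of that tag-to-enum dispatch; the stand-in enum, the helper name type_from_tag, and the use of plain strcmp() in place of Pacemaker's safe_str_eq() are all illustrative so it can be compiled outside the tree.

    #include <stdio.h>
    #include <string.h>

    /* stand-in for enum expression_type; order and members are illustrative only */
    enum expression_type { attr_expr, time_expr, role_expr, version_expr,
                           nested_rule, rsc_expr, op_expr };

    /* mirrors the tag dispatch in find_expression_type(), minus the XML handling */
    static enum expression_type
    type_from_tag(const char *tag)
    {
        if (strcmp(tag, "date_expression") == 0) {
            return time_expr;
        } else if (strcmp(tag, "rsc_expression") == 0) {    /* new in this patch */
            return rsc_expr;
        } else if (strcmp(tag, "op_expression") == 0) {     /* new in this patch */
            return op_expr;
        } else if (strcmp(tag, "rule") == 0) {              /* XML_TAG_RULE */
            return nested_rule;
        }
        /* the real function inspects the node further; a default is enough here */
        return attr_expr;
    }

    int main(void)
    {
        printf("%d %d\n", type_from_tag("rsc_expression"), type_from_tag("op_expression"));
        return 0;
    }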
From bc7491e5226af2a2e7f1a9b2d61892d3af0767fe Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Fri, 3 Apr 2020 15:03:23 -0400
Subject: [PATCH 02/17] Refactor: scheduler: Add new pe__eval_*_expr functions.
These new functions all take the same input arguments - an xmlNodePtr
and a pe_rule_eval_data_t. This latter type holds all the parameters
that could possibly be useful for evaluating some rule. Most functions
will only need a few items out of this structure.
Then, implement pe_test_*_expression in terms of these new functions.
---
include/crm/pengine/common.h | 37 ++-
include/crm/pengine/rules.h | 13 -
include/crm/pengine/rules_internal.h | 5 +
lib/pengine/rules.c | 592 +++++++++++++++++++----------------
4 files changed, 363 insertions(+), 284 deletions(-)
diff --git a/include/crm/pengine/common.h b/include/crm/pengine/common.h
index 48c2b66..3a770b7 100644
--- a/include/crm/pengine/common.h
+++ b/include/crm/pengine/common.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2019 the Pacemaker project contributors
+ * Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -15,6 +15,9 @@ extern "C" {
#endif
# include <glib.h>
+# include <regex.h>
+
+# include <crm/common/iso8601.h>
extern gboolean was_processing_error;
extern gboolean was_processing_warning;
@@ -131,6 +134,38 @@ recovery2text(enum rsc_recovery_type type)
return "Unknown";
}
+typedef struct pe_re_match_data {
+ char *string;
+ int nregs;
+ regmatch_t *pmatch;
+} pe_re_match_data_t;
+
+typedef struct pe_match_data {
+ pe_re_match_data_t *re;
+ GHashTable *params;
+ GHashTable *meta;
+} pe_match_data_t;
+
+typedef struct pe_rsc_eval_data {
+ const char *standard;
+ const char *provider;
+ const char *agent;
+} pe_rsc_eval_data_t;
+
+typedef struct pe_op_eval_data {
+ const char *op_name;
+ guint interval;
+} pe_op_eval_data_t;
+
+typedef struct pe_rule_eval_data {
+ GHashTable *node_hash;
+ enum rsc_role_e role;
+ crm_time_t *now;
+ pe_match_data_t *match_data;
+ pe_rsc_eval_data_t *rsc_data;
+ pe_op_eval_data_t *op_data;
+} pe_rule_eval_data_t;
+
#ifdef __cplusplus
}
#endif
diff --git a/include/crm/pengine/rules.h b/include/crm/pengine/rules.h
index 37f092b..d7bdbf9 100644
--- a/include/crm/pengine/rules.h
+++ b/include/crm/pengine/rules.h
@@ -15,7 +15,6 @@ extern "C" {
#endif
# include <glib.h>
-# include <regex.h>
# include <crm/crm.h>
# include <crm/common/iso8601.h>
@@ -33,18 +32,6 @@ enum expression_type {
op_expr
};
-typedef struct pe_re_match_data {
- char *string;
- int nregs;
- regmatch_t *pmatch;
-} pe_re_match_data_t;
-
-typedef struct pe_match_data {
- pe_re_match_data_t *re;
- GHashTable *params;
- GHashTable *meta;
-} pe_match_data_t;
-
enum expression_type find_expression_type(xmlNode * expr);
gboolean pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash,
diff --git a/include/crm/pengine/rules_internal.h b/include/crm/pengine/rules_internal.h
index fd65c1e..8a22108 100644
--- a/include/crm/pengine/rules_internal.h
+++ b/include/crm/pengine/rules_internal.h
@@ -21,6 +21,11 @@ void pe_free_alert_list(GListPtr alert_list);
crm_time_t *pe_parse_xml_duration(crm_time_t * start, xmlNode * duration_spec);
+gboolean pe__eval_attr_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data);
+int pe__eval_date_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data,
+ crm_time_t *next_change);
+gboolean pe__eval_role_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data);
+
int pe_eval_date_expression(xmlNode *time_expr,
crm_time_t *now,
crm_time_t *next_change);
diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
index 130bada..3f316c2 100644
--- a/lib/pengine/rules.c
+++ b/lib/pengine/rules.c
@@ -219,201 +219,34 @@ find_expression_type(xmlNode * expr)
}
gboolean
-pe_test_role_expression(xmlNode * expr, enum rsc_role_e role, crm_time_t * now)
+pe_test_role_expression(xmlNode *expr, enum rsc_role_e role, crm_time_t *now)
{
- gboolean accept = FALSE;
- const char *op = NULL;
- const char *value = NULL;
-
- if (role == RSC_ROLE_UNKNOWN) {
- return accept;
- }
-
- value = crm_element_value(expr, XML_EXPR_ATTR_VALUE);
- op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
-
- if (safe_str_eq(op, "defined")) {
- if (role > RSC_ROLE_STARTED) {
- accept = TRUE;
- }
-
- } else if (safe_str_eq(op, "not_defined")) {
- if (role < RSC_ROLE_SLAVE && role > RSC_ROLE_UNKNOWN) {
- accept = TRUE;
- }
-
- } else if (safe_str_eq(op, "eq")) {
- if (text2role(value) == role) {
- accept = TRUE;
- }
-
- } else if (safe_str_eq(op, "ne")) {
- // Test "ne" only with promotable clone roles
- if (role < RSC_ROLE_SLAVE && role > RSC_ROLE_UNKNOWN) {
- accept = FALSE;
-
- } else if (text2role(value) != role) {
- accept = TRUE;
- }
- }
- return accept;
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = role,
+ .now = now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ return pe__eval_role_expr(expr, &rule_data);
}
gboolean
pe_test_attr_expression(xmlNode *expr, GHashTable *hash, crm_time_t *now,
pe_match_data_t *match_data)
{
- gboolean accept = FALSE;
- gboolean attr_allocated = FALSE;
- int cmp = 0;
- const char *h_val = NULL;
- GHashTable *table = NULL;
-
- const char *op = NULL;
- const char *type = NULL;
- const char *attr = NULL;
- const char *value = NULL;
- const char *value_source = NULL;
-
- attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE);
- op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
- value = crm_element_value(expr, XML_EXPR_ATTR_VALUE);
- type = crm_element_value(expr, XML_EXPR_ATTR_TYPE);
- value_source = crm_element_value(expr, XML_EXPR_ATTR_VALUE_SOURCE);
-
- if (attr == NULL || op == NULL) {
- pe_err("Invalid attribute or operation in expression"
- " (\'%s\' \'%s\' \'%s\')", crm_str(attr), crm_str(op), crm_str(value));
- return FALSE;
- }
-
- if (match_data) {
- if (match_data->re) {
- char *resolved_attr = pe_expand_re_matches(attr, match_data->re);
-
- if (resolved_attr) {
- attr = (const char *) resolved_attr;
- attr_allocated = TRUE;
- }
- }
-
- if (safe_str_eq(value_source, "param")) {
- table = match_data->params;
- } else if (safe_str_eq(value_source, "meta")) {
- table = match_data->meta;
- }
- }
-
- if (table) {
- const char *param_name = value;
- const char *param_value = NULL;
-
- if (param_name && param_name[0]) {
- if ((param_value = (const char *)g_hash_table_lookup(table, param_name))) {
- value = param_value;
- }
- }
- }
-
- if (hash != NULL) {
- h_val = (const char *)g_hash_table_lookup(hash, attr);
- }
-
- if (attr_allocated) {
- free((char *)attr);
- attr = NULL;
- }
-
- if (value != NULL && h_val != NULL) {
- if (type == NULL) {
- if (safe_str_eq(op, "lt")
- || safe_str_eq(op, "lte")
- || safe_str_eq(op, "gt")
- || safe_str_eq(op, "gte")) {
- type = "number";
-
- } else {
- type = "string";
- }
- crm_trace("Defaulting to %s based comparison for '%s' op", type, op);
- }
-
- if (safe_str_eq(type, "string")) {
- cmp = strcasecmp(h_val, value);
-
- } else if (safe_str_eq(type, "number")) {
- int h_val_f = crm_parse_int(h_val, NULL);
- int value_f = crm_parse_int(value, NULL);
-
- if (h_val_f < value_f) {
- cmp = -1;
- } else if (h_val_f > value_f) {
- cmp = 1;
- } else {
- cmp = 0;
- }
-
- } else if (safe_str_eq(type, "version")) {
- cmp = compare_version(h_val, value);
-
- }
-
- } else if (value == NULL && h_val == NULL) {
- cmp = 0;
- } else if (value == NULL) {
- cmp = 1;
- } else {
- cmp = -1;
- }
-
- if (safe_str_eq(op, "defined")) {
- if (h_val != NULL) {
- accept = TRUE;
- }
-
- } else if (safe_str_eq(op, "not_defined")) {
- if (h_val == NULL) {
- accept = TRUE;
- }
-
- } else if (safe_str_eq(op, "eq")) {
- if ((h_val == value) || cmp == 0) {
- accept = TRUE;
- }
-
- } else if (safe_str_eq(op, "ne")) {
- if ((h_val == NULL && value != NULL)
- || (h_val != NULL && value == NULL)
- || cmp != 0) {
- accept = TRUE;
- }
-
- } else if (value == NULL || h_val == NULL) {
- // The comparison is meaningless from this point on
- accept = FALSE;
-
- } else if (safe_str_eq(op, "lt")) {
- if (cmp < 0) {
- accept = TRUE;
- }
-
- } else if (safe_str_eq(op, "lte")) {
- if (cmp <= 0) {
- accept = TRUE;
- }
-
- } else if (safe_str_eq(op, "gt")) {
- if (cmp > 0) {
- accept = TRUE;
- }
-
- } else if (safe_str_eq(op, "gte")) {
- if (cmp >= 0) {
- accept = TRUE;
- }
- }
-
- return accept;
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = hash,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = now,
+ .match_data = match_data,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ return pe__eval_attr_expr(expr, &rule_data);
}
/* As per the nethack rules:
@@ -587,10 +420,18 @@ pe_parse_xml_duration(crm_time_t * start, xmlNode * duration_spec)
* \return TRUE if date expression is in effect at given time, FALSE otherwise
*/
gboolean
-pe_test_date_expression(xmlNode *time_expr, crm_time_t *now,
- crm_time_t *next_change)
+pe_test_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change)
{
- switch (pe_eval_date_expression(time_expr, now, next_change)) {
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ switch (pe__eval_date_expr(expr, &rule_data, next_change)) {
case pcmk_rc_within_range:
case pcmk_rc_ok:
return TRUE;
@@ -623,86 +464,18 @@ crm_time_set_if_earlier(crm_time_t *next_change, crm_time_t *t)
* \return Standard Pacemaker return code
*/
int
-pe_eval_date_expression(xmlNode *time_expr, crm_time_t *now,
- crm_time_t *next_change)
+pe_eval_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change)
{
- crm_time_t *start = NULL;
- crm_time_t *end = NULL;
- const char *value = NULL;
- const char *op = crm_element_value(time_expr, "operation");
-
- xmlNode *duration_spec = NULL;
- xmlNode *date_spec = NULL;
-
- // "undetermined" will also be returned for parsing errors
- int rc = pcmk_rc_undetermined;
-
- crm_trace("Testing expression: %s", ID(time_expr));
-
- duration_spec = first_named_child(time_expr, "duration");
- date_spec = first_named_child(time_expr, "date_spec");
-
- value = crm_element_value(time_expr, "start");
- if (value != NULL) {
- start = crm_time_new(value);
- }
- value = crm_element_value(time_expr, "end");
- if (value != NULL) {
- end = crm_time_new(value);
- }
-
- if (start != NULL && end == NULL && duration_spec != NULL) {
- end = pe_parse_xml_duration(start, duration_spec);
- }
-
- if ((op == NULL) || safe_str_eq(op, "in_range")) {
- if ((start == NULL) && (end == NULL)) {
- // in_range requires at least one of start or end
- } else if ((start != NULL) && (crm_time_compare(now, start) < 0)) {
- rc = pcmk_rc_before_range;
- crm_time_set_if_earlier(next_change, start);
- } else if ((end != NULL) && (crm_time_compare(now, end) > 0)) {
- rc = pcmk_rc_after_range;
- } else {
- rc = pcmk_rc_within_range;
- if (end && next_change) {
- // Evaluation doesn't change until second after end
- crm_time_add_seconds(end, 1);
- crm_time_set_if_earlier(next_change, end);
- }
- }
-
- } else if (safe_str_eq(op, "date_spec")) {
- rc = pe_cron_range_satisfied(now, date_spec);
- // @TODO set next_change appropriately
-
- } else if (safe_str_eq(op, "gt")) {
- if (start == NULL) {
- // gt requires start
- } else if (crm_time_compare(now, start) > 0) {
- rc = pcmk_rc_within_range;
- } else {
- rc = pcmk_rc_before_range;
-
- // Evaluation doesn't change until second after start
- crm_time_add_seconds(start, 1);
- crm_time_set_if_earlier(next_change, start);
- }
-
- } else if (safe_str_eq(op, "lt")) {
- if (end == NULL) {
- // lt requires end
- } else if (crm_time_compare(now, end) < 0) {
- rc = pcmk_rc_within_range;
- crm_time_set_if_earlier(next_change, end);
- } else {
- rc = pcmk_rc_after_range;
- }
- }
-
- crm_time_free(start);
- crm_time_free(end);
- return rc;
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ return pe__eval_date_expr(expr, &rule_data, next_change);
}
// Information about a block of nvpair elements
@@ -1111,6 +884,285 @@ pe_unpack_versioned_parameters(xmlNode *versioned_params, const char *ra_version
}
#endif
+gboolean
+pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data)
+{
+ gboolean accept = FALSE;
+ gboolean attr_allocated = FALSE;
+ int cmp = 0;
+ const char *h_val = NULL;
+ GHashTable *table = NULL;
+
+ const char *op = NULL;
+ const char *type = NULL;
+ const char *attr = NULL;
+ const char *value = NULL;
+ const char *value_source = NULL;
+
+ attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE);
+ op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
+ value = crm_element_value(expr, XML_EXPR_ATTR_VALUE);
+ type = crm_element_value(expr, XML_EXPR_ATTR_TYPE);
+ value_source = crm_element_value(expr, XML_EXPR_ATTR_VALUE_SOURCE);
+
+ if (attr == NULL || op == NULL) {
+ pe_err("Invalid attribute or operation in expression"
+ " (\'%s\' \'%s\' \'%s\')", crm_str(attr), crm_str(op), crm_str(value));
+ return FALSE;
+ }
+
+ if (rule_data->match_data) {
+ if (rule_data->match_data->re) {
+ char *resolved_attr = pe_expand_re_matches(attr, rule_data->match_data->re);
+
+ if (resolved_attr) {
+ attr = (const char *) resolved_attr;
+ attr_allocated = TRUE;
+ }
+ }
+
+ if (safe_str_eq(value_source, "param")) {
+ table = rule_data->match_data->params;
+ } else if (safe_str_eq(value_source, "meta")) {
+ table = rule_data->match_data->meta;
+ }
+ }
+
+ if (table) {
+ const char *param_name = value;
+ const char *param_value = NULL;
+
+ if (param_name && param_name[0]) {
+ if ((param_value = (const char *)g_hash_table_lookup(table, param_name))) {
+ value = param_value;
+ }
+ }
+ }
+
+ if (rule_data->node_hash != NULL) {
+ h_val = (const char *)g_hash_table_lookup(rule_data->node_hash, attr);
+ }
+
+ if (attr_allocated) {
+ free((char *)attr);
+ attr = NULL;
+ }
+
+ if (value != NULL && h_val != NULL) {
+ if (type == NULL) {
+ if (safe_str_eq(op, "lt")
+ || safe_str_eq(op, "lte")
+ || safe_str_eq(op, "gt")
+ || safe_str_eq(op, "gte")) {
+ type = "number";
+
+ } else {
+ type = "string";
+ }
+ crm_trace("Defaulting to %s based comparison for '%s' op", type, op);
+ }
+
+ if (safe_str_eq(type, "string")) {
+ cmp = strcasecmp(h_val, value);
+
+ } else if (safe_str_eq(type, "number")) {
+ int h_val_f = crm_parse_int(h_val, NULL);
+ int value_f = crm_parse_int(value, NULL);
+
+ if (h_val_f < value_f) {
+ cmp = -1;
+ } else if (h_val_f > value_f) {
+ cmp = 1;
+ } else {
+ cmp = 0;
+ }
+
+ } else if (safe_str_eq(type, "version")) {
+ cmp = compare_version(h_val, value);
+
+ }
+
+ } else if (value == NULL && h_val == NULL) {
+ cmp = 0;
+ } else if (value == NULL) {
+ cmp = 1;
+ } else {
+ cmp = -1;
+ }
+
+ if (safe_str_eq(op, "defined")) {
+ if (h_val != NULL) {
+ accept = TRUE;
+ }
+
+ } else if (safe_str_eq(op, "not_defined")) {
+ if (h_val == NULL) {
+ accept = TRUE;
+ }
+
+ } else if (safe_str_eq(op, "eq")) {
+ if ((h_val == value) || cmp == 0) {
+ accept = TRUE;
+ }
+
+ } else if (safe_str_eq(op, "ne")) {
+ if ((h_val == NULL && value != NULL)
+ || (h_val != NULL && value == NULL)
+ || cmp != 0) {
+ accept = TRUE;
+ }
+
+ } else if (value == NULL || h_val == NULL) {
+ // The comparison is meaningless from this point on
+ accept = FALSE;
+
+ } else if (safe_str_eq(op, "lt")) {
+ if (cmp < 0) {
+ accept = TRUE;
+ }
+
+ } else if (safe_str_eq(op, "lte")) {
+ if (cmp <= 0) {
+ accept = TRUE;
+ }
+
+ } else if (safe_str_eq(op, "gt")) {
+ if (cmp > 0) {
+ accept = TRUE;
+ }
+
+ } else if (safe_str_eq(op, "gte")) {
+ if (cmp >= 0) {
+ accept = TRUE;
+ }
+ }
+
+ return accept;
+}
+
+int
+pe__eval_date_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data, crm_time_t *next_change)
+{
+ crm_time_t *start = NULL;
+ crm_time_t *end = NULL;
+ const char *value = NULL;
+ const char *op = crm_element_value(expr, "operation");
+
+ xmlNode *duration_spec = NULL;
+ xmlNode *date_spec = NULL;
+
+ // "undetermined" will also be returned for parsing errors
+ int rc = pcmk_rc_undetermined;
+
+ crm_trace("Testing expression: %s", ID(expr));
+
+ duration_spec = first_named_child(expr, "duration");
+ date_spec = first_named_child(expr, "date_spec");
+
+ value = crm_element_value(expr, "start");
+ if (value != NULL) {
+ start = crm_time_new(value);
+ }
+ value = crm_element_value(expr, "end");
+ if (value != NULL) {
+ end = crm_time_new(value);
+ }
+
+ if (start != NULL && end == NULL && duration_spec != NULL) {
+ end = pe_parse_xml_duration(start, duration_spec);
+ }
+
+ if ((op == NULL) || safe_str_eq(op, "in_range")) {
+ if ((start == NULL) && (end == NULL)) {
+ // in_range requires at least one of start or end
+ } else if ((start != NULL) && (crm_time_compare(rule_data->now, start) < 0)) {
+ rc = pcmk_rc_before_range;
+ crm_time_set_if_earlier(next_change, start);
+ } else if ((end != NULL) && (crm_time_compare(rule_data->now, end) > 0)) {
+ rc = pcmk_rc_after_range;
+ } else {
+ rc = pcmk_rc_within_range;
+ if (end && next_change) {
+ // Evaluation doesn't change until second after end
+ crm_time_add_seconds(end, 1);
+ crm_time_set_if_earlier(next_change, end);
+ }
+ }
+
+ } else if (safe_str_eq(op, "date_spec")) {
+ rc = pe_cron_range_satisfied(rule_data->now, date_spec);
+ // @TODO set next_change appropriately
+
+ } else if (safe_str_eq(op, "gt")) {
+ if (start == NULL) {
+ // gt requires start
+ } else if (crm_time_compare(rule_data->now, start) > 0) {
+ rc = pcmk_rc_within_range;
+ } else {
+ rc = pcmk_rc_before_range;
+
+ // Evaluation doesn't change until second after start
+ crm_time_add_seconds(start, 1);
+ crm_time_set_if_earlier(next_change, start);
+ }
+
+ } else if (safe_str_eq(op, "lt")) {
+ if (end == NULL) {
+ // lt requires end
+ } else if (crm_time_compare(rule_data->now, end) < 0) {
+ rc = pcmk_rc_within_range;
+ crm_time_set_if_earlier(next_change, end);
+ } else {
+ rc = pcmk_rc_after_range;
+ }
+ }
+
+ crm_time_free(start);
+ crm_time_free(end);
+ return rc;
+}
+
+gboolean
+pe__eval_role_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data)
+{
+ gboolean accept = FALSE;
+ const char *op = NULL;
+ const char *value = NULL;
+
+ if (rule_data->role == RSC_ROLE_UNKNOWN) {
+ return accept;
+ }
+
+ value = crm_element_value(expr, XML_EXPR_ATTR_VALUE);
+ op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
+
+ if (safe_str_eq(op, "defined")) {
+ if (rule_data->role > RSC_ROLE_STARTED) {
+ accept = TRUE;
+ }
+
+ } else if (safe_str_eq(op, "not_defined")) {
+ if (rule_data->role < RSC_ROLE_SLAVE && rule_data->role > RSC_ROLE_UNKNOWN) {
+ accept = TRUE;
+ }
+
+ } else if (safe_str_eq(op, "eq")) {
+ if (text2role(value) == rule_data->role) {
+ accept = TRUE;
+ }
+
+ } else if (safe_str_eq(op, "ne")) {
+ // Test "ne" only with promotable clone roles
+ if (rule_data->role < RSC_ROLE_SLAVE && rule_data->role > RSC_ROLE_UNKNOWN) {
+ accept = FALSE;
+
+ } else if (text2role(value) != rule_data->role) {
+ accept = TRUE;
+ }
+ }
+ return accept;
+}
+
// Deprecated functions kept only for backward API compatibility
gboolean test_ruleset(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now);
gboolean test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role,
--
1.8.3.1
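Patch 02 collects everything a rule might be evaluated against into pe_rule_eval_data_t and adds pe__eval_attr_expr(), pe__eval_date_expr(), and pe__eval_role_expr(), keeping the old pe_test_*_expression() calls as thin wrappers. The sketch below shows how an in-tree caller could drive one of the new entry points directly; it assumes the Pacemaker source tree (rules_internal.h is internal, not installed), uses only the types and prototypes declared above, and the helper name attr_expr_active is invented.

    #include <crm/pengine/common.h>           /* pe_rule_eval_data_t */
    #include <crm/pengine/rules_internal.h>   /* pe__eval_attr_expr() */

    /* Evaluate an <expression> against one node's attribute table.
     * Only node_hash and now are filled in here; the other members exist so
     * a single struct can serve every expression type. */
    static gboolean
    attr_expr_active(xmlNode *expr, GHashTable *node_attrs, crm_time_t *now)
    {
        pe_rule_eval_data_t rule_data = {
            .node_hash  = node_attrs,   /* attribute name -> value strings */
            .role       = RSC_ROLE_UNKNOWN,
            .now        = now,
            .match_data = NULL,         /* no regex/param substitution here */
            .rsc_data   = NULL,
            .op_data    = NULL,
        };

        return pe__eval_attr_expr(expr, &rule_data);
    }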
From 56a1337a54f3ba8a175ff3252658e1e43f7c670b Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Tue, 28 Apr 2020 14:34:40 -0400
Subject: [PATCH 03/17] Feature: scheduler: Add new rule tests for op_defaults
and rsc_defaults.
These are like all the other rule evaluating functions, but they do not
have any wrappers for the older style API.
---
include/crm/pengine/rules_internal.h | 2 ++
lib/pengine/rules.c | 68 ++++++++++++++++++++++++++++++++++++
2 files changed, 70 insertions(+)
diff --git a/include/crm/pengine/rules_internal.h b/include/crm/pengine/rules_internal.h
index 8a22108..f60263a 100644
--- a/include/crm/pengine/rules_internal.h
+++ b/include/crm/pengine/rules_internal.h
@@ -24,7 +24,9 @@ crm_time_t *pe_parse_xml_duration(crm_time_t * start, xmlNode * duration_spec);
gboolean pe__eval_attr_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data);
int pe__eval_date_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data,
crm_time_t *next_change);
+gboolean pe__eval_op_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data);
gboolean pe__eval_role_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data);
+gboolean pe__eval_rsc_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data);
int pe_eval_date_expression(xmlNode *time_expr,
crm_time_t *now,
diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
index 3f316c2..a5af57a 100644
--- a/lib/pengine/rules.c
+++ b/lib/pengine/rules.c
@@ -1123,6 +1123,38 @@ pe__eval_date_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data, crm_time_t *
}
gboolean
+pe__eval_op_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) {
+ const char *name = crm_element_value(expr, XML_NVPAIR_ATTR_NAME);
+ const char *interval_s = crm_element_value(expr, XML_LRM_ATTR_INTERVAL);
+ guint interval;
+
+ crm_trace("Testing op_defaults expression: %s", ID(expr));
+
+ if (rule_data->op_data == NULL) {
+ crm_trace("No operations data provided");
+ return FALSE;
+ }
+
+ interval = crm_parse_interval_spec(interval_s);
+ if (interval == 0 && errno != 0) {
+ crm_trace("Could not parse interval: %s", interval_s);
+ return FALSE;
+ }
+
+ if (interval_s != NULL && interval != rule_data->op_data->interval) {
+ crm_trace("Interval doesn't match: %d != %d", interval, rule_data->op_data->interval);
+ return FALSE;
+ }
+
+ if (!crm_str_eq(name, rule_data->op_data->op_name, TRUE)) {
+ crm_trace("Name doesn't match: %s != %s", name, rule_data->op_data->op_name);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+gboolean
pe__eval_role_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data)
{
gboolean accept = FALSE;
@@ -1163,6 +1195,42 @@ pe__eval_role_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data)
return accept;
}
+gboolean
+pe__eval_rsc_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data)
+{
+ const char *class = crm_element_value(expr, XML_AGENT_ATTR_CLASS);
+ const char *provider = crm_element_value(expr, XML_AGENT_ATTR_PROVIDER);
+ const char *type = crm_element_value(expr, XML_EXPR_ATTR_TYPE);
+
+ crm_trace("Testing rsc_defaults expression: %s", ID(expr));
+
+ if (rule_data->rsc_data == NULL) {
+ crm_trace("No resource data provided");
+ return FALSE;
+ }
+
+ if (class != NULL &&
+ !crm_str_eq(class, rule_data->rsc_data->standard, TRUE)) {
+ crm_trace("Class doesn't match: %s != %s", class, rule_data->rsc_data->standard);
+ return FALSE;
+ }
+
+ if ((provider == NULL && rule_data->rsc_data->provider != NULL) ||
+ (provider != NULL && rule_data->rsc_data->provider == NULL) ||
+ !crm_str_eq(provider, rule_data->rsc_data->provider, TRUE)) {
+ crm_trace("Provider doesn't match: %s != %s", provider, rule_data->rsc_data->provider);
+ return FALSE;
+ }
+
+ if (type != NULL &&
+ !crm_str_eq(type, rule_data->rsc_data->agent, TRUE)) {
+ crm_trace("Agent doesn't match: %s != %s", type, rule_data->rsc_data->agent);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
// Deprecated functions kept only for backward API compatibility
gboolean test_ruleset(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now);
gboolean test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role,
--
1.8.3.1
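Patch 03 supplies the evaluators for the two new expression types: pe__eval_rsc_expr() matches a resource's standard/provider/agent triple, and pe__eval_op_expr() matches an operation name plus, optionally, its interval. A sketch of the operation case follows (Pacemaker tree assumed; the helper name matches_monitor_10s is invented; the interval travels in milliseconds, the unit crm_parse_interval_spec() produces).

    #include <crm/pengine/common.h>
    #include <crm/pengine/rules_internal.h>

    /* Would this <op_expression> apply to a 10-second "monitor" operation? */
    static gboolean
    matches_monitor_10s(xmlNode *op_expression)
    {
        pe_op_eval_data_t op_data = {
            .op_name  = "monitor",
            .interval = 10000,              /* 10s, expressed in ms */
        };
        pe_rule_eval_data_t rule_data = {
            .node_hash  = NULL,
            .role       = RSC_ROLE_UNKNOWN,
            .now        = NULL,             /* not consulted by pe__eval_op_expr() */
            .match_data = NULL,
            .rsc_data   = NULL,
            .op_data    = &op_data,
        };

        return pe__eval_op_expr(op_expression, &rule_data);
    }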
From 5a4da3f77feee0d3bac50e9adc4eb4b35724dfb2 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Tue, 28 Apr 2020 14:41:08 -0400
Subject: [PATCH 04/17] Refactor: scheduler: Reimplement core rule eval
functions.
The core functions of pe_evaluate_rules, pe_test_rule, and
pe_test_expression have been turned into new, similarly named functions
that take a pe_rule_eval_data_t as an argument. The old ones still
exist as wrappers around the new ones.
---
include/crm/pengine/rules.h | 7 ++
lib/pengine/rules.c | 259 ++++++++++++++++++++++++++------------------
2 files changed, 162 insertions(+), 104 deletions(-)
diff --git a/include/crm/pengine/rules.h b/include/crm/pengine/rules.h
index d7bdbf9..a74c629 100644
--- a/include/crm/pengine/rules.h
+++ b/include/crm/pengine/rules.h
@@ -61,6 +61,13 @@ GHashTable *pe_unpack_versioned_parameters(xmlNode *versioned_params, const char
char *pe_expand_re_matches(const char *string, pe_re_match_data_t * match_data);
+gboolean pe_eval_rules(xmlNode *ruleset, pe_rule_eval_data_t *rule_data,
+ crm_time_t *next_change);
+gboolean pe_eval_expr(xmlNode *rule, pe_rule_eval_data_t *rule_data,
+ crm_time_t *next_change);
+gboolean pe_eval_subexpr(xmlNode *expr, pe_rule_eval_data_t *rule_data,
+ crm_time_t *next_change);
+
#ifndef PCMK__NO_COMPAT
/* Everything here is deprecated and kept only for public API backward
* compatibility. It will be moved to compatibility.h when 2.1.0 is released.
diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
index a5af57a..a6353ef 100644
--- a/lib/pengine/rules.c
+++ b/lib/pengine/rules.c
@@ -38,25 +38,16 @@ gboolean
pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now,
crm_time_t *next_change)
{
- // If there are no rules, pass by default
- gboolean ruleset_default = TRUE;
-
- for (xmlNode *rule = first_named_child(ruleset, XML_TAG_RULE);
- rule != NULL; rule = crm_next_same_xml(rule)) {
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_hash,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
- ruleset_default = FALSE;
- if (pe_test_rule(rule, node_hash, RSC_ROLE_UNKNOWN, now, next_change,
- NULL)) {
- /* Only the deprecated "lifetime" element of location constraints
- * may contain more than one rule at the top level -- the schema
- * limits a block of nvpairs to a single top-level rule. So, this
- * effectively means that a lifetime is active if any rule it
- * contains is active.
- */
- return TRUE;
- }
- }
- return ruleset_default;
+ return pe_eval_rules(ruleset, &rule_data, next_change);
}
gboolean
@@ -64,44 +55,16 @@ pe_test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role,
crm_time_t *now, crm_time_t *next_change,
pe_match_data_t *match_data)
{
- xmlNode *expr = NULL;
- gboolean test = TRUE;
- gboolean empty = TRUE;
- gboolean passed = TRUE;
- gboolean do_and = TRUE;
- const char *value = NULL;
-
- rule = expand_idref(rule, NULL);
- value = crm_element_value(rule, XML_RULE_ATTR_BOOLEAN_OP);
- if (safe_str_eq(value, "or")) {
- do_and = FALSE;
- passed = FALSE;
- }
-
- crm_trace("Testing rule %s", ID(rule));
- for (expr = __xml_first_child_element(rule); expr != NULL;
- expr = __xml_next_element(expr)) {
-
- test = pe_test_expression(expr, node_hash, role, now, next_change,
- match_data);
- empty = FALSE;
-
- if (test && do_and == FALSE) {
- crm_trace("Expression %s/%s passed", ID(rule), ID(expr));
- return TRUE;
-
- } else if (test == FALSE && do_and) {
- crm_trace("Expression %s/%s failed", ID(rule), ID(expr));
- return FALSE;
- }
- }
-
- if (empty) {
- crm_err("Invalid Rule %s: rules must contain at least one expression", ID(rule));
- }
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_hash,
+ .role = role,
+ .now = now,
+ .match_data = match_data,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
- crm_trace("Rule %s %s", ID(rule), passed ? "passed" : "failed");
- return passed;
+ return pe_eval_expr(rule, &rule_data, next_change);
}
/*!
@@ -125,56 +88,16 @@ pe_test_expression(xmlNode *expr, GHashTable *node_hash, enum rsc_role_e role,
crm_time_t *now, crm_time_t *next_change,
pe_match_data_t *match_data)
{
- gboolean accept = FALSE;
- const char *uname = NULL;
-
- switch (find_expression_type(expr)) {
- case nested_rule:
- accept = pe_test_rule(expr, node_hash, role, now, next_change,
- match_data);
- break;
- case attr_expr:
- case loc_expr:
- /* these expressions can never succeed if there is
- * no node to compare with
- */
- if (node_hash != NULL) {
- accept = pe_test_attr_expression(expr, node_hash, now, match_data);
- }
- break;
-
- case time_expr:
- accept = pe_test_date_expression(expr, now, next_change);
- break;
-
- case role_expr:
- accept = pe_test_role_expression(expr, role, now);
- break;
-
-#if ENABLE_VERSIONED_ATTRS
- case version_expr:
- if (node_hash && g_hash_table_lookup_extended(node_hash,
- CRM_ATTR_RA_VERSION,
- NULL, NULL)) {
- accept = pe_test_attr_expression(expr, node_hash, now, NULL);
- } else {
- // we are going to test it when we have ra-version
- accept = TRUE;
- }
- break;
-#endif
-
- default:
- CRM_CHECK(FALSE /* bad type */ , return FALSE);
- accept = FALSE;
- }
- if (node_hash) {
- uname = g_hash_table_lookup(node_hash, CRM_ATTR_UNAME);
- }
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_hash,
+ .role = role,
+ .now = now,
+ .match_data = match_data,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
- crm_trace("Expression %s %s on %s",
- ID(expr), accept ? "passed" : "failed", uname ? uname : "all nodes");
- return accept;
+ return pe_eval_subexpr(expr, &rule_data, next_change);
}
enum expression_type
@@ -885,6 +808,134 @@ pe_unpack_versioned_parameters(xmlNode *versioned_params, const char *ra_version
#endif
gboolean
+pe_eval_rules(xmlNode *ruleset, pe_rule_eval_data_t *rule_data, crm_time_t *next_change)
+{
+ // If there are no rules, pass by default
+ gboolean ruleset_default = TRUE;
+
+ for (xmlNode *rule = first_named_child(ruleset, XML_TAG_RULE);
+ rule != NULL; rule = crm_next_same_xml(rule)) {
+
+ ruleset_default = FALSE;
+ if (pe_eval_expr(rule, rule_data, next_change)) {
+ /* Only the deprecated "lifetime" element of location constraints
+ * may contain more than one rule at the top level -- the schema
+ * limits a block of nvpairs to a single top-level rule. So, this
+ * effectively means that a lifetime is active if any rule it
+ * contains is active.
+ */
+ return TRUE;
+ }
+ }
+
+ return ruleset_default;
+}
+
+gboolean
+pe_eval_expr(xmlNode *rule, pe_rule_eval_data_t *rule_data, crm_time_t *next_change)
+{
+ xmlNode *expr = NULL;
+ gboolean test = TRUE;
+ gboolean empty = TRUE;
+ gboolean passed = TRUE;
+ gboolean do_and = TRUE;
+ const char *value = NULL;
+
+ rule = expand_idref(rule, NULL);
+ value = crm_element_value(rule, XML_RULE_ATTR_BOOLEAN_OP);
+ if (safe_str_eq(value, "or")) {
+ do_and = FALSE;
+ passed = FALSE;
+ }
+
+ crm_trace("Testing rule %s", ID(rule));
+ for (expr = __xml_first_child_element(rule); expr != NULL;
+ expr = __xml_next_element(expr)) {
+
+ test = pe_eval_subexpr(expr, rule_data, next_change);
+ empty = FALSE;
+
+ if (test && do_and == FALSE) {
+ crm_trace("Expression %s/%s passed", ID(rule), ID(expr));
+ return TRUE;
+
+ } else if (test == FALSE && do_and) {
+ crm_trace("Expression %s/%s failed", ID(rule), ID(expr));
+ return FALSE;
+ }
+ }
+
+ if (empty) {
+ crm_err("Invalid Rule %s: rules must contain at least one expression", ID(rule));
+ }
+
+ crm_trace("Rule %s %s", ID(rule), passed ? "passed" : "failed");
+ return passed;
+}
+
+gboolean
+pe_eval_subexpr(xmlNode *expr, pe_rule_eval_data_t *rule_data, crm_time_t *next_change)
+{
+ gboolean accept = FALSE;
+ const char *uname = NULL;
+
+ switch (find_expression_type(expr)) {
+ case nested_rule:
+ accept = pe_eval_expr(expr, rule_data, next_change);
+ break;
+ case attr_expr:
+ case loc_expr:
+ /* these expressions can never succeed if there is
+ * no node to compare with
+ */
+ if (rule_data->node_hash != NULL) {
+ accept = pe__eval_attr_expr(expr, rule_data);
+ }
+ break;
+
+ case time_expr:
+ accept = pe_test_date_expression(expr, rule_data->now, next_change);
+ break;
+
+ case role_expr:
+ accept = pe__eval_role_expr(expr, rule_data);
+ break;
+
+ case rsc_expr:
+ accept = pe__eval_rsc_expr(expr, rule_data);
+ break;
+
+ case op_expr:
+ accept = pe__eval_op_expr(expr, rule_data);
+ break;
+
+#if ENABLE_VERSIONED_ATTRS
+ case version_expr:
+ if (rule_data->node_hash &&
+ g_hash_table_lookup_extended(rule_data->node_hash,
+ CRM_ATTR_RA_VERSION, NULL, NULL)) {
+ accept = pe__eval_attr_expr(expr, rule_data);
+ } else {
+ // we are going to test it when we have ra-version
+ accept = TRUE;
+ }
+ break;
+#endif
+
+ default:
+ CRM_CHECK(FALSE /* bad type */ , return FALSE);
+ accept = FALSE;
+ }
+ if (rule_data->node_hash) {
+ uname = g_hash_table_lookup(rule_data->node_hash, CRM_ATTR_UNAME);
+ }
+
+ crm_trace("Expression %s %s on %s",
+ ID(expr), accept ? "passed" : "failed", uname ? uname : "all nodes");
+ return accept;
+}
+
+gboolean
pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data)
{
gboolean accept = FALSE;
--
1.8.3.1
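Patch 04 turns the ruleset walk into pe_eval_rules(), pe_eval_expr(), and pe_eval_subexpr(), all driven by rule_data plus an optional next_change time that is pulled earlier whenever a date expression sets a deadline. The following sketch shows the intended calling pattern (Pacemaker tree assumed; ruleset stands for the XML element holding the <rule> children, and the helper name ruleset_active is invented).

    #include <crm/pengine/common.h>
    #include <crm/pengine/rules.h>

    static gboolean
    ruleset_active(xmlNode *ruleset, GHashTable *node_attrs, crm_time_t *now)
    {
        gboolean active;
        crm_time_t *next_change = crm_time_new_undefined();
        pe_rule_eval_data_t rule_data = {
            .node_hash  = node_attrs,
            .role       = RSC_ROLE_UNKNOWN,
            .now        = now,
            .match_data = NULL,
            .rsc_data   = NULL,
            .op_data    = NULL,
        };

        /* TRUE if any contained <rule> passes, or if there are no rules at all */
        active = pe_eval_rules(ruleset, &rule_data, next_change);

        if (crm_time_is_defined(next_change)) {
            /* a date_expression changes the outcome at this time; a real
             * caller would schedule a recheck (cf. pe__update_recheck_time) */
        }
        crm_time_free(next_change);
        return active;
    }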
From ea6318252164578fd27dcef657e80f5225337a4b Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Tue, 7 Apr 2020 15:57:06 -0400
Subject: [PATCH 05/17] Refactor: scheduler: Add rule_data to unpack_data_s.
This is just to get rid of a couple extra arguments to some internal
functions and make them look like the external functions.
---
lib/pengine/rules.c | 65 ++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 44 insertions(+), 21 deletions(-)
diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
index a6353ef..2709d68 100644
--- a/lib/pengine/rules.c
+++ b/lib/pengine/rules.c
@@ -555,10 +555,9 @@ add_versioned_attributes(xmlNode * attr_set, xmlNode * versioned_attrs)
typedef struct unpack_data_s {
gboolean overwrite;
- GHashTable *node_hash;
void *hash;
- crm_time_t *now;
crm_time_t *next_change;
+ pe_rule_eval_data_t *rule_data;
xmlNode *top;
} unpack_data_t;
@@ -568,14 +567,14 @@ unpack_attr_set(gpointer data, gpointer user_data)
sorted_set_t *pair = data;
unpack_data_t *unpack_data = user_data;
- if (!pe_evaluate_rules(pair->attr_set, unpack_data->node_hash,
- unpack_data->now, unpack_data->next_change)) {
+ if (!pe_eval_rules(pair->attr_set, unpack_data->rule_data,
+ unpack_data->next_change)) {
return;
}
#if ENABLE_VERSIONED_ATTRS
- if (get_versioned_rule(pair->attr_set) && !(unpack_data->node_hash &&
- g_hash_table_lookup_extended(unpack_data->node_hash,
+ if (get_versioned_rule(pair->attr_set) && !(unpack_data->rule_data->node_hash &&
+ g_hash_table_lookup_extended(unpack_data->rule_data->node_hash,
CRM_ATTR_RA_VERSION, NULL, NULL))) {
// we haven't actually tested versioned expressions yet
return;
@@ -593,8 +592,8 @@ unpack_versioned_attr_set(gpointer data, gpointer user_data)
sorted_set_t *pair = data;
unpack_data_t *unpack_data = user_data;
- if (pe_evaluate_rules(pair->attr_set, unpack_data->node_hash,
- unpack_data->now, unpack_data->next_change)) {
+ if (pe_eval_rules(pair->attr_set, unpack_data->rule_data,
+ unpack_data->next_change)) {
add_versioned_attributes(pair->attr_set, unpack_data->hash);
}
}
@@ -658,19 +657,17 @@ make_pairs(xmlNode *top, xmlNode *xml_obj, const char *set_name,
* \param[in] top XML document root (used to expand id-ref's)
* \param[in] xml_obj XML element containing blocks of nvpair elements
* \param[in] set_name If not NULL, only use blocks of this element type
- * \param[in] node_hash Node attributes to use when evaluating rules
* \param[out] hash Where to store extracted name/value pairs
* \param[in] always_first If not NULL, process block with this ID first
* \param[in] overwrite Whether to replace existing values with same name
- * \param[in] now Time to use when evaluating rules
+ * \param[in] rule_data Matching parameters to use when unpacking
* \param[out] next_change If not NULL, set to when rule evaluation will change
* \param[in] unpack_func Function to call to unpack each block
*/
static void
unpack_nvpair_blocks(xmlNode *top, xmlNode *xml_obj, const char *set_name,
- GHashTable *node_hash, void *hash,
- const char *always_first, gboolean overwrite,
- crm_time_t *now, crm_time_t *next_change,
+ void *hash, const char *always_first, gboolean overwrite,
+ pe_rule_eval_data_t *rule_data, crm_time_t *next_change,
GFunc unpack_func)
{
GList *pairs = make_pairs(top, xml_obj, set_name, always_first);
@@ -678,11 +675,10 @@ unpack_nvpair_blocks(xmlNode *top, xmlNode *xml_obj, const char *set_name,
if (pairs) {
unpack_data_t data = {
.hash = hash,
- .node_hash = node_hash,
- .now = now,
.overwrite = overwrite,
.next_change = next_change,
.top = top,
+ .rule_data = rule_data
};
g_list_foreach(pairs, unpack_func, &data);
@@ -709,8 +705,17 @@ pe_unpack_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name,
const char *always_first, gboolean overwrite,
crm_time_t *now, crm_time_t *next_change)
{
- unpack_nvpair_blocks(top, xml_obj, set_name, node_hash, hash, always_first,
- overwrite, now, next_change, unpack_attr_set);
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_hash,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first,
+ overwrite, &rule_data, next_change, unpack_attr_set);
}
#if ENABLE_VERSIONED_ATTRS
@@ -720,8 +725,17 @@ pe_unpack_versioned_attributes(xmlNode *top, xmlNode *xml_obj,
xmlNode *hash, crm_time_t *now,
crm_time_t *next_change)
{
- unpack_nvpair_blocks(top, xml_obj, set_name, node_hash, hash, NULL, FALSE,
- now, next_change, unpack_versioned_attr_set);
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_hash,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ unpack_nvpair_blocks(top, xml_obj, set_name, hash, NULL, FALSE,
+ &rule_data, next_change, unpack_versioned_attr_set);
}
#endif
@@ -1366,6 +1380,15 @@ unpack_instance_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name,
const char *always_first, gboolean overwrite,
crm_time_t *now)
{
- unpack_nvpair_blocks(top, xml_obj, set_name, node_hash, hash, always_first,
- overwrite, now, NULL, unpack_attr_set);
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_hash,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first,
+ overwrite, &rule_data, NULL, unpack_attr_set);
}
--
1.8.3.1
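Patch 05 folds node_hash and now into the rule_data member of the internal unpack_data_s context that unpack_nvpair_blocks() hands to g_list_foreach(). The underlying pattern is plain GLib: bundle whatever the callback needs into one struct and pass its address as user_data. A standalone sketch of that pattern, using GLib only (all names here are illustrative, not Pacemaker's):

    #include <glib.h>
    #include <stdio.h>

    typedef struct demo_ctx_s {
        const char *prefix;     /* stands in for rule_data, next_change, ... */
        guint       seen;
    } demo_ctx_t;

    static void
    visit(gpointer data, gpointer user_data)    /* GFunc signature */
    {
        demo_ctx_t *ctx = user_data;

        printf("%s%s\n", ctx->prefix, (const char *) data);
        ctx->seen++;
    }

    int main(void)
    {
        GList *blocks = NULL;
        demo_ctx_t ctx = { .prefix = "unpacking ", .seen = 0 };

        blocks = g_list_append(blocks, "meta_attributes-a");
        blocks = g_list_append(blocks, "meta_attributes-b");

        /* same shape as g_list_foreach(pairs, unpack_func, &data) above */
        g_list_foreach(blocks, visit, &ctx);
        printf("%u blocks\n", ctx.seen);

        g_list_free(blocks);
        return 0;
    }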
From 54646db6f5e4f1bb141b35798bcad5c3cc025afe Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Wed, 8 Apr 2020 10:41:41 -0400
Subject: [PATCH 06/17] Refactor: scheduler: Change args to
pe__unpack_dataset_nvpairs.
It should now take a pe_rule_eval_data_t instead of various separate
arguments. This will allow passing further data that needs to be tested
against in the future (such as rsc_defaults and op_defaults). It's also
convenient to make versions of pe_unpack_nvpairs and
pe_unpack_versioned_attributes that take the same arguments.
Then, adapt callers of pe__unpack_dataset_nvpairs to pass the new
argument.
---
include/crm/pengine/internal.h | 2 +-
include/crm/pengine/rules.h | 9 +++++++
lib/pengine/complex.c | 41 ++++++++++++++++++++++-------
lib/pengine/rules.c | 23 ++++++++++++++--
lib/pengine/unpack.c | 33 ++++++++++++++++++++---
lib/pengine/utils.c | 60 +++++++++++++++++++++++++++++++-----------
6 files changed, 137 insertions(+), 31 deletions(-)
diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index 189ba7b..3e59502 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -460,7 +460,7 @@ void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set);
void pe__register_messages(pcmk__output_t *out);
void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name,
- GHashTable *node_hash, GHashTable *hash,
+ pe_rule_eval_data_t *rule_data, GHashTable *hash,
const char *always_first, gboolean overwrite,
pe_working_set_t *data_set);
diff --git a/include/crm/pengine/rules.h b/include/crm/pengine/rules.h
index a74c629..cbae8ed 100644
--- a/include/crm/pengine/rules.h
+++ b/include/crm/pengine/rules.h
@@ -46,12 +46,21 @@ gboolean pe_test_expression(xmlNode *expr, GHashTable *node_hash,
crm_time_t *next_change,
pe_match_data_t *match_data);
+void pe_eval_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name,
+ pe_rule_eval_data_t *rule_data, GHashTable *hash,
+ const char *always_first, gboolean overwrite,
+ crm_time_t *next_change);
+
void pe_unpack_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name,
GHashTable *node_hash, GHashTable *hash,
const char *always_first, gboolean overwrite,
crm_time_t *now, crm_time_t *next_change);
#if ENABLE_VERSIONED_ATTRS
+void pe_eval_versioned_attributes(xmlNode *top, xmlNode *xml_obj,
+ const char *set_name, pe_rule_eval_data_t *rule_data,
+ xmlNode *hash, crm_time_t *next_change);
+
void pe_unpack_versioned_attributes(xmlNode *top, xmlNode *xml_obj,
const char *set_name, GHashTable *node_hash,
xmlNode *hash, crm_time_t *now,
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index 16f3a71..d91c95e 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -95,10 +95,17 @@ void
get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
pe_node_t * node, pe_working_set_t * data_set)
{
- GHashTable *node_hash = NULL;
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
if (node) {
- node_hash = node->details->attrs;
+ rule_data.node_hash = node->details->attrs;
}
if (rsc->xml) {
@@ -112,7 +119,7 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
}
}
- pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, node_hash,
+ pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data,
meta_hash, NULL, FALSE, data_set);
/* set anything else based on the parent */
@@ -122,20 +129,27 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
/* and finally check the defaults */
pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS,
- node_hash, meta_hash, NULL, FALSE, data_set);
+ &rule_data, meta_hash, NULL, FALSE, data_set);
}
void
get_rsc_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
pe_node_t * node, pe_working_set_t * data_set)
{
- GHashTable *node_hash = NULL;
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
if (node) {
- node_hash = node->details->attrs;
+ rule_data.node_hash = node->details->attrs;
}
- pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, node_hash,
+ pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
meta_hash, NULL, FALSE, data_set);
/* set anything else based on the parent */
@@ -145,7 +159,7 @@ get_rsc_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
} else {
/* and finally check the defaults */
pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS,
- node_hash, meta_hash, NULL, FALSE, data_set);
+ &rule_data, meta_hash, NULL, FALSE, data_set);
}
}
@@ -376,6 +390,15 @@ common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc,
bool remote_node = FALSE;
bool has_versioned_params = FALSE;
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
crm_log_xml_trace(xml_obj, "Processing resource input...");
if (id == NULL) {
@@ -706,7 +729,7 @@ common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc,
(*rsc)->utilization = crm_str_table_new();
- pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, NULL,
+ pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data,
(*rsc)->utilization, NULL, FALSE, data_set);
/* data_set->resources = g_list_append(data_set->resources, (*rsc)); */
diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
index 2709d68..7575011 100644
--- a/lib/pengine/rules.c
+++ b/lib/pengine/rules.c
@@ -686,6 +686,16 @@ unpack_nvpair_blocks(xmlNode *top, xmlNode *xml_obj, const char *set_name,
}
}
+void
+pe_eval_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name,
+ pe_rule_eval_data_t *rule_data, GHashTable *hash,
+ const char *always_first, gboolean overwrite,
+ crm_time_t *next_change)
+{
+ unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first,
+ overwrite, rule_data, next_change, unpack_attr_set);
+}
+
/*!
* \brief Extract nvpair blocks contained by an XML element into a hash table
*
@@ -714,12 +724,21 @@ pe_unpack_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name,
.op_data = NULL
};
- unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first,
- overwrite, &rule_data, next_change, unpack_attr_set);
+ pe_eval_nvpairs(top, xml_obj, set_name, &rule_data, hash,
+ always_first, overwrite, next_change);
}
#if ENABLE_VERSIONED_ATTRS
void
+pe_eval_versioned_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name,
+ pe_rule_eval_data_t *rule_data, xmlNode *hash,
+ crm_time_t *next_change)
+{
+ unpack_nvpair_blocks(top, xml_obj, set_name, hash, NULL, FALSE, rule_data,
+ next_change, unpack_versioned_attr_set);
+}
+
+void
pe_unpack_versioned_attributes(xmlNode *top, xmlNode *xml_obj,
const char *set_name, GHashTable *node_hash,
xmlNode *hash, crm_time_t *now,
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 532a3e6..8784857 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -188,9 +188,18 @@ unpack_config(xmlNode * config, pe_working_set_t * data_set)
const char *value = NULL;
GHashTable *config_hash = crm_str_table_new();
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
data_set->config_hash = config_hash;
- pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, NULL, config_hash,
+ pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, &rule_data, config_hash,
CIB_OPTIONS_FIRST, FALSE, data_set);
verify_pe_options(data_set->config_hash);
@@ -515,6 +524,15 @@ unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
const char *type = NULL;
const char *score = NULL;
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
for (xml_obj = __xml_first_child_element(xml_nodes); xml_obj != NULL;
xml_obj = __xml_next_element(xml_obj)) {
@@ -547,7 +565,7 @@ unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
handle_startup_fencing(data_set, new_node);
add_node_attrs(xml_obj, new_node, FALSE, data_set);
- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, NULL,
+ pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, &rule_data,
new_node->details->utilization, NULL,
FALSE, data_set);
@@ -3698,6 +3716,15 @@ add_node_attrs(xmlNode *xml_obj, pe_node_t *node, bool overwrite,
{
const char *cluster_name = NULL;
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
g_hash_table_insert(node->details->attrs,
strdup(CRM_ATTR_UNAME), strdup(node->details->uname));
@@ -3719,7 +3746,7 @@ add_node_attrs(xmlNode *xml_obj, pe_node_t *node, bool overwrite,
strdup(cluster_name));
}
- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, NULL,
+ pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, &rule_data,
node->details->attrs, NULL, overwrite, data_set);
if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) {
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index c9b45e0..d01936d 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -597,10 +597,19 @@ custom_action(pe_resource_t * rsc, char *key, const char *task,
if (is_set(action->flags, pe_action_have_node_attrs) == FALSE
&& action->node != NULL && action->op_entry != NULL) {
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = action->node->details->attrs,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
pe_set_action_bit(action, pe_action_have_node_attrs);
pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS,
- action->node->details->attrs,
- action->extra, NULL, FALSE, data_set);
+ &rule_data, action->extra, NULL,
+ FALSE, data_set);
}
if (is_set(action->flags, pe_action_pseudo)) {
@@ -873,6 +882,15 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
const char *timeout = NULL;
int timeout_ms = 0;
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP);
child != NULL; child = crm_next_same_xml(child)) {
if (safe_str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME))) {
@@ -884,7 +902,7 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
if (timeout == NULL && data_set->op_defaults) {
GHashTable *action_meta = crm_str_table_new();
pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS,
- NULL, action_meta, NULL, FALSE, data_set);
+ &rule_data, action_meta, NULL, FALSE, data_set);
timeout = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
}
@@ -964,10 +982,19 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai
pe_rsc_action_details_t *rsc_details = NULL;
#endif
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
CRM_CHECK(action && action->rsc, return);
// Cluster-wide <op_defaults> <meta_attributes>
- pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, NULL,
+ pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data,
action->meta, NULL, FALSE, data_set);
// Probe timeouts default differently, so handle timeout default later
@@ -981,19 +1008,20 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai
xmlAttrPtr xIter = NULL;
// <op> <meta_attributes> take precedence over defaults
- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, NULL,
+ pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data,
action->meta, NULL, TRUE, data_set);
#if ENABLE_VERSIONED_ATTRS
rsc_details = pe_rsc_action_details(action);
- pe_unpack_versioned_attributes(data_set->input, xml_obj,
- XML_TAG_ATTR_SETS, NULL,
- rsc_details->versioned_parameters,
- data_set->now, NULL);
- pe_unpack_versioned_attributes(data_set->input, xml_obj,
- XML_TAG_META_SETS, NULL,
- rsc_details->versioned_meta,
- data_set->now, NULL);
+
+ pe_eval_versioned_attributes(data_set->input, xml_obj,
+ XML_TAG_ATTR_SETS, &rule_data,
+ rsc_details->versioned_parameters,
+ NULL);
+ pe_eval_versioned_attributes(data_set->input, xml_obj,
+ XML_TAG_META_SETS, &rule_data,
+ rsc_details->versioned_meta,
+ NULL);
#endif
/* Anything set as an <op> XML property has highest precedence.
@@ -2693,14 +2721,14 @@ pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
*/
void
pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name,
- GHashTable *node_hash, GHashTable *hash,
+ pe_rule_eval_data_t *rule_data, GHashTable *hash,
const char *always_first, gboolean overwrite,
pe_working_set_t *data_set)
{
crm_time_t *next_change = crm_time_new_undefined();
- pe_unpack_nvpairs(data_set->input, xml_obj, set_name, node_hash, hash,
- always_first, overwrite, data_set->now, next_change);
+ pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash,
+ always_first, overwrite, next_change);
if (crm_time_is_defined(next_change)) {
time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
--
1.8.3.1
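Patch 06 switches pe__unpack_dataset_nvpairs() over to the rule_data struct and adds pe_eval_nvpairs()/pe_eval_versioned_attributes() as the rule_data-based counterparts of pe_unpack_nvpairs()/pe_unpack_versioned_attributes(). A sketch of the new calling convention, modelled on the unpack_config() hunk above (Pacemaker tree assumed; internal.h and crm_str_table_new() are in-tree helpers, and the function name collect_values is invented):

    #include <crm/pengine/rules.h>
    #include <crm/pengine/internal.h>   /* pe__unpack_dataset_nvpairs() */

    /* Collect nvpairs from blocks named set_name under xml_obj, evaluating any
     * contained rules against data_set->now only (no node attributes). */
    static GHashTable *
    collect_values(xmlNode *xml_obj, const char *set_name, pe_working_set_t *data_set)
    {
        GHashTable *values = crm_str_table_new();
        pe_rule_eval_data_t rule_data = {
            .node_hash  = NULL,
            .role       = RSC_ROLE_UNKNOWN,
            .now        = data_set->now,
            .match_data = NULL,
            .rsc_data   = NULL,
            .op_data    = NULL,
        };

        pe__unpack_dataset_nvpairs(xml_obj, set_name, &rule_data, values,
                                   NULL /* always_first */, FALSE /* overwrite */,
                                   data_set);
        return values;
    }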
From ad06f60bae1fcb5d204fa18a0b21ade78aaee5f4 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Wed, 8 Apr 2020 13:43:26 -0400
Subject: [PATCH 07/17] Refactor: scheduler: unpack_operation should be static.
---
lib/pengine/utils.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index d01936d..c345875 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -23,8 +23,8 @@
extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root);
void print_str_str(gpointer key, gpointer value, gpointer user_data);
gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
-void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
- pe_working_set_t * data_set);
+static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
+ pe_working_set_t * data_set);
static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key,
gboolean include_disabled);
@@ -968,7 +968,7 @@ unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj,
* \param[in] container Resource that contains affected resource, if any
* \param[in] data_set Cluster state
*/
-void
+static void
unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
pe_working_set_t * data_set)
{
--
1.8.3.1
From 7e57d955c9209af62dffc0639c50d51121028c26 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Wed, 8 Apr 2020 14:58:35 -0400
Subject: [PATCH 08/17] Refactor: scheduler: Pass interval to unpack_operation.
---
lib/pengine/utils.c | 36 ++++++++++++++----------------------
1 file changed, 14 insertions(+), 22 deletions(-)
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index c345875..1e3b0bd 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -24,7 +24,7 @@ extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root);
void print_str_str(gpointer key, gpointer value, gpointer user_data);
gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
- pe_working_set_t * data_set);
+ pe_working_set_t * data_set, guint interval_ms);
static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key,
gboolean include_disabled);
@@ -568,9 +568,13 @@ custom_action(pe_resource_t * rsc, char *key, const char *task,
}
if (rsc != NULL) {
+ guint interval_ms = 0;
+
action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
+ parse_op_key(key, NULL, NULL, &interval_ms);
- unpack_operation(action, action->op_entry, rsc->container, data_set);
+ unpack_operation(action, action->op_entry, rsc->container, data_set,
+ interval_ms);
if (save_action) {
rsc->actions = g_list_prepend(rsc->actions, action);
@@ -963,20 +967,20 @@ unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj,
* and start delay values as integer milliseconds), requirements, and
* failure policy.
*
- * \param[in,out] action Action to unpack into
- * \param[in] xml_obj Operation XML (or NULL if all defaults)
- * \param[in] container Resource that contains affected resource, if any
- * \param[in] data_set Cluster state
+ * \param[in,out] action Action to unpack into
+ * \param[in] xml_obj Operation XML (or NULL if all defaults)
+ * \param[in] container Resource that contains affected resource, if any
+ * \param[in] data_set Cluster state
+ * \param[in] interval_ms How frequently to perform the operation
*/
static void
unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
- pe_working_set_t * data_set)
+ pe_working_set_t * data_set, guint interval_ms)
{
- guint interval_ms = 0;
int timeout = 0;
char *value_ms = NULL;
const char *value = NULL;
- const char *field = NULL;
+ const char *field = XML_LRM_ATTR_INTERVAL;
char *default_timeout = NULL;
#if ENABLE_VERSIONED_ATTRS
pe_rsc_action_details_t *rsc_details = NULL;
@@ -1038,23 +1042,11 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai
g_hash_table_remove(action->meta, "id");
// Normalize interval to milliseconds
- field = XML_LRM_ATTR_INTERVAL;
- value = g_hash_table_lookup(action->meta, field);
- if (value != NULL) {
- interval_ms = crm_parse_interval_spec(value);
-
- } else if ((xml_obj == NULL) && !strcmp(action->task, RSC_STATUS)) {
- /* An orphaned recurring monitor will not have any XML. However, we
- * want the interval to be set, so the action can be properly detected
- * as a recurring monitor. Parse it from the key in this case.
- */
- parse_op_key(action->uuid, NULL, NULL, &interval_ms);
- }
if (interval_ms > 0) {
value_ms = crm_strdup_printf("%u", interval_ms);
g_hash_table_replace(action->meta, strdup(field), value_ms);
- } else if (value) {
+ } else if (g_hash_table_lookup(action->meta, field) != NULL) {
g_hash_table_remove(action->meta, field);
}
--
1.8.3.1
From e4c411d9674e222647dd3ed31714c369f54ccad1 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Thu, 9 Apr 2020 16:15:17 -0400
Subject: [PATCH 09/17] Feature: scheduler: Pass rsc_defaults and op_defaults
data.
See: rhbz#1628701.
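For illustration only: a self-contained C sketch (simplified stand-ins, not
the libpe types) of the design this patch builds on -- bundle everything a
rule evaluation might need into one structure passed by pointer, so new kinds
of context (here, resource class/provider/type and operation name/interval)
can be supplied without changing every evaluator's signature. The struct and
field names below are hypothetical.

    #include <stdio.h>
    #include <string.h>

    struct rsc_eval_data { const char *standard, *provider, *agent; };
    struct op_eval_data  { const char *name; unsigned int interval_ms; };

    /* One bundle per evaluation; a member is NULL when that context is absent. */
    struct rule_eval_data {
        const struct rsc_eval_data *rsc;
        const struct op_eval_data  *op;
    };

    /* An evaluator inspects only the parts of the bundle it cares about. */
    static int op_matches(const struct rule_eval_data *data, const char *name)
    {
        return (data->op != NULL) && (strcmp(data->op->name, name) == 0);
    }

    int main(void)
    {
        struct rsc_eval_data rsc = { "ocf", "pacemaker", "Dummy" };
        struct op_eval_data op = { "monitor", 60000 };
        struct rule_eval_data data = { &rsc, &op };

        printf("monitor rule applies: %s\n",
               op_matches(&data, "monitor") ? "yes" : "no");
        return 0;
    }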
---
lib/pengine/complex.c | 8 +++++++-
lib/pengine/utils.c | 15 +++++++++++++--
2 files changed, 20 insertions(+), 3 deletions(-)
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index d91c95e..1f06348 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -95,12 +95,18 @@ void
get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
pe_node_t * node, pe_working_set_t * data_set)
{
+ pe_rsc_eval_data_t rsc_rule_data = {
+ .standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
+ .provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
+ .agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE)
+ };
+
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
- .rsc_data = NULL,
+ .rsc_data = &rsc_rule_data,
.op_data = NULL
};
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index 1e3b0bd..d5309ed 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -986,13 +986,24 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai
pe_rsc_action_details_t *rsc_details = NULL;
#endif
+ pe_rsc_eval_data_t rsc_rule_data = {
+ .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS),
+ .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER),
+ .agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE)
+ };
+
+ pe_op_eval_data_t op_rule_data = {
+ .op_name = action->task,
+ .interval = interval_ms
+ };
+
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
- .rsc_data = NULL,
- .op_data = NULL
+ .rsc_data = &rsc_rule_data,
+ .op_data = &op_rule_data
};
CRM_CHECK(action && action->rsc, return);
--
1.8.3.1
From 57eedcad739071530f01e1fd691734f7681a08a1 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Fri, 17 Apr 2020 12:30:51 -0400
Subject: [PATCH 10/17] Feature: xml: Add rsc_expression and op_expression to
the XML schema.
---
cts/cli/regression.upgrade.exp | 7 +-
cts/cli/regression.validity.exp | 22 ++-
xml/constraints-next.rng | 4 +-
xml/nodes-3.4.rng | 44 +++++
xml/nvset-3.4.rng | 63 ++++++
xml/options-3.4.rng | 111 +++++++++++
xml/resources-3.4.rng | 425 ++++++++++++++++++++++++++++++++++++++++
xml/rule-3.4.rng | 165 ++++++++++++++++
8 files changed, 833 insertions(+), 8 deletions(-)
create mode 100644 xml/nodes-3.4.rng
create mode 100644 xml/nvset-3.4.rng
create mode 100644 xml/options-3.4.rng
create mode 100644 xml/resources-3.4.rng
create mode 100644 xml/rule-3.4.rng
diff --git a/cts/cli/regression.upgrade.exp b/cts/cli/regression.upgrade.exp
index 28ca057..50b22df 100644
--- a/cts/cli/regression.upgrade.exp
+++ b/cts/cli/regression.upgrade.exp
@@ -79,8 +79,11 @@ update_validation debug: Configuration valid for schema: pacemaker-3.2
update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3
update_validation debug: Testing 'pacemaker-3.3' validation (17 of X)
update_validation debug: Configuration valid for schema: pacemaker-3.3
-update_validation trace: Stopping at pacemaker-3.3
-update_validation info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.3
+update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4
+update_validation debug: Testing 'pacemaker-3.4' validation (18 of X)
+update_validation debug: Configuration valid for schema: pacemaker-3.4
+update_validation trace: Stopping at pacemaker-3.4
+update_validation info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.4
=#=#=#= Current cib after: Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping) =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="1">
<configuration>
diff --git a/cts/cli/regression.validity.exp b/cts/cli/regression.validity.exp
index 46e54b5..4407074 100644
--- a/cts/cli/regression.validity.exp
+++ b/cts/cli/regression.validity.exp
@@ -105,7 +105,11 @@ update_validation debug: Testing 'pacemaker-3.3' validation (17 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-3.3 validation failed
-Cannot upgrade configuration (claiming schema pacemaker-1.2) to at least pacemaker-3.0 because it does not validate with any schema from pacemaker-1.2 to pacemaker-3.3
+update_validation debug: Testing 'pacemaker-3.4' validation (18 of X)
+element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
+element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
+update_validation trace: pacemaker-3.4 validation failed
+Cannot upgrade configuration (claiming schema pacemaker-1.2) to at least pacemaker-3.0 because it does not validate with any schema from pacemaker-1.2 to pacemaker-3.4
=#=#=#= End test: Run crm_simulate with invalid CIB (enum violation) - Invalid configuration (78) =#=#=#=
* Passed: crm_simulate - Run crm_simulate with invalid CIB (enum violation)
=#=#=#= Begin test: Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#=
@@ -198,7 +202,10 @@ update_validation trace: pacemaker-3.2 validation failed
update_validation debug: Testing 'pacemaker-3.3' validation (17 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-3.3 validation failed
-Cannot upgrade configuration (claiming schema pacemaker-9999.0) to at least pacemaker-3.0 because it does not validate with any schema from unknown to pacemaker-3.3
+update_validation debug: Testing 'pacemaker-3.4' validation (18 of X)
+element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
+update_validation trace: pacemaker-3.4 validation failed
+Cannot upgrade configuration (claiming schema pacemaker-9999.0) to at least pacemaker-3.0 because it does not validate with any schema from unknown to pacemaker-3.4
=#=#=#= End test: Run crm_simulate with invalid CIB (unrecognized validate-with) - Invalid configuration (78) =#=#=#=
* Passed: crm_simulate - Run crm_simulate with invalid CIB (unrecognized validate-with)
=#=#=#= Begin test: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#=
@@ -286,8 +293,11 @@ update_validation debug: Configuration valid for schema: pacemaker-3.2
update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3
update_validation debug: Testing 'pacemaker-3.3' validation (17 of X)
update_validation debug: Configuration valid for schema: pacemaker-3.3
-update_validation trace: Stopping at pacemaker-3.3
-update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.3
+update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4
+update_validation debug: Testing 'pacemaker-3.4' validation (18 of X)
+update_validation debug: Configuration valid for schema: pacemaker-3.4
+update_validation trace: Stopping at pacemaker-3.4
+update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.4
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
@@ -393,6 +403,8 @@ element rsc_order: Relax-NG validity error : Invalid attribute first-action for
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
+element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
+element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
=#=#=#= Current cib after: Make resulting CIB invalid, and without validate-with attribute =#=#=#=
<cib epoch="31" num_updates="0" admin_epoch="0" validate-with="none">
<configuration>
@@ -450,6 +462,8 @@ validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attrib
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
+validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
+validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
diff --git a/xml/constraints-next.rng b/xml/constraints-next.rng
index 7e0d98e..1fa3e75 100644
--- a/xml/constraints-next.rng
+++ b/xml/constraints-next.rng
@@ -43,7 +43,7 @@
<attribute name="node"><text/></attribute>
</group>
<oneOrMore>
- <externalRef href="rule-2.9.rng"/>
+ <externalRef href="rule-3.4.rng"/>
</oneOrMore>
</choice>
<optional>
@@ -255,7 +255,7 @@
<define name="element-lifetime">
<element name="lifetime">
<oneOrMore>
- <externalRef href="rule-2.9.rng"/>
+ <externalRef href="rule-3.4.rng"/>
</oneOrMore>
</element>
</define>
diff --git a/xml/nodes-3.4.rng b/xml/nodes-3.4.rng
new file mode 100644
index 0000000..0132c72
--- /dev/null
+++ b/xml/nodes-3.4.rng
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-nodes"/>
+ </start>
+
+ <define name="element-nodes">
+ <element name="nodes">
+ <zeroOrMore>
+ <element name="node">
+ <attribute name="id"><text/></attribute>
+ <attribute name="uname"><text/></attribute>
+ <optional>
+ <attribute name="type">
+ <choice>
+ <value>member</value>
+ <value>ping</value>
+ <value>remote</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <zeroOrMore>
+ <choice>
+ <element name="instance_attributes">
+ <externalRef href="nvset-3.4.rng"/>
+ </element>
+ <element name="utilization">
+ <externalRef href="nvset-3.4.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </define>
+
+</grammar>
diff --git a/xml/nvset-3.4.rng b/xml/nvset-3.4.rng
new file mode 100644
index 0000000..91a7d23
--- /dev/null
+++ b/xml/nvset-3.4.rng
@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- just as nvset-2.9.rng, but allows for instantiated @name restrictions -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-nvset"/>
+ </start>
+
+ <!-- nvpair/@name:
+ * generic string by default, parent grammar may want to prohibit
+ enumerated names -->
+ <define name="element-nvset.name">
+ <attribute name="name">
+ <text/>
+ </attribute>
+ </define>
+
+ <!-- nvpair/@name:
+ * defer element-nvset.name grammar item
+ nvpair/@value:
+ generic string by default, parent grammar may want to restrict
+ enumerated pairs (i.e. related to @name) at once -->
+ <define name="element-nvset.name-value">
+ <ref name="element-nvset.name"/>
+ <optional>
+ <attribute name="value"><text/></attribute>
+ </optional>
+ </define>
+
+ <define name="element-nvset">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <interleave>
+ <optional>
+ <externalRef href="rule-3.4.rng"/>
+ </optional>
+ <zeroOrMore>
+ <element name="nvpair">
+ <choice>
+ <group>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="name"><text/></attribute>
+ </optional>
+ </group>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <ref name="element-nvset.name-value"/>
+ </group>
+ </choice>
+ </element>
+ </zeroOrMore>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ </interleave>
+ </group>
+ </choice>
+ </define>
+
+</grammar>
diff --git a/xml/options-3.4.rng b/xml/options-3.4.rng
new file mode 100644
index 0000000..22330d8
--- /dev/null
+++ b/xml/options-3.4.rng
@@ -0,0 +1,111 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="options"/>
+ </start>
+
+ <!--
+ see upgrade-2.10.xsl
+ - cibtr:table for="cluster-properties"
+ -->
+ <define name="cluster_property_set.nvpair.name-value-unsupported">
+ <choice>
+ <group>
+ <attribute name="name">
+ <value type="string">cluster-infrastructure</value>
+ </attribute>
+ <attribute name="value">
+ <data type="string">
+ <except>
+ <choice>
+ <value>heartbeat</value>
+ <value>openais</value>
+ <value>classic openais</value>
+ <value>classic openais (with plugin)</value>
+ <value>cman</value>
+ </choice>
+ </except>
+ </data>
+ </attribute>
+ </group>
+ <group>
+ <attribute name="name">
+ <data type="string">
+ <except>
+ <choice>
+ <value>cluster-infrastructure</value>
+ <value>cluster_recheck_interval</value>
+ <value>dc_deadtime</value>
+ <value>default-action-timeout</value>
+ <value>default_action_timeout</value>
+ <value>default-migration-threshold</value>
+ <value>default_migration_threshold</value>
+ <value>default-resource-failure-stickiness</value>
+ <value>default_resource_failure_stickiness</value>
+ <value>default-resource-stickiness</value>
+ <value>default_resource_stickiness</value>
+ <value>election_timeout</value>
+ <value>expected-quorum-votes</value>
+ <value>is-managed-default</value>
+ <value>is_managed_default</value>
+ <value>no_quorum_policy</value>
+ <value>notification-agent</value>
+ <value>notification-recipient</value>
+ <value>remove_after_stop</value>
+ <value>shutdown_escalation</value>
+ <value>startup_fencing</value>
+ <value>stonith_action</value>
+ <value>stonith_enabled</value>
+ <value>stop_orphan_actions</value>
+ <value>stop_orphan_resources</value>
+ <value>symmetric_cluster</value>
+ <value>transition_idle_timeout</value>
+ </choice>
+ </except>
+ </data>
+ </attribute>
+ <optional>
+ <attribute name="value"><text/></attribute>
+ </optional>
+ </group>
+ </choice>
+ </define>
+
+ <define name="options">
+ <interleave>
+ <element name="crm_config">
+ <zeroOrMore>
+ <element name="cluster_property_set">
+ <grammar>
+ <include href="nvset-3.4.rng">
+ <define name="element-nvset.name-value">
+ <parentRef name="cluster_property_set.nvpair.name-value-unsupported"/>
+ </define>
+ </include>
+ </grammar>
+ </element>
+ </zeroOrMore>
+ </element>
+ <optional>
+ <element name="rsc_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset-3.4.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <element name="op_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset-3.4.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ </interleave>
+ </define>
+
+</grammar>
diff --git a/xml/resources-3.4.rng b/xml/resources-3.4.rng
new file mode 100644
index 0000000..fbb4b65
--- /dev/null
+++ b/xml/resources-3.4.rng
@@ -0,0 +1,425 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-resources"/>
+ </start>
+
+ <define name="element-resources">
+ <element name="resources">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-template"/>
+ <ref name="element-group"/>
+ <ref name="element-clone"/>
+ <ref name="element-master"/>
+ <ref name="element-bundle"/>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <!--
+ see upgrade-2.10.xsl
+ - cibtr:table for="resource-meta-attributes"
+ -->
+ <define name="primitive-template.meta_attributes.nvpair.name-unsupported">
+ <attribute name="name">
+ <data type="string">
+ <except>
+ <choice>
+ <value>isolation</value>
+ <value>isolation-host</value>
+ <value>isolation-instance</value>
+ <value>isolation-wrapper</value>
+ </choice>
+ </except>
+ </data>
+ </attribute>
+ </define>
+
+ <define name="element-resource-extra.primitive-template">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <grammar>
+ <include href="nvset-3.4.rng">
+ <define name="element-nvset.name">
+ <parentRef name="primitive-template.meta_attributes.nvpair.name-unsupported"/>
+ </define>
+ </include>
+ </grammar>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset-3.4.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-primitive">
+ <element name="primitive">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <ref name="element-resource-class"/>
+ <attribute name="type"><text/></attribute>
+ </group>
+ <attribute name="template"><data type="IDREF"/></attribute>
+ </choice>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra.primitive-template"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset-3.4.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-template">
+ <element name="template">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <ref name="element-resource-class"/>
+ <attribute name="type"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra.primitive-template"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset-3.4.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-bundle">
+ <element name="bundle">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <element name="docker">
+ <attribute name="image"><text/></attribute>
+ <optional>
+ <attribute name="replicas"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="replicas-per-host"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="masters"><data type="integer"/></attribute>
+ <attribute name="promoted-max"><data type="integer"/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="run-command"> <text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="network"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="options"><text/></attribute>
+ </optional>
+ </element>
+ <element name="rkt">
+ <attribute name="image"><text/></attribute>
+ <optional>
+ <attribute name="replicas"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="replicas-per-host"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="masters"><data type="integer"/></attribute>
+ <attribute name="promoted-max"><data type="integer"/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="run-command"> <text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="network"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="options"><text/></attribute>
+ </optional>
+ </element>
+ <element name="podman">
+ <attribute name="image"><text/></attribute>
+ <optional>
+ <attribute name="replicas"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="replicas-per-host"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="masters"><data type="integer"/></attribute>
+ <attribute name="promoted-max"><data type="integer"/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="run-command"> <text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="network"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="options"><text/></attribute>
+ </optional>
+ </element>
+ </choice>
+ <optional>
+ <element name="network">
+ <optional>
+ <attribute name="ip-range-start"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="control-port"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="host-interface"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="host-netmask"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="add-host"><data type="boolean"/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="port-mapping">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="port"><data type="integer"/></attribute>
+ <optional>
+ <attribute name="internal-port"><data type="integer"/></attribute>
+ </optional>
+ </group>
+ <attribute name="range">
+ <data type="string">
+ <param name="pattern">([0-9\-]+)</param>
+ </data>
+ </attribute>
+ </choice>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <element name="storage">
+ <zeroOrMore>
+ <element name="storage-mapping">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <attribute name="source-dir"><text/></attribute>
+ <attribute name="source-dir-root"><text/></attribute>
+ </choice>
+ <attribute name="target-dir"><text/></attribute>
+ <optional>
+ <attribute name="options"><text/></attribute>
+ </optional>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <ref name="element-primitive"/>
+ </optional>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-group">
+ <element name="group">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <oneOrMore>
+ <ref name="element-primitive"/>
+ </oneOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-clone">
+ <element name="clone">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-master">
+ <element name="master">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-resource-extra">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <externalRef href="nvset-3.4.rng"/>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset-3.4.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <!--
+ see upgrade-2.10.xsl
+ - cibtr:table for="resources-operation"
+ -->
+ <define name="op.meta_attributes.nvpair.name-unsupported">
+ <attribute name="name">
+ <data type="string">
+ <except>
+ <choice>
+ <value>requires</value>
+ </choice>
+ </except>
+ </data>
+ </attribute>
+ </define>
+
+ <define name="element-resource-extra.op">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <grammar>
+ <include href="nvset-3.4.rng">
+ <define name="element-nvset.name">
+ <parentRef name="op.meta_attributes.nvpair.name-unsupported"/>
+ </define>
+ </include>
+ </grammar>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset-3.4.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-operations">
+ <optional>
+ <element name="operations">
+ <optional>
+ <attribute name="id"><data type="ID"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="op">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <attribute name="interval"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="start-delay"><text/></attribute>
+ <attribute name="interval-origin"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="timeout"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="enabled"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="record-pending"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Slave</value>
+ <value>Master</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="on-fail">
+ <choice>
+ <value>ignore</value>
+ <value>block</value>
+ <value>stop</value>
+ <value>restart</value>
+ <value>standby</value>
+ <value>fence</value>
+ <value>restart-container</value>
+ </choice>
+ </attribute>
+ </optional>
+ <ref name="element-resource-extra.op"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ </define>
+
+ <define name="element-resource-class">
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ <value>service</value>
+ <value>systemd</value>
+ <value>nagios</value>
+ </choice>
+ </attribute>
+ </choice>
+ </define>
+</grammar>
diff --git a/xml/rule-3.4.rng b/xml/rule-3.4.rng
new file mode 100644
index 0000000..5d1daf0
--- /dev/null
+++ b/xml/rule-3.4.rng
@@ -0,0 +1,165 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ xmlns:ann="http://relaxng.org/ns/compatibility/annotations/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-rule"/>
+ </start>
+
+ <define name="element-rule">
+ <element name="rule">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="score-attribute"><text/></attribute>
+ </choice>
+ <optional>
+ <attribute name="boolean-op">
+ <choice>
+ <value>or</value>
+ <value>and</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="role"><text/></attribute>
+ </optional>
+ <oneOrMore>
+ <choice>
+ <element name="expression">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="attribute"><text/></attribute>
+ <attribute name="operation">
+ <choice>
+ <value>lt</value>
+ <value>gt</value>
+ <value>lte</value>
+ <value>gte</value>
+ <value>eq</value>
+ <value>ne</value>
+ <value>defined</value>
+ <value>not_defined</value>
+ </choice>
+ </attribute>
+ <optional>
+ <attribute name="value"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="type" ann:defaultValue="string">
+ <choice>
+ <value>string</value>
+ <value>number</value>
+ <value>version</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="value-source" ann:defaultValue="literal">
+ <choice>
+ <value>literal</value>
+ <value>param</value>
+ <value>meta</value>
+ </choice>
+ </attribute>
+ </optional>
+ </element>
+ <element name="date_expression">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="operation"><value>in_range</value></attribute>
+ <choice>
+ <group>
+ <optional>
+ <attribute name="start"><text/></attribute>
+ </optional>
+ <attribute name="end"><text/></attribute>
+ </group>
+ <group>
+ <attribute name="start"><text/></attribute>
+ <element name="duration">
+ <ref name="date-common"/>
+ </element>
+ </group>
+ </choice>
+ </group>
+ <group>
+ <attribute name="operation"><value>gt</value></attribute>
+ <attribute name="start"><text/></attribute>
+ </group>
+ <group>
+ <attribute name="operation"><value>lt</value></attribute>
+ <choice>
+ <attribute name="end"><text/></attribute>
+ </choice>
+ </group>
+ <group>
+ <attribute name="operation"><value>date_spec</value></attribute>
+ <element name="date_spec">
+ <ref name="date-common"/>
+ </element>
+ </group>
+ </choice>
+ </element>
+ <element name="rsc_expression">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="class"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="provider"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="type"><text/></attribute>
+ </optional>
+ </element>
+ <element name="op_expression">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <optional>
+ <attribute name="interval"><text/></attribute>
+ </optional>
+ </element>
+ <ref name="element-rule"/>
+ </choice>
+ </oneOrMore>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="date-common">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="hours"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="monthdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weekdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="yearsdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="months"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weeks"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="years"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weekyears"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="moon"><text/></attribute>
+ </optional>
+ </define>
+
+</grammar>
--
1.8.3.1
From b0e2345d92fb7cf42c133b24457eeb07126db8a0 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Mon, 27 Apr 2020 16:24:22 -0400
Subject: [PATCH 11/17] Fix: scheduler: Change trace output in populate_hash.
Only show the "Setting attribute:" text when it comes time to actually set
the attribute, and also show the value being set. This makes it clearer that
an attribute is really being set, not just that the function is processing
something.
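For illustration only: a tiny, self-contained C sketch (generic code, not
libpe) of the logging pattern this fix moves to -- trace on the branch that
actually inserts the attribute, and include its value, rather than tracing
every pair the loop merely examines. The helper below is hypothetical.

    #include <stdio.h>

    static void set_attr(const char *name, const char *value, int already_set)
    {
        if ((name == NULL) || (value == NULL)) {
            return;                     /* nothing to insert, nothing to log */
        }
        if (!already_set) {
            printf("Setting attribute: %s = %s\n", name, value);
            /* ... insert the pair into the table here ... */
        }
    }

    int main(void)
    {
        set_attr("timeout", "7s", 0);   /* traced and inserted */
        set_attr("timeout", "5s", 1);   /* existing value kept, no trace */
        return 0;
    }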
---
lib/pengine/rules.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
index 7575011..b0fca55 100644
--- a/lib/pengine/rules.c
+++ b/lib/pengine/rules.c
@@ -463,7 +463,6 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN
name = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_NAME);
}
- crm_trace("Setting attribute: %s", name);
value = crm_element_value(an_attr, XML_NVPAIR_ATTR_VALUE);
if (value == NULL) {
value = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_VALUE);
@@ -471,7 +470,6 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN
if (name == NULL || value == NULL) {
continue;
-
}
old_value = g_hash_table_lookup(hash, name);
@@ -484,6 +482,7 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN
continue;
} else if (old_value == NULL) {
+ crm_trace("Setting attribute: %s = %s", name, value);
g_hash_table_insert(hash, strdup(name), strdup(value));
} else if (overwrite) {
--
1.8.3.1
From d35854384b231c79b8aba1ce4c5caf5dd51ec982 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Fri, 1 May 2020 15:45:31 -0400
Subject: [PATCH 12/17] Test: scheduler: Add a regression test for op_defaults.
---
cts/cts-scheduler.in | 3 +
cts/scheduler/op-defaults.dot | 33 ++++++
cts/scheduler/op-defaults.exp | 211 ++++++++++++++++++++++++++++++++++++++
cts/scheduler/op-defaults.scores | 11 ++
cts/scheduler/op-defaults.summary | 46 +++++++++
cts/scheduler/op-defaults.xml | 87 ++++++++++++++++
6 files changed, 391 insertions(+)
create mode 100644 cts/scheduler/op-defaults.dot
create mode 100644 cts/scheduler/op-defaults.exp
create mode 100644 cts/scheduler/op-defaults.scores
create mode 100644 cts/scheduler/op-defaults.summary
create mode 100644 cts/scheduler/op-defaults.xml
diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index 5d72205..b83f812 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -962,6 +962,9 @@ TESTS = [
[ "shutdown-lock", "Ensure shutdown lock works properly" ],
[ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ],
],
+ [
+ [ "op-defaults", "Test op_defaults conditional expressions " ],
+ ],
# @TODO: If pacemaker implements versioned attributes, uncomment these tests
#[
diff --git a/cts/scheduler/op-defaults.dot b/cts/scheduler/op-defaults.dot
new file mode 100644
index 0000000..5536c15
--- /dev/null
+++ b/cts/scheduler/op-defaults.dot
@@ -0,0 +1,33 @@
+ digraph "g" {
+"dummy-rsc_monitor_0 cluster01" -> "dummy-rsc_start_0 cluster02" [ style = bold]
+"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"dummy-rsc_monitor_0 cluster02" -> "dummy-rsc_start_0 cluster02" [ style = bold]
+"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"dummy-rsc_monitor_60000 cluster02" [ style=bold color="green" fontcolor="black"]
+"dummy-rsc_start_0 cluster02" -> "dummy-rsc_monitor_60000 cluster02" [ style = bold]
+"dummy-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold]
+"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold]
+"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ip-rsc2_monitor_0 cluster01" -> "ip-rsc2_start_0 cluster01" [ style = bold]
+"ip-rsc2_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ip-rsc2_monitor_0 cluster02" -> "ip-rsc2_start_0 cluster01" [ style = bold]
+"ip-rsc2_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"ip-rsc2_monitor_10000 cluster01" [ style=bold color="green" fontcolor="black"]
+"ip-rsc2_start_0 cluster01" -> "ip-rsc2_monitor_10000 cluster01" [ style = bold]
+"ip-rsc2_start_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ip-rsc_monitor_0 cluster01" -> "ip-rsc_start_0 cluster02" [ style = bold]
+"ip-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ip-rsc_monitor_0 cluster02" -> "ip-rsc_start_0 cluster02" [ style = bold]
+"ip-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"ip-rsc_monitor_20000 cluster02" [ style=bold color="green" fontcolor="black"]
+"ip-rsc_start_0 cluster02" -> "ip-rsc_monitor_20000 cluster02" [ style = bold]
+"ip-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"ping-rsc-ping_monitor_0 cluster01" -> "ping-rsc-ping_start_0 cluster01" [ style = bold]
+"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ping-rsc-ping_monitor_0 cluster02" -> "ping-rsc-ping_start_0 cluster01" [ style = bold]
+"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"ping-rsc-ping_start_0 cluster01" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/op-defaults.exp b/cts/scheduler/op-defaults.exp
new file mode 100644
index 0000000..b81eacb
--- /dev/null
+++ b/cts/scheduler/op-defaults.exp
@@ -0,0 +1,211 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="11" operation="start" operation_key="fencing_start_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_timeout="5000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="1" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="6" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="6" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="7000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="1" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="7000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="13" operation="monitor" operation_key="ip-rsc_monitor_20000" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ip-rsc" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="7000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="ip-rsc_start_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="12" operation="start" operation_key="ip-rsc_start_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ip-rsc" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="5000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="2" operation="monitor" operation_key="ip-rsc_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="7" operation="monitor" operation_key="ip-rsc_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="7" operation="monitor" operation_key="ip-rsc_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ip-rsc" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="7000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="2" operation="monitor" operation_key="ip-rsc_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="ip-rsc" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="7000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="15" operation="monitor" operation_key="ip-rsc2_monitor_10000" on_node="cluster01" on_node_uuid="1">
+ <primitive id="ip-rsc2" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_timeout="8000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="ip-rsc2_start_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="14" operation="start" operation_key="ip-rsc2_start_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="ip-rsc2" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_timeout="5000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="3" operation="monitor" operation_key="ip-rsc2_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="8" operation="monitor" operation_key="ip-rsc2_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="8" operation="monitor" operation_key="ip-rsc2_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ip-rsc2" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="7000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="10">
+ <action_set>
+ <rsc_op id="3" operation="monitor" operation_key="ip-rsc2_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="ip-rsc2" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="7000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <rsc_op id="17" operation="monitor" operation_key="dummy-rsc_monitor_60000" on_node="cluster02" on_node_uuid="2">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_fail="stop" CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="7000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="16" operation="start" operation_key="dummy-rsc_start_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="12">
+ <action_set>
+ <rsc_op id="16" operation="start" operation_key="dummy-rsc_start_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="6000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="4" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="9" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <rsc_op id="9" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="7000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="14">
+ <action_set>
+ <rsc_op id="4" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="7000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="15">
+ <action_set>
+ <rsc_op id="18" operation="start" operation_key="ping-rsc-ping_start_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="ping-rsc-ping" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_timeout="5000" host_list="4.2.2.2"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="5" operation="monitor" operation_key="ping-rsc-ping_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="10" operation="monitor" operation_key="ping-rsc-ping_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="16">
+ <action_set>
+ <rsc_op id="10" operation="monitor" operation_key="ping-rsc-ping_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ping-rsc-ping" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="7000" host_list="4.2.2.2"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="17">
+ <action_set>
+ <rsc_op id="5" operation="monitor" operation_key="ping-rsc-ping_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="ping-rsc-ping" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="7000" host_list="4.2.2.2"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/op-defaults.scores b/cts/scheduler/op-defaults.scores
new file mode 100644
index 0000000..1c622f0
--- /dev/null
+++ b/cts/scheduler/op-defaults.scores
@@ -0,0 +1,11 @@
+Allocation scores:
+pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0
+pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0
+pcmk__native_allocate: fencing allocation score on cluster01: 0
+pcmk__native_allocate: fencing allocation score on cluster02: 0
+pcmk__native_allocate: ip-rsc allocation score on cluster01: 0
+pcmk__native_allocate: ip-rsc allocation score on cluster02: 0
+pcmk__native_allocate: ip-rsc2 allocation score on cluster01: 0
+pcmk__native_allocate: ip-rsc2 allocation score on cluster02: 0
+pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0
+pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0
diff --git a/cts/scheduler/op-defaults.summary b/cts/scheduler/op-defaults.summary
new file mode 100644
index 0000000..b580939
--- /dev/null
+++ b/cts/scheduler/op-defaults.summary
@@ -0,0 +1,46 @@
+
+Current cluster status:
+Online: [ cluster01 cluster02 ]
+
+ fencing (stonith:fence_xvm): Stopped
+ ip-rsc (ocf::heartbeat:IPaddr2): Stopped
+ ip-rsc2 (ocf::heartbeat:IPaddr2): Stopped
+ dummy-rsc (ocf::pacemaker:Dummy): Stopped
+ ping-rsc-ping (ocf::pacemaker:ping): Stopped
+
+Transition Summary:
+ * Start fencing ( cluster01 )
+ * Start ip-rsc ( cluster02 )
+ * Start ip-rsc2 ( cluster01 )
+ * Start dummy-rsc ( cluster02 )
+ * Start ping-rsc-ping ( cluster01 )
+
+Executing cluster transition:
+ * Resource action: fencing monitor on cluster02
+ * Resource action: fencing monitor on cluster01
+ * Resource action: ip-rsc monitor on cluster02
+ * Resource action: ip-rsc monitor on cluster01
+ * Resource action: ip-rsc2 monitor on cluster02
+ * Resource action: ip-rsc2 monitor on cluster01
+ * Resource action: dummy-rsc monitor on cluster02
+ * Resource action: dummy-rsc monitor on cluster01
+ * Resource action: ping-rsc-ping monitor on cluster02
+ * Resource action: ping-rsc-ping monitor on cluster01
+ * Resource action: fencing start on cluster01
+ * Resource action: ip-rsc start on cluster02
+ * Resource action: ip-rsc2 start on cluster01
+ * Resource action: dummy-rsc start on cluster02
+ * Resource action: ping-rsc-ping start on cluster01
+ * Resource action: ip-rsc monitor=20000 on cluster02
+ * Resource action: ip-rsc2 monitor=10000 on cluster01
+ * Resource action: dummy-rsc monitor=60000 on cluster02
+
+Revised cluster status:
+Online: [ cluster01 cluster02 ]
+
+ fencing (stonith:fence_xvm): Started cluster01
+ ip-rsc (ocf::heartbeat:IPaddr2): Started cluster02
+ ip-rsc2 (ocf::heartbeat:IPaddr2): Started cluster01
+ dummy-rsc (ocf::pacemaker:Dummy): Started cluster02
+ ping-rsc-ping (ocf::pacemaker:ping): Started cluster01
+
diff --git a/cts/scheduler/op-defaults.xml b/cts/scheduler/op-defaults.xml
new file mode 100644
index 0000000..ae3b248
--- /dev/null
+++ b/cts/scheduler/op-defaults.xml
@@ -0,0 +1,87 @@
+<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.4" epoch="130" num_updates="31" admin_epoch="1" cib-last-written="Fri Apr 24 16:08:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.3-1.c40fb040a.git.el7-c40fb040a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+ <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="cluster01"/>
+ <node id="2" uname="cluster02"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="fencing" type="fence_xvm">
+ <instance_attributes id="fencing-instance_attributes">
+ <nvpair id="fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
+ </instance_attributes>
+ <operations/>
+ </primitive>
+ <primitive class="ocf" id="ip-rsc" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="ip-rsc-instance_attributes">
+ <nvpair id="ip-rsc-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="ip-rsc-instance_attributes-ip" name="ip" value="172.17.1.1"/>
+ </instance_attributes>
+ <operations>
+ <op id="ip-rsc-monitor-interval-20s" interval="20s" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="ip-rsc2" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="ip-rsc2-instance_attributes">
+ <nvpair id="ip-rsc2-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="ip-rsc2-instance_attributes-ip" name="ip" value="172.17.1.1"/>
+ </instance_attributes>
+ <operations>
+ <op id="ip-rsc2-monitor-interval-10s" interval="10s" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="dummy-rsc" provider="pacemaker" type="Dummy">
+ <instance_attributes id="dummy-rsc-instance_attributes">
+ <nvpair id="dummy-rsc-instance_attributes-op_sleep" name="op_sleep" value="10"/>
+ </instance_attributes>
+ <operations>
+ <op id="dummy-rsc-monitor-interval-60s" interval="60s" name="monitor" on-fail="stop"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="ping-rsc-ping" provider="pacemaker" type="ping">
+ <instance_attributes id="ping-rsc-instance_attributes">
+ <nvpair id="ping-rsc-host_list" name="host_list" value="4.2.2.2"/>
+ </instance_attributes>
+ <operations/>
+ </primitive>
+ </resources>
+ <constraints/>
+ <tags/>
+ <op_defaults>
+ <meta_attributes id="op-defaults">
+ <nvpair id="op-defaults-timeout" name="timeout" value="5s"/>
+ </meta_attributes>
+ <meta_attributes id="op-dummy-defaults">
+ <rule id="op-dummy-default-rule" score="INFINITY">
+ <rsc_expression id="op-dummy-default-expr" class="ocf" provider="pacemaker" type="Dummy"/>
+ </rule>
+ <nvpair id="op-dummy-timeout" name="timeout" value="6s"/>
+ </meta_attributes>
+ <meta_attributes id="op-monitor-defaults">
+ <rule id="op-monitor-default-rule" score="INFINITY">
+ <op_expression id="op-monitor-default-expr" name="monitor"/>
+ </rule>
+ <nvpair id="op-monitor-timeout" name="timeout" value="7s"/>
+ </meta_attributes>
+ <meta_attributes id="op-monitor-interval-defaults">
+ <rule id="op-monitor-interval-default-rule" score="INFINITY">
+ <op_expression id="op-monitor-interval-default-expr" name="monitor" interval="10s"/>
+ </rule>
+ <nvpair id="op-monitor-interval-timeout" name="timeout" value="8s"/>
+ </meta_attributes>
+ </op_defaults>
+ </configuration>
+ <status>
+ <node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member"/>
+ <node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
+ </status>
+</cib>
--
1.8.3.1
From 67067927bc1b8e000c06d2b5a4ae6b9223ca13c7 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Wed, 13 May 2020 10:40:34 -0400
Subject: [PATCH 13/17] Test: scheduler: Add a regression test for
rsc_defaults.
---
cts/cts-scheduler.in | 3 +-
cts/scheduler/rsc-defaults.dot | 18 ++++++
cts/scheduler/rsc-defaults.exp | 124 +++++++++++++++++++++++++++++++++++++
cts/scheduler/rsc-defaults.scores | 11 ++++
cts/scheduler/rsc-defaults.summary | 38 ++++++++++++
cts/scheduler/rsc-defaults.xml | 78 +++++++++++++++++++++++
6 files changed, 271 insertions(+), 1 deletion(-)
create mode 100644 cts/scheduler/rsc-defaults.dot
create mode 100644 cts/scheduler/rsc-defaults.exp
create mode 100644 cts/scheduler/rsc-defaults.scores
create mode 100644 cts/scheduler/rsc-defaults.summary
create mode 100644 cts/scheduler/rsc-defaults.xml
diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index b83f812..9022ce9 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -963,7 +963,8 @@ TESTS = [
[ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ],
],
[
- [ "op-defaults", "Test op_defaults conditional expressions " ],
+ [ "op-defaults", "Test op_defaults conditional expressions" ],
+ [ "rsc-defaults", "Test rsc_defaults conditional expressions" ],
],
# @TODO: If pacemaker implements versioned attributes, uncomment these tests
diff --git a/cts/scheduler/rsc-defaults.dot b/cts/scheduler/rsc-defaults.dot
new file mode 100644
index 0000000..d776614
--- /dev/null
+++ b/cts/scheduler/rsc-defaults.dot
@@ -0,0 +1,18 @@
+ digraph "g" {
+"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold]
+"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold]
+"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ip-rsc2_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ip-rsc2_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"ip-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ip-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"ping-rsc-ping_monitor_0 cluster01" -> "ping-rsc-ping_start_0 cluster02" [ style = bold]
+"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ping-rsc-ping_monitor_0 cluster02" -> "ping-rsc-ping_start_0 cluster02" [ style = bold]
+"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"ping-rsc-ping_start_0 cluster02" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/rsc-defaults.exp b/cts/scheduler/rsc-defaults.exp
new file mode 100644
index 0000000..4aec360
--- /dev/null
+++ b/cts/scheduler/rsc-defaults.exp
@@ -0,0 +1,124 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="11" operation="start" operation_key="fencing_start_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="1" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="6" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="6" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="1" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="7" operation="monitor" operation_key="ip-rsc_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ip-rsc" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="2" operation="monitor" operation_key="ip-rsc_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="ip-rsc" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="8" operation="monitor" operation_key="ip-rsc2_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ip-rsc2" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="3" operation="monitor" operation_key="ip-rsc2_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="ip-rsc2" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="9" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="4" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="14" operation="start" operation_key="ping-rsc-ping_start_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ping-rsc-ping" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" host_list="4.2.2.2"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="5" operation="monitor" operation_key="ping-rsc-ping_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="10" operation="monitor" operation_key="ping-rsc-ping_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10">
+ <action_set>
+ <rsc_op id="10" operation="monitor" operation_key="ping-rsc-ping_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ping-rsc-ping" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" host_list="4.2.2.2"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <rsc_op id="5" operation="monitor" operation_key="ping-rsc-ping_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="ping-rsc-ping" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" host_list="4.2.2.2"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/rsc-defaults.scores b/cts/scheduler/rsc-defaults.scores
new file mode 100644
index 0000000..e7f1bab
--- /dev/null
+++ b/cts/scheduler/rsc-defaults.scores
@@ -0,0 +1,11 @@
+Allocation scores:
+pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0
+pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0
+pcmk__native_allocate: fencing allocation score on cluster01: 0
+pcmk__native_allocate: fencing allocation score on cluster02: 0
+pcmk__native_allocate: ip-rsc allocation score on cluster01: -INFINITY
+pcmk__native_allocate: ip-rsc allocation score on cluster02: -INFINITY
+pcmk__native_allocate: ip-rsc2 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: ip-rsc2 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0
+pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0
diff --git a/cts/scheduler/rsc-defaults.summary b/cts/scheduler/rsc-defaults.summary
new file mode 100644
index 0000000..0066f2e
--- /dev/null
+++ b/cts/scheduler/rsc-defaults.summary
@@ -0,0 +1,38 @@
+2 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure
+
+Current cluster status:
+Online: [ cluster01 cluster02 ]
+
+ fencing (stonith:fence_xvm): Stopped
+ ip-rsc (ocf::heartbeat:IPaddr2): Stopped (disabled)
+ ip-rsc2 (ocf::heartbeat:IPaddr2): Stopped (disabled)
+ dummy-rsc (ocf::pacemaker:Dummy): Stopped (unmanaged)
+ ping-rsc-ping (ocf::pacemaker:ping): Stopped
+
+Transition Summary:
+ * Start fencing ( cluster01 )
+ * Start ping-rsc-ping ( cluster02 )
+
+Executing cluster transition:
+ * Resource action: fencing monitor on cluster02
+ * Resource action: fencing monitor on cluster01
+ * Resource action: ip-rsc monitor on cluster02
+ * Resource action: ip-rsc monitor on cluster01
+ * Resource action: ip-rsc2 monitor on cluster02
+ * Resource action: ip-rsc2 monitor on cluster01
+ * Resource action: dummy-rsc monitor on cluster02
+ * Resource action: dummy-rsc monitor on cluster01
+ * Resource action: ping-rsc-ping monitor on cluster02
+ * Resource action: ping-rsc-ping monitor on cluster01
+ * Resource action: fencing start on cluster01
+ * Resource action: ping-rsc-ping start on cluster02
+
+Revised cluster status:
+Online: [ cluster01 cluster02 ]
+
+ fencing (stonith:fence_xvm): Started cluster01
+ ip-rsc (ocf::heartbeat:IPaddr2): Stopped (disabled)
+ ip-rsc2 (ocf::heartbeat:IPaddr2): Stopped (disabled)
+ dummy-rsc (ocf::pacemaker:Dummy): Stopped (unmanaged)
+ ping-rsc-ping (ocf::pacemaker:ping): Started cluster02
+
diff --git a/cts/scheduler/rsc-defaults.xml b/cts/scheduler/rsc-defaults.xml
new file mode 100644
index 0000000..38cae8b
--- /dev/null
+++ b/cts/scheduler/rsc-defaults.xml
@@ -0,0 +1,78 @@
+<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.4" epoch="130" num_updates="31" admin_epoch="1" cib-last-written="Fri Apr 24 16:08:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.3-1.c40fb040a.git.el7-c40fb040a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+ <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="cluster01"/>
+ <node id="2" uname="cluster02"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="fencing" type="fence_xvm">
+ <instance_attributes id="fencing-instance_attributes">
+ <nvpair id="fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
+ </instance_attributes>
+ <operations/>
+ </primitive>
+ <primitive class="ocf" id="ip-rsc" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="ip-rsc-instance_attributes">
+ <nvpair id="ip-rsc-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="ip-rsc-instance_attributes-ip" name="ip" value="172.17.1.1"/>
+ </instance_attributes>
+ <operations>
+ <op id="ip-rsc-monitor-interval-20s" interval="20s" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="ip-rsc2" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="ip-rsc2-instance_attributes">
+ <nvpair id="ip-rsc2-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="ip-rsc2-instance_attributes-ip" name="ip" value="172.17.1.1"/>
+ </instance_attributes>
+ <operations>
+ <op id="ip-rsc2-monitor-interval-10s" interval="10s" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="dummy-rsc" provider="pacemaker" type="Dummy">
+ <instance_attributes id="dummy-rsc-instance_attributes">
+ <nvpair id="dummy-rsc-instance_attributes-op_sleep" name="op_sleep" value="10"/>
+ </instance_attributes>
+ <operations>
+ <op id="dummy-rsc-monitor-interval-60s" interval="60s" name="monitor" on-fail="stop"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="ping-rsc-ping" provider="pacemaker" type="ping">
+ <instance_attributes id="ping-rsc-instance_attributes">
+ <nvpair id="ping-rsc-host_list" name="host_list" value="4.2.2.2"/>
+ </instance_attributes>
+ <operations/>
+ </primitive>
+ </resources>
+ <constraints/>
+ <tags/>
+ <rsc_defaults>
+ <meta_attributes id="op-unmanaged">
+ <rule id="op-unmanaged-rule" score="INFINITY">
+ <rsc_expression id="op-unmanaged-expr" class="ocf" provider="pacemaker" type="Dummy"/>
+ </rule>
+ <nvpair id="op-unmanaged-nvpair" name="is-managed" value="false"/>
+ </meta_attributes>
+ <meta_attributes id="op-target-role">
+ <rule id="op-target-role-rule" score="INFINITY">
+ <rsc_expression id="op-target-role-expr" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ </rule>
+ <nvpair id="op-target-role-nvpair" name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </rsc_defaults>
+ </configuration>
+ <status>
+ <node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member"/>
+ <node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
+ </status>
+</cib>
--
1.8.3.1
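Patch 13 above does the same for rsc_defaults: the rsc_expression selects resources by agent, and the attached nvpair becomes their default meta-attribute. A minimal sketch of that shape, with illustrative ids:

    <rsc_defaults>
      <meta_attributes id="example-unmanaged">
        <rule id="example-unmanaged-rule" score="INFINITY">
          <!-- matches every ocf:pacemaker:Dummy resource -->
          <rsc_expression id="example-unmanaged-expr" class="ocf" provider="pacemaker" type="Dummy"/>
        </rule>
        <nvpair id="example-unmanaged-nvpair" name="is-managed" value="false"/>
      </meta_attributes>
    </rsc_defaults>

In the rsc-defaults test this is why dummy-rsc appears as "Stopped (unmanaged)" in the summary, while the IPaddr2 resources, matched by a separate rule setting target-role, stay "Stopped (disabled)".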
From bcfe068ccb3f3cb6cc3509257fbc4a59bc2b1a41 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Wed, 13 May 2020 12:47:35 -0400
Subject: [PATCH 14/17] Test: scheduler: Add a regression test for op_defaults
with an AND expr.
---
cts/cts-scheduler.in | 1 +
cts/scheduler/op-defaults-2.dot | 33 ++++++
cts/scheduler/op-defaults-2.exp | 211 ++++++++++++++++++++++++++++++++++++
cts/scheduler/op-defaults-2.scores | 11 ++
cts/scheduler/op-defaults-2.summary | 46 ++++++++
cts/scheduler/op-defaults-2.xml | 73 +++++++++++++
6 files changed, 375 insertions(+)
create mode 100644 cts/scheduler/op-defaults-2.dot
create mode 100644 cts/scheduler/op-defaults-2.exp
create mode 100644 cts/scheduler/op-defaults-2.scores
create mode 100644 cts/scheduler/op-defaults-2.summary
create mode 100644 cts/scheduler/op-defaults-2.xml
diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index 9022ce9..669b344 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -964,6 +964,7 @@ TESTS = [
],
[
[ "op-defaults", "Test op_defaults conditional expressions" ],
+ [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ],
[ "rsc-defaults", "Test rsc_defaults conditional expressions" ],
],
diff --git a/cts/scheduler/op-defaults-2.dot b/cts/scheduler/op-defaults-2.dot
new file mode 100644
index 0000000..5c67bd8
--- /dev/null
+++ b/cts/scheduler/op-defaults-2.dot
@@ -0,0 +1,33 @@
+ digraph "g" {
+"dummy-rsc_monitor_0 cluster01" -> "dummy-rsc_start_0 cluster02" [ style = bold]
+"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"dummy-rsc_monitor_0 cluster02" -> "dummy-rsc_start_0 cluster02" [ style = bold]
+"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"dummy-rsc_monitor_10000 cluster02" [ style=bold color="green" fontcolor="black"]
+"dummy-rsc_start_0 cluster02" -> "dummy-rsc_monitor_10000 cluster02" [ style = bold]
+"dummy-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold]
+"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold]
+"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ip-rsc_monitor_0 cluster01" -> "ip-rsc_start_0 cluster02" [ style = bold]
+"ip-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ip-rsc_monitor_0 cluster02" -> "ip-rsc_start_0 cluster02" [ style = bold]
+"ip-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"ip-rsc_monitor_20000 cluster02" [ style=bold color="green" fontcolor="black"]
+"ip-rsc_start_0 cluster02" -> "ip-rsc_monitor_20000 cluster02" [ style = bold]
+"ip-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"ping-rsc-ping_monitor_0 cluster01" -> "ping-rsc-ping_start_0 cluster01" [ style = bold]
+"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ping-rsc-ping_monitor_0 cluster02" -> "ping-rsc-ping_start_0 cluster01" [ style = bold]
+"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"ping-rsc-ping_start_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"rsc-passes_monitor_0 cluster01" -> "rsc-passes_start_0 cluster01" [ style = bold]
+"rsc-passes_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"rsc-passes_monitor_0 cluster02" -> "rsc-passes_start_0 cluster01" [ style = bold]
+"rsc-passes_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"rsc-passes_monitor_10000 cluster01" [ style=bold color="green" fontcolor="black"]
+"rsc-passes_start_0 cluster01" -> "rsc-passes_monitor_10000 cluster01" [ style = bold]
+"rsc-passes_start_0 cluster01" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/op-defaults-2.exp b/cts/scheduler/op-defaults-2.exp
new file mode 100644
index 0000000..4324fde
--- /dev/null
+++ b/cts/scheduler/op-defaults-2.exp
@@ -0,0 +1,211 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="11" operation="start" operation_key="fencing_start_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="1" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="6" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="6" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="1" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="13" operation="monitor" operation_key="ip-rsc_monitor_20000" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ip-rsc" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="ip-rsc_start_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="12" operation="start" operation_key="ip-rsc_start_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ip-rsc" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="2" operation="monitor" operation_key="ip-rsc_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="7" operation="monitor" operation_key="ip-rsc_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="7" operation="monitor" operation_key="ip-rsc_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ip-rsc" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="2" operation="monitor" operation_key="ip-rsc_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="ip-rsc" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="15" operation="monitor" operation_key="rsc-passes_monitor_10000" on_node="cluster01" on_node_uuid="1">
+ <primitive id="rsc-passes" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_timeout="8000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="rsc-passes_start_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="14" operation="start" operation_key="rsc-passes_start_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="rsc-passes" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="3" operation="monitor" operation_key="rsc-passes_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="8" operation="monitor" operation_key="rsc-passes_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="8" operation="monitor" operation_key="rsc-passes_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="rsc-passes" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="10">
+ <action_set>
+ <rsc_op id="3" operation="monitor" operation_key="rsc-passes_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="rsc-passes" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <rsc_op id="17" operation="monitor" operation_key="dummy-rsc_monitor_10000" on_node="cluster02" on_node_uuid="2">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_fail="stop" CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="16" operation="start" operation_key="dummy-rsc_start_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="12">
+ <action_set>
+ <rsc_op id="16" operation="start" operation_key="dummy-rsc_start_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="4" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="9" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <rsc_op id="9" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="14">
+ <action_set>
+ <rsc_op id="4" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="15">
+ <action_set>
+ <rsc_op id="18" operation="start" operation_key="ping-rsc-ping_start_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="ping-rsc-ping" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" host_list="4.2.2.2"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="5" operation="monitor" operation_key="ping-rsc-ping_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="10" operation="monitor" operation_key="ping-rsc-ping_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="16">
+ <action_set>
+ <rsc_op id="10" operation="monitor" operation_key="ping-rsc-ping_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ping-rsc-ping" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" host_list="4.2.2.2"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="17">
+ <action_set>
+ <rsc_op id="5" operation="monitor" operation_key="ping-rsc-ping_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="ping-rsc-ping" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" host_list="4.2.2.2"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/op-defaults-2.scores b/cts/scheduler/op-defaults-2.scores
new file mode 100644
index 0000000..180c8b4
--- /dev/null
+++ b/cts/scheduler/op-defaults-2.scores
@@ -0,0 +1,11 @@
+Allocation scores:
+pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0
+pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0
+pcmk__native_allocate: fencing allocation score on cluster01: 0
+pcmk__native_allocate: fencing allocation score on cluster02: 0
+pcmk__native_allocate: ip-rsc allocation score on cluster01: 0
+pcmk__native_allocate: ip-rsc allocation score on cluster02: 0
+pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0
+pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0
+pcmk__native_allocate: rsc-passes allocation score on cluster01: 0
+pcmk__native_allocate: rsc-passes allocation score on cluster02: 0
diff --git a/cts/scheduler/op-defaults-2.summary b/cts/scheduler/op-defaults-2.summary
new file mode 100644
index 0000000..16a68be
--- /dev/null
+++ b/cts/scheduler/op-defaults-2.summary
@@ -0,0 +1,46 @@
+
+Current cluster status:
+Online: [ cluster01 cluster02 ]
+
+ fencing (stonith:fence_xvm): Stopped
+ ip-rsc (ocf::heartbeat:IPaddr2): Stopped
+ rsc-passes (ocf::heartbeat:IPaddr2): Stopped
+ dummy-rsc (ocf::pacemaker:Dummy): Stopped
+ ping-rsc-ping (ocf::pacemaker:ping): Stopped
+
+Transition Summary:
+ * Start fencing ( cluster01 )
+ * Start ip-rsc ( cluster02 )
+ * Start rsc-passes ( cluster01 )
+ * Start dummy-rsc ( cluster02 )
+ * Start ping-rsc-ping ( cluster01 )
+
+Executing cluster transition:
+ * Resource action: fencing monitor on cluster02
+ * Resource action: fencing monitor on cluster01
+ * Resource action: ip-rsc monitor on cluster02
+ * Resource action: ip-rsc monitor on cluster01
+ * Resource action: rsc-passes monitor on cluster02
+ * Resource action: rsc-passes monitor on cluster01
+ * Resource action: dummy-rsc monitor on cluster02
+ * Resource action: dummy-rsc monitor on cluster01
+ * Resource action: ping-rsc-ping monitor on cluster02
+ * Resource action: ping-rsc-ping monitor on cluster01
+ * Resource action: fencing start on cluster01
+ * Resource action: ip-rsc start on cluster02
+ * Resource action: rsc-passes start on cluster01
+ * Resource action: dummy-rsc start on cluster02
+ * Resource action: ping-rsc-ping start on cluster01
+ * Resource action: ip-rsc monitor=20000 on cluster02
+ * Resource action: rsc-passes monitor=10000 on cluster01
+ * Resource action: dummy-rsc monitor=10000 on cluster02
+
+Revised cluster status:
+Online: [ cluster01 cluster02 ]
+
+ fencing (stonith:fence_xvm): Started cluster01
+ ip-rsc (ocf::heartbeat:IPaddr2): Started cluster02
+ rsc-passes (ocf::heartbeat:IPaddr2): Started cluster01
+ dummy-rsc (ocf::pacemaker:Dummy): Started cluster02
+ ping-rsc-ping (ocf::pacemaker:ping): Started cluster01
+
diff --git a/cts/scheduler/op-defaults-2.xml b/cts/scheduler/op-defaults-2.xml
new file mode 100644
index 0000000..9f3c288
--- /dev/null
+++ b/cts/scheduler/op-defaults-2.xml
@@ -0,0 +1,73 @@
+<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.4" epoch="130" num_updates="31" admin_epoch="1" cib-last-written="Fri Apr 24 16:08:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.3-1.c40fb040a.git.el7-c40fb040a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+ <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="cluster01"/>
+ <node id="2" uname="cluster02"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="fencing" type="fence_xvm">
+ <instance_attributes id="fencing-instance_attributes">
+ <nvpair id="fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
+ </instance_attributes>
+ <operations/>
+ </primitive>
+ <primitive class="ocf" id="ip-rsc" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="ip-rsc-instance_attributes">
+ <nvpair id="ip-rsc-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="ip-rsc-instance_attributes-ip" name="ip" value="172.17.1.1"/>
+ </instance_attributes>
+ <operations>
+ <op id="ip-rsc-monitor-interval-20s" interval="20s" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="rsc-passes" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="rsc-passes-instance_attributes">
+ <nvpair id="rsc-passes-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="rsc-passes-instance_attributes-ip" name="ip" value="172.17.1.1"/>
+ </instance_attributes>
+ <operations>
+ <op id="rsc-passes-monitor-interval-10s" interval="10s" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="dummy-rsc" provider="pacemaker" type="Dummy">
+ <instance_attributes id="dummy-rsc-instance_attributes">
+ <nvpair id="dummy-rsc-instance_attributes-op_sleep" name="op_sleep" value="10"/>
+ </instance_attributes>
+ <operations>
+ <op id="dummy-rsc-monitor-interval-10s" interval="10s" name="monitor" on-fail="stop"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="ping-rsc-ping" provider="pacemaker" type="ping">
+ <instance_attributes id="ping-rsc-instance_attributes">
+ <nvpair id="ping-rsc-host_list" name="host_list" value="4.2.2.2"/>
+ </instance_attributes>
+ <operations/>
+ </primitive>
+ </resources>
+ <constraints/>
+ <tags/>
+ <op_defaults>
+ <meta_attributes id="op-monitor-and">
+ <rule id="op-monitor-and-rule" score="INFINITY">
+ <rsc_expression id="op-monitor-and-rsc-expr" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <op_expression id="op-monitor-and-op-expr" name="monitor" interval="10s"/>
+ </rule>
+ <nvpair id="op-monitor-and-timeout" name="timeout" value="8s"/>
+ </meta_attributes>
+ </op_defaults>
+ </configuration>
+ <status>
+ <node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member"/>
+ <node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
+ </status>
+</cib>
--
1.8.3.1
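The op-defaults-2 test above places an rsc_expression and an op_expression under the same rule; since a rule's conditions are AND'ed by default, the 8-second timeout applies only where both conditions hold, which is why only rsc-passes picks up CRM_meta_timeout="8000" in the expected graph while ip-rsc and dummy-rsc keep 20000. A reduced sketch of that AND'ed rule (ids are illustrative):

    <rule id="example-and-rule" score="INFINITY">
      <!-- both must match: an ocf:heartbeat:IPaddr2 resource AND its 10s monitor -->
      <rsc_expression id="example-and-rsc" class="ocf" provider="heartbeat" type="IPaddr2"/>
      <op_expression id="example-and-op" name="monitor" interval="10s"/>
    </rule>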
From 017b783c2037d641c40a39dd7ec3a9eba0aaa6df Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Wed, 13 May 2020 15:18:28 -0400
Subject: [PATCH 15/17] Doc: Pacemaker Explained: Add documentation for
rsc_expr and op_expr.
---
doc/Pacemaker_Explained/en-US/Ch-Rules.txt | 174 +++++++++++++++++++++++++++++
1 file changed, 174 insertions(+)
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Rules.txt b/doc/Pacemaker_Explained/en-US/Ch-Rules.txt
index 9d617f6..5df5f82 100644
--- a/doc/Pacemaker_Explained/en-US/Ch-Rules.txt
+++ b/doc/Pacemaker_Explained/en-US/Ch-Rules.txt
@@ -522,6 +522,124 @@ You may wish to write +end="2005-03-31T23:59:59"+ to avoid confusion.
-------
=====
+== Resource Expressions ==
+
+An +rsc_expression+ is a rule condition based on a resource agent's properties.
+This rule is only valid within an +rsc_defaults+ or +op_defaults+ context. None
+of the matching attributes of +class+, +provider+, and +type+ are required. If
+one is omitted, all values of that attribute will match. For instance, omitting
++type+ means every type will match.
+
+.Attributes of an rsc_expression Element
+[width="95%",cols="2m,<5",options="header",align="center"]
+|=========================================================
+
+|Field
+|Description
+
+|id
+|A unique name for the expression (required)
+ indexterm:[XML attribute,id attribute,rsc_expression element]
+ indexterm:[XML element,rsc_expression element,id attribute]
+
+|class
+|The standard name to be matched against resource agents
+ indexterm:[XML attribute,class attribute,rsc_expression element]
+ indexterm:[XML element,rsc_expression element,class attribute]
+
+|provider
+|If given, the vendor to be matched against resource agents. This
+ only makes sense for agents using the OCF spec.
+ indexterm:[XML attribute,provider attribute,rsc_expression element]
+ indexterm:[XML element,rsc_expression element,provider attribute]
+
+|type
+|The name of the resource agent to be matched
+ indexterm:[XML attribute,type attribute,rsc_expression element]
+ indexterm:[XML element,rsc_expression element,type attribute]
+
+|=========================================================
+
+=== Example Resource-Based Expressions ===
+
+A small sample of how resource-based expressions can be used:
+
+.True for all ocf:heartbeat:IPaddr2 resources
+====
+[source,XML]
+----
+<rule id="rule1" score="INFINITY">
+ <rsc_expression id="rule_expr1" class="ocf" provider="heartbeat" type="IPaddr2"/>
+</rule>
+----
+====
+
+.Provider doesn't apply to non-OCF resources
+====
+[source,XML]
+----
+<rule id="rule2" score="INFINITY">
+ <rsc_expression id="rule_expr2" class="stonith" type="fence_xvm"/>
+</rule>
+----
+====
+
+== Operation Expressions ==
+
+An +op_expression+ is a rule condition based on an action of some resource
+agent. This rule is only valid within an +op_defaults+ context.
+
+.Attributes of an op_expression Element
+[width="95%",cols="2m,<5",options="header",align="center"]
+|=========================================================
+
+|Field
+|Description
+
+|id
+|A unique name for the expression (required)
+ indexterm:[XML attribute,id attribute,op_expression element]
+ indexterm:[XML element,op_expression element,id attribute]
+
+|name
+|The action name to match against. This can be any action supported by
+ the resource agent; common values include +monitor+, +start+, and +stop+
+ (required).
+ indexterm:[XML attribute,name attribute,op_expression element]
+ indexterm:[XML element,op_expression element,name attribute]
+
+|interval
+|The interval of the action to match against. If not given, only
+ the name attribute will be used to match.
+ indexterm:[XML attribute,interval attribute,op_expression element]
+ indexterm:[XML element,op_expression element,interval attribute]
+
+|=========================================================
+
+=== Example Operation-Based Expressions ===
+
+A small sample of how operation-based expressions can be used:
+
+.True for all monitor actions
+====
+[source,XML]
+----
+<rule id="rule1" score="INFINITY">
+ <op_expression id="rule_expr1" name="monitor"/>
+</rule>
+----
+====
+
+.True for all monitor actions with a 10 second interval
+====
+[source,XML]
+----
+<rule id="rule2" score="INFINITY">
+ <op_expression id="rule_expr2" name="monitor" interval="10s"/>
+</rule>
+----
+====
+
== Using Rules to Determine Resource Location ==
indexterm:[Rule,Determine Resource Location]
indexterm:[Resource,Location,Determine by Rules]
@@ -710,6 +828,62 @@ Rules may be used similarly in +instance_attributes+ or +utilization+ blocks.
Any single block may directly contain only a single rule, but that rule may
itself contain any number of rules.
++rsc_expression+ and +op_expression+ blocks may additionally be used to set defaults
+for either a single resource or an entire class of resources with a single
+rule. +rsc_expression+ may be used to select resource agents within both +rsc_defaults+
+and +op_defaults+, while +op_expression+ may only be used within +op_defaults+. If
+multiple rules succeed for a given resource agent, the last one specified will be
+the one that takes effect. As with any other rule, boolean operations may be used
+to make more complicated expressions.
+
+.Set all IPaddr2 resources to stopped
+=====
+[source,XML]
+-------
+<rsc_defaults>
+ <meta_attributes id="op-target-role">
+ <rule id="op-target-role-rule" score="INFINITY">
+ <rsc_expression id="op-target-role-expr" class="ocf" provider="heartbeat"
+ type="IPaddr2"/>
+ </rule>
+ <nvpair id="op-target-role-nvpair" name="target-role" value="Stopped"/>
+ </meta_attributes>
+</rsc_defaults>
+-------
+=====
+
+.Set all monitor action timeouts to 7 seconds
+=====
+[source,XML]
+-------
+<op_defaults>
+ <meta_attributes id="op-monitor-defaults">
+ <rule id="op-monitor-default-rule" score="INFINITY">
+ <op_expression id="op-monitor-default-expr" name="monitor"/>
+ </rule>
+ <nvpair id="op-monitor-timeout" name="timeout" value="7s"/>
+ </meta_attributes>
+</op_defaults>
+-------
+=====
+
+.Set the monitor action timeout to 8 seconds on all IPaddr2 resources with a 10-second monitor interval
+=====
+[source,XML]
+-------
+<op_defaults>
+ <meta_attributes id="op-monitor-and">
+ <rule id="op-monitor-and-rule" score="INFINITY">
+ <rsc_expression id="op-monitor-and-rsc-expr" class="ocf" provider="heartbeat"
+ type="IPaddr2"/>
+ <op_expression id="op-monitor-and-op-expr" name="monitor" interval="10s"/>
+ </rule>
+ <nvpair id="op-monitor-and-timeout" name="timeout" value="8s"/>
+ </meta_attributes>
+</op_defaults>
+-------
+=====
+
=== Using Rules to Control Cluster Options ===
indexterm:[Rule,Controlling Cluster Options]
indexterm:[Cluster,Setting Options with Rules]
--
1.8.3.1
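The documentation added above notes that boolean operations may combine these expressions like any other rule conditions. One way that could look, assuming the usual boolean-op rule attribute and using illustrative ids and values, is an op_defaults block whose timeout covers both start and stop actions:

    <op_defaults>
      <meta_attributes id="example-start-stop">
        <rule id="example-start-stop-rule" score="INFINITY" boolean-op="or">
          <!-- matches start operations OR stop operations -->
          <op_expression id="example-start-expr" name="start"/>
          <op_expression id="example-stop-expr" name="stop"/>
        </rule>
        <nvpair id="example-start-stop-timeout" name="timeout" value="60s"/>
      </meta_attributes>
    </op_defaults>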
From b8dd16c5e454445f73416ae8b74649545ee1b472 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Wed, 13 May 2020 16:26:21 -0400
Subject: [PATCH 16/17] Test: scheduler: Add a test for multiple rules applying
to the same resource.
---
cts/cts-scheduler.in | 1 +
cts/scheduler/op-defaults-3.dot | 14 +++++++
cts/scheduler/op-defaults-3.exp | 83 +++++++++++++++++++++++++++++++++++++
cts/scheduler/op-defaults-3.scores | 5 +++
cts/scheduler/op-defaults-3.summary | 26 ++++++++++++
cts/scheduler/op-defaults-3.xml | 54 ++++++++++++++++++++++++
6 files changed, 183 insertions(+)
create mode 100644 cts/scheduler/op-defaults-3.dot
create mode 100644 cts/scheduler/op-defaults-3.exp
create mode 100644 cts/scheduler/op-defaults-3.scores
create mode 100644 cts/scheduler/op-defaults-3.summary
create mode 100644 cts/scheduler/op-defaults-3.xml
diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index 669b344..2c2d14f 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -965,6 +965,7 @@ TESTS = [
[
[ "op-defaults", "Test op_defaults conditional expressions" ],
[ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ],
+ [ "op-defaults-3", "Test op_defaults precedence" ],
[ "rsc-defaults", "Test rsc_defaults conditional expressions" ],
],
diff --git a/cts/scheduler/op-defaults-3.dot b/cts/scheduler/op-defaults-3.dot
new file mode 100644
index 0000000..382f630
--- /dev/null
+++ b/cts/scheduler/op-defaults-3.dot
@@ -0,0 +1,14 @@
+ digraph "g" {
+"dummy-rsc_monitor_0 cluster01" -> "dummy-rsc_start_0 cluster02" [ style = bold]
+"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"dummy-rsc_monitor_0 cluster02" -> "dummy-rsc_start_0 cluster02" [ style = bold]
+"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"dummy-rsc_monitor_10000 cluster02" [ style=bold color="green" fontcolor="black"]
+"dummy-rsc_start_0 cluster02" -> "dummy-rsc_monitor_10000 cluster02" [ style = bold]
+"dummy-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold]
+"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold]
+"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/op-defaults-3.exp b/cts/scheduler/op-defaults-3.exp
new file mode 100644
index 0000000..6d567dc
--- /dev/null
+++ b/cts/scheduler/op-defaults-3.exp
@@ -0,0 +1,83 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="5" operation="start" operation_key="fencing_start_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="1" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="3" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="3" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="7000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="1" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="7000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="7" operation="monitor" operation_key="dummy-rsc_monitor_10000" on_node="cluster02" on_node_uuid="2">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_fail="stop" CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="7000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="6" operation="start" operation_key="dummy-rsc_start_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="6" operation="start" operation_key="dummy-rsc_start_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="2" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="4" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="4" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="7000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="2" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="7000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/op-defaults-3.scores b/cts/scheduler/op-defaults-3.scores
new file mode 100644
index 0000000..0a5190a
--- /dev/null
+++ b/cts/scheduler/op-defaults-3.scores
@@ -0,0 +1,5 @@
+Allocation scores:
+pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0
+pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0
+pcmk__native_allocate: fencing allocation score on cluster01: 0
+pcmk__native_allocate: fencing allocation score on cluster02: 0
diff --git a/cts/scheduler/op-defaults-3.summary b/cts/scheduler/op-defaults-3.summary
new file mode 100644
index 0000000..a83eb15
--- /dev/null
+++ b/cts/scheduler/op-defaults-3.summary
@@ -0,0 +1,26 @@
+
+Current cluster status:
+Online: [ cluster01 cluster02 ]
+
+ fencing (stonith:fence_xvm): Stopped
+ dummy-rsc (ocf::pacemaker:Dummy): Stopped
+
+Transition Summary:
+ * Start fencing ( cluster01 )
+ * Start dummy-rsc ( cluster02 )
+
+Executing cluster transition:
+ * Resource action: fencing monitor on cluster02
+ * Resource action: fencing monitor on cluster01
+ * Resource action: dummy-rsc monitor on cluster02
+ * Resource action: dummy-rsc monitor on cluster01
+ * Resource action: fencing start on cluster01
+ * Resource action: dummy-rsc start on cluster02
+ * Resource action: dummy-rsc monitor=10000 on cluster02
+
+Revised cluster status:
+Online: [ cluster01 cluster02 ]
+
+ fencing (stonith:fence_xvm): Started cluster01
+ dummy-rsc (ocf::pacemaker:Dummy): Started cluster02
+
diff --git a/cts/scheduler/op-defaults-3.xml b/cts/scheduler/op-defaults-3.xml
new file mode 100644
index 0000000..4a8912e
--- /dev/null
+++ b/cts/scheduler/op-defaults-3.xml
@@ -0,0 +1,54 @@
+<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.4" epoch="130" num_updates="31" admin_epoch="1" cib-last-written="Fri Apr 24 16:08:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.3-1.c40fb040a.git.el7-c40fb040a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+ <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="cluster01"/>
+ <node id="2" uname="cluster02"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="fencing" type="fence_xvm">
+ <instance_attributes id="fencing-instance_attributes">
+ <nvpair id="fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
+ </instance_attributes>
+ <operations/>
+ </primitive>
+ <primitive class="ocf" id="dummy-rsc" provider="pacemaker" type="Dummy">
+ <instance_attributes id="dummy-rsc-instance_attributes">
+ <nvpair id="dummy-rsc-instance_attributes-op_sleep" name="op_sleep" value="10"/>
+ </instance_attributes>
+ <operations>
+ <op id="dummy-rsc-monitor-interval-10s" interval="10s" name="monitor" on-fail="stop"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints/>
+ <tags/>
+ <op_defaults>
+ <meta_attributes id="op-10s-monitor-defaults">
+ <rule id="op-10s-monitor-default-rule" score="INFINITY">
+ <op_expression id="op-10s-monitor-default-expr" name="monitor" interval="10s"/>
+ </rule>
+ <nvpair id="op-10s-monitor-timeout" name="timeout" value="8s"/>
+ </meta_attributes>
+ <meta_attributes id="op-monitor-defaults">
+ <rule id="op-monitor-default-rule" score="INFINITY">
+ <op_expression id="op-monitor-default-expr" name="monitor"/>
+ </rule>
+ <nvpair id="op-monitor-timeout" name="timeout" value="7s"/>
+ </meta_attributes>
+ </op_defaults>
+ </configuration>
+ <status>
+ <node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member"/>
+ <node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
+ </status>
+</cib>
--
1.8.3.1
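The op-defaults-3 test above is the precedence case spelled out in the documentation patch: both meta_attributes blocks match the 10-second monitor, and the last one specified (timeout 7s) takes effect, which is why the expected graph carries CRM_meta_timeout="7000" rather than "8000". Reduced to its essentials (ids illustrative):

    <op_defaults>
      <meta_attributes id="example-specific">
        <rule id="example-specific-rule" score="INFINITY">
          <op_expression id="example-specific-expr" name="monitor" interval="10s"/>
        </rule>
        <nvpair id="example-specific-timeout" name="timeout" value="8s"/>
      </meta_attributes>
      <meta_attributes id="example-general">
        <rule id="example-general-rule" score="INFINITY">
          <op_expression id="example-general-expr" name="monitor"/>
        </rule>
        <!-- listed last, so this value wins for matching monitor operations -->
        <nvpair id="example-general-timeout" name="timeout" value="7s"/>
      </meta_attributes>
    </op_defaults>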
From b9ccde16609e7d005ac0578a603da97a1808704a Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Fri, 15 May 2020 13:48:47 -0400
Subject: [PATCH 17/17] Test: scheduler: Add a test for rsc_defaults not
specifying type.
---
cts/cts-scheduler.in | 1 +
cts/scheduler/rsc-defaults-2.dot | 11 ++++++
cts/scheduler/rsc-defaults-2.exp | 72 ++++++++++++++++++++++++++++++++++++
cts/scheduler/rsc-defaults-2.scores | 7 ++++
cts/scheduler/rsc-defaults-2.summary | 27 ++++++++++++++
cts/scheduler/rsc-defaults-2.xml | 52 ++++++++++++++++++++++++++
6 files changed, 170 insertions(+)
create mode 100644 cts/scheduler/rsc-defaults-2.dot
create mode 100644 cts/scheduler/rsc-defaults-2.exp
create mode 100644 cts/scheduler/rsc-defaults-2.scores
create mode 100644 cts/scheduler/rsc-defaults-2.summary
create mode 100644 cts/scheduler/rsc-defaults-2.xml
diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index 2c2d14f..346ada2 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -967,6 +967,7 @@ TESTS = [
[ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ],
[ "op-defaults-3", "Test op_defaults precedence" ],
[ "rsc-defaults", "Test rsc_defaults conditional expressions" ],
+ [ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ],
],
# @TODO: If pacemaker implements versioned attributes, uncomment these tests
diff --git a/cts/scheduler/rsc-defaults-2.dot b/cts/scheduler/rsc-defaults-2.dot
new file mode 100644
index 0000000..b43c5e6
--- /dev/null
+++ b/cts/scheduler/rsc-defaults-2.dot
@@ -0,0 +1,11 @@
+ digraph "g" {
+"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold]
+"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold]
+"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/rsc-defaults-2.exp b/cts/scheduler/rsc-defaults-2.exp
new file mode 100644
index 0000000..e9e1b5f
--- /dev/null
+++ b/cts/scheduler/rsc-defaults-2.exp
@@ -0,0 +1,72 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="7" operation="start" operation_key="fencing_start_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="1" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="4" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="4" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="1" operation="monitor" operation_key="fencing_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="5" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="2" operation="monitor" operation_key="dummy-rsc_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="dummy-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" op_sleep="10"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="6" operation="monitor" operation_key="ping-rsc-ping_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ping-rsc-ping" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" host_list="4.2.2.2"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="3" operation="monitor" operation_key="ping-rsc-ping_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="ping-rsc-ping" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" host_list="4.2.2.2"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/rsc-defaults-2.scores b/cts/scheduler/rsc-defaults-2.scores
new file mode 100644
index 0000000..4b70f54
--- /dev/null
+++ b/cts/scheduler/rsc-defaults-2.scores
@@ -0,0 +1,7 @@
+Allocation scores:
+pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0
+pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0
+pcmk__native_allocate: fencing allocation score on cluster01: 0
+pcmk__native_allocate: fencing allocation score on cluster02: 0
+pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0
+pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0
diff --git a/cts/scheduler/rsc-defaults-2.summary b/cts/scheduler/rsc-defaults-2.summary
new file mode 100644
index 0000000..46a2a2d
--- /dev/null
+++ b/cts/scheduler/rsc-defaults-2.summary
@@ -0,0 +1,27 @@
+
+Current cluster status:
+Online: [ cluster01 cluster02 ]
+
+ fencing (stonith:fence_xvm): Stopped
+ dummy-rsc (ocf::pacemaker:Dummy): Stopped (unmanaged)
+ ping-rsc-ping (ocf::pacemaker:ping): Stopped (unmanaged)
+
+Transition Summary:
+ * Start fencing ( cluster01 )
+
+Executing cluster transition:
+ * Resource action: fencing monitor on cluster02
+ * Resource action: fencing monitor on cluster01
+ * Resource action: dummy-rsc monitor on cluster02
+ * Resource action: dummy-rsc monitor on cluster01
+ * Resource action: ping-rsc-ping monitor on cluster02
+ * Resource action: ping-rsc-ping monitor on cluster01
+ * Resource action: fencing start on cluster01
+
+Revised cluster status:
+Online: [ cluster01 cluster02 ]
+
+ fencing (stonith:fence_xvm): Started cluster01
+ dummy-rsc (ocf::pacemaker:Dummy): Stopped (unmanaged)
+ ping-rsc-ping (ocf::pacemaker:ping): Stopped (unmanaged)
+
diff --git a/cts/scheduler/rsc-defaults-2.xml b/cts/scheduler/rsc-defaults-2.xml
new file mode 100644
index 0000000..a160fae
--- /dev/null
+++ b/cts/scheduler/rsc-defaults-2.xml
@@ -0,0 +1,52 @@
+<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.4" epoch="130" num_updates="31" admin_epoch="1" cib-last-written="Fri Apr 24 16:08:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.3-1.c40fb040a.git.el7-c40fb040a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+ <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="cluster01"/>
+ <node id="2" uname="cluster02"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="fencing" type="fence_xvm">
+ <instance_attributes id="fencing-instance_attributes">
+ <nvpair id="fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
+ </instance_attributes>
+ <operations/>
+ </primitive>
+ <primitive class="ocf" id="dummy-rsc" provider="pacemaker" type="Dummy">
+ <instance_attributes id="dummy-rsc-instance_attributes">
+ <nvpair id="dummy-rsc-instance_attributes-op_sleep" name="op_sleep" value="10"/>
+ </instance_attributes>
+ <operations/>
+ </primitive>
+ <primitive class="ocf" id="ping-rsc-ping" provider="pacemaker" type="ping">
+ <instance_attributes id="ping-rsc-instance_attributes">
+ <nvpair id="ping-rsc-host_list" name="host_list" value="4.2.2.2"/>
+ </instance_attributes>
+ <operations/>
+ </primitive>
+ </resources>
+ <constraints/>
+ <tags/>
+ <rsc_defaults>
+ <meta_attributes id="op-unmanaged">
+ <rule id="op-unmanaged-rule" score="INFINITY">
+ <rsc_expression id="op-unmanaged-expr" class="ocf" provider="pacemaker"/>
+ </rule>
+ <nvpair id="op-unmanaged-nvpair" name="is-managed" value="false"/>
+ </meta_attributes>
+ </rsc_defaults>
+ </configuration>
+ <status>
+ <node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member"/>
+ <node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
+ </status>
+</cib>
--
1.8.3.1
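The final test above omits the type attribute, so the rule matches every ocf:pacemaker agent and both dummy-rsc and ping-rsc-ping end up "Stopped (unmanaged)", while the stonith resource is unaffected. Taking the same idea one step further, a sketch (illustrative ids and value) of a class-only match, with provider and type both omitted so every OCF resource picks up the default:

    <rsc_defaults>
      <meta_attributes id="example-all-ocf">
        <rule id="example-all-ocf-rule" score="INFINITY">
          <!-- provider and type omitted: matches every OCF resource -->
          <rsc_expression id="example-all-ocf-expr" class="ocf"/>
        </rule>
        <nvpair id="example-all-ocf-stickiness" name="resource-stickiness" value="100"/>
      </meta_attributes>
    </rsc_defaults>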