import 389-ds-base-1.4.3.23-12.module+el8.5.0+13329+4096c77a

This commit is contained in:
CentOS Sources 2021-12-21 04:12:27 -05:00 committed by Stepan Oksanichenko
parent 8a7f112eaf
commit 7900475d31
4 changed files with 1016 additions and 2 deletions


@@ -0,0 +1,415 @@
From 18a8ed29ae0b300083a6b83665b0137948a2ef7c Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 23 Sep 2021 10:48:50 +0200
Subject: [PATCH 1/3] Issue 4925 - Performance ACI: targetfilter evaluation
result can be reused (#4926)
Bug description:
An ACI may contain a targetfilter. For a given entry returned
by a SRCH request, the same targetfilter is evaluated for each
of the returned attributes.
Once the filter has been evaluated, there is no need to
reevaluate it for the next attribute.
Fix description:
The fix implements a very simple cache (linked list) that keeps
the results of the previously evaluated 'targetfilter'.
This cache is per-entry. For an operation, an aclpb is allocated
and used to evaluate ACIs against each successive entry.
Each time a candidate entry is added to the aclpb
(acl_access_allowed), the cache (aclpb_curr_entry_targetfilters)
is freed. Then each 'targetfilter' is looked up in the cache.
If this is the first evaluation of the filter, the result is
stored in the cache using the original targetfilter as the key.
The key used to look up and store cache entries is the string
representation of the targetfilter. The string buffer contains
a redzone to detect whether the filter exceeds the maximum size
(2K). If it does, the key is invalid and the lookup/store is a
no-op.
relates: #4925
Reviewed by: Mark Reynolds, William Brown (Thanks)
Platforms tested: F34
---
ldap/servers/plugins/acl/acl.c | 138 +++++++++++++++++++++++++++--
ldap/servers/plugins/acl/acl.h | 14 +++
ldap/servers/plugins/acl/acl_ext.c | 12 +++
ldap/servers/slapd/libglobs.c | 29 ++++++
ldap/servers/slapd/proto-slap.h | 2 +
ldap/servers/slapd/slap.h | 2 +
6 files changed, 191 insertions(+), 6 deletions(-)
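Before the diff itself, the caching scheme is small enough to sketch in a few lines. The Python fragment below only illustrates the idea implemented in acl.c (a per-entry memo keyed by the filter's string representation, cleared whenever the candidate entry changes, and bypassed when the key would exceed the 2K buffer guarded by the redzone); every name in it is invented for the example and nothing here is the actual implementation.

# Illustrative sketch of the per-entry targetfilter cache; not the real C code.
MAX_KEY = 2048  # mirrors the 2K string-representation limit guarded by the redzone

class TargetfilterCache:
    def __init__(self, enabled=True):
        self.enabled = enabled          # analogous to the nsslapd-targetfilter-cache toggle
        self.current_entry = None
        self.results = {}               # filter string -> True/False (did the entry match?)

    def evaluate(self, entry, filter_str, evaluate_fn):
        if entry is not self.current_entry:
            # New candidate entry: previously cached results no longer apply.
            self.results.clear()
            self.current_entry = entry
        if not self.enabled or len(filter_str) >= MAX_KEY:
            # Cache disabled, or the key would be truncated: always evaluate.
            return evaluate_fn(entry, filter_str)
        if filter_str not in self.results:
            self.results[filter_str] = evaluate_fn(entry, filter_str)
        return self.results[filter_str]

# The filter is evaluated once per entry, then reused for every returned attribute.
calls = []
def real_evaluation(entry, filter_str):
    calls.append((entry, filter_str))
    return True

cache = TargetfilterCache()
for attr in ("cn", "uid", "gecos"):
    assert cache.evaluate("entry-1", "(objectClass=posixAccount)", real_evaluation)
assert len(calls) == 1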
diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c
index 4e811f73a..377c18e68 100644
--- a/ldap/servers/plugins/acl/acl.c
+++ b/ldap/servers/plugins/acl/acl.c
@@ -67,6 +67,9 @@ static void print_access_control_summary(char *source,
const char *edn,
aclResultReason_t *acl_reason);
static int check_rdn_access(Slapi_PBlock *pb, Slapi_Entry *e, const char *newrdn, int access);
+static struct targetfilter_cached_result *targetfilter_cache_lookup(struct acl_pblock *aclpb, char *filter, PRBool filter_valid);
+static void targetfilter_cache_add(struct acl_pblock *aclpb, char *filter, int result, PRBool filter_valid);
+
/*
@@ -176,6 +179,70 @@ check_rdn_access(Slapi_PBlock *pb, Slapi_Entry *e, const char *dn, int access)
return (retCode);
}
+/* Retrieves, in the targetfilter cache (list), this
+ * filter in case it was already evaluated
+ *
+ * filter: key to retrieve the evaluation in the cache
+ * filter_valid: PR_FALSE means that the filter key is truncated, PR_TRUE otherwise
+ */
+static struct targetfilter_cached_result *
+targetfilter_cache_lookup(struct acl_pblock *aclpb, char *filter, PRBool filter_valid)
+{
+ struct targetfilter_cached_result *results;
+ if (! aclpb->targetfilter_cache_enabled) {
+ /* targetfilter cache is disabled */
+ return NULL;
+ }
+ if (filter == NULL) {
+ return NULL;
+ }
+ for(results = aclpb->aclpb_curr_entry_targetfilters; results; results = results->next) {
+ if (strcmp(results->filter, filter) == 0) {
+ return results;
+ }
+ }
+
+ return NULL;
+}
+
+/* Free all evaluations cached for this current entry */
+void
+targetfilter_cache_free(struct acl_pblock *aclpb)
+{
+ struct targetfilter_cached_result *results, *next;
+ if (aclpb == NULL) {
+ return;
+ }
+ for(results = aclpb->aclpb_curr_entry_targetfilters; results;) {
+ next = results->next;
+ slapi_ch_free_string(&results->filter);
+ slapi_ch_free((void **) &results);
+ results = next;
+ }
+ aclpb->aclpb_curr_entry_targetfilters = NULL;
+}
+
+/* add a new targetfilter evaluation into the cache (per entry)
+ * ATM just use a linked list of evaluations
+ *
+ * filter: key to retrieve the evaluation in the cache
+ * result: result of the evaluation
+ * filter_valid: PR_FALSE means that the filter key is truncated, PR_TRUE otherwise
+ */
+static void
+targetfilter_cache_add(struct acl_pblock *aclpb, char *filter, int result, PRBool filter_valid)
+{
+ struct targetfilter_cached_result *results;
+ if (! filter_valid || ! aclpb->targetfilter_cache_enabled) {
+ /* targetfilter cache is disabled or filter is truncated */
+ return;
+ }
+ results = (struct targetfilter_cached_result *) slapi_ch_calloc(1, (sizeof(struct targetfilter_cached_result)));
+ results->filter = slapi_ch_strdup(filter);
+ results->next = aclpb->aclpb_curr_entry_targetfilters;
+ results->matching_result = result;
+ aclpb->aclpb_curr_entry_targetfilters = results;
+}
/***************************************************************************
*
* acl_access_allowed
@@ -496,6 +563,7 @@ acl_access_allowed(
/* Keep the ptr to the current entry */
aclpb->aclpb_curr_entry = (Slapi_Entry *)e;
+ targetfilter_cache_free(aclpb);
/* Get the attr info */
deallocate_attrEval = acl__get_attrEval(aclpb, attr);
@@ -1924,7 +1992,7 @@ acl_modified(Slapi_PBlock *pb, int optype, Slapi_DN *e_sdn, void *change)
* None.
*
**************************************************************************/
-static int
+int
acl__scan_for_acis(Acl_PBlock *aclpb, int *err)
{
aci_t *aci;
@@ -2405,10 +2473,68 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
ACL_EVAL_TARGET_FILTER);
slapi_ch_free((void **)&lasinfo);
} else {
- if (slapi_vattr_filter_test(NULL, aclpb->aclpb_curr_entry,
- aci->targetFilter,
- 0 /*don't do access check*/) != 0) {
- filter_matched = ACL_FALSE;
+ Slapi_DN *sdn;
+ char* attr_evaluated = "None";
+ char logbuf[2048] = {0};
+ char *redzone = "the redzone";
+ int32_t redzone_idx;
+ char *filterstr; /* key to retrieve/add targetfilter value in the cache */
+ PRBool valid_filter;
+ struct targetfilter_cached_result *previous_filter_test;
+
+ /* only useful for debugging purposes */
+ if (aclpb->aclpb_curr_attrEval && aclpb->aclpb_curr_attrEval->attrEval_name) {
+ attr_evaluated = aclpb->aclpb_curr_attrEval->attrEval_name;
+ }
+ sdn = slapi_entry_get_sdn(aclpb->aclpb_curr_entry);
+
+ /* The key for the cache is the string representation of the original filter
+ * If the string does not fit into the provided buffer (it overwrites the redzone)
+ * then the filter is considered invalid (for the cache) and is always evaluated
+ */
+ redzone_idx = sizeof(logbuf) - 1 - strlen(redzone);
+ strcpy(&logbuf[redzone_idx], redzone);
+ filterstr = slapi_filter_to_string(aci->targetFilter, logbuf, sizeof(logbuf));
+
+ /* if the redzone was overwritten that means filterstr is truncated and not valid */
+ valid_filter = (strcmp(&logbuf[redzone_idx], redzone) == 0);
+ if (!valid_filter) {
+ strcpy(&logbuf[50], "...");
+ slapi_log_err(SLAPI_LOG_ACL, "acl__ressource_match_aci", "targetfilter too large (can not be cache) %s\n", logbuf);
+ }
+
+ previous_filter_test = targetfilter_cache_lookup(aclpb, filterstr, valid_filter);
+ if (previous_filter_test) {
+ /* The filter was already evaluated against that same entry */
+ if (previous_filter_test->matching_result == 0) {
+ slapi_log_err(SLAPI_LOG_ACL, "acl__ressource_match_aci", "cached result for entry %s did NOT match %s (%s)\n",
+ slapi_sdn_get_ndn(sdn),
+ filterstr,
+ attr_evaluated);
+ filter_matched = ACL_FALSE;
+ } else {
+ slapi_log_err(SLAPI_LOG_ACL, "acl__ressource_match_aci", "cached result for entry %s did match %s (%s)\n",
+ slapi_sdn_get_ndn(sdn),
+ filterstr,
+ attr_evaluated);
+ }
+ } else {
+ /* The filter has not already been evaluated against that entry
+ * evaluate it and cache the result
+ */
+ if (slapi_vattr_filter_test(NULL, aclpb->aclpb_curr_entry,
+ aci->targetFilter,
+ 0 /*don't do access check*/) != 0) {
+ filter_matched = ACL_FALSE;
+ targetfilter_cache_add(aclpb, filterstr, 0, valid_filter); /* does not match */
+ } else {
+ targetfilter_cache_add(aclpb, filterstr, 1, valid_filter); /* does match */
+ }
+ slapi_log_err(SLAPI_LOG_ACL, "acl__ressource_match_aci", "entry %s %s match %s (%s)\n",
+ slapi_sdn_get_ndn(sdn),
+ filter_matched == ACL_FALSE ? "does not" : "does",
+ filterstr,
+ attr_evaluated);
}
}
@@ -2858,7 +2984,7 @@ acl__resource_match_aci_EXIT:
* None.
*
**************************************************************************/
-static int
+int
acl__TestRights(Acl_PBlock *aclpb, int access, const char **right, const char **map_generic, aclResultReason_t *result_reason)
{
ACLEvalHandle_t *acleval;
diff --git a/ldap/servers/plugins/acl/acl.h b/ldap/servers/plugins/acl/acl.h
index becc7f920..c9b9efa56 100644
--- a/ldap/servers/plugins/acl/acl.h
+++ b/ldap/servers/plugins/acl/acl.h
@@ -407,6 +407,17 @@ struct aci_container
};
typedef struct aci_container AciContainer;
+/* This structure is stored in the aclpb.
+ * It is a linked list containing the result of
+ * the filter matching against a specific entry.
+ *
+ * This list is freed for each new entry in the aclpb */
+struct targetfilter_cached_result {
+ char *filter; /* strdup of string representation of aci->targetFilter */
+ int matching_result; /* 0 does not match / 1 does match */
+ struct targetfilter_cached_result *next; /* next targetfilter already evaluated */
+};
+
struct acl_pblock
{
int aclpb_state;
@@ -476,6 +487,8 @@ struct acl_pblock
/* Current entry/dn/attr evaluation info */
Slapi_Entry *aclpb_curr_entry; /* current Entry being processed */
+ int32_t targetfilter_cache_enabled;
+ struct targetfilter_cached_result *aclpb_curr_entry_targetfilters;
int aclpb_num_entries;
Slapi_DN *aclpb_curr_entry_sdn; /* Entry's SDN */
Slapi_DN *aclpb_authorization_sdn; /* dn used for authorization */
@@ -723,6 +736,7 @@ void acl_modified(Slapi_PBlock *pb, int optype, Slapi_DN *e_sdn, void *change);
int acl_access_allowed_disjoint_resource(Slapi_PBlock *pb, Slapi_Entry *e, char *attr, struct berval *val, int access);
int acl_access_allowed_main(Slapi_PBlock *pb, Slapi_Entry *e, char **attrs, struct berval *val, int access, int flags, char **errbuf);
+void targetfilter_cache_free(struct acl_pblock *aclpb);
int acl_access_allowed(Slapi_PBlock *pb, Slapi_Entry *e, char *attr, struct berval *val, int access);
aclUserGroup *acl_get_usersGroup(struct acl_pblock *aclpb, char *n_dn);
void acl_print_acllib_err(NSErr_t *errp, char *str);
diff --git a/ldap/servers/plugins/acl/acl_ext.c b/ldap/servers/plugins/acl/acl_ext.c
index 797c5d2fd..c88f7389f 100644
--- a/ldap/servers/plugins/acl/acl_ext.c
+++ b/ldap/servers/plugins/acl/acl_ext.c
@@ -189,6 +189,11 @@ acl_operation_ext_constructor(void *object __attribute__((unused)), void *parent
slapi_log_err(SLAPI_LOG_ERR, plugin_name,
"acl_operation_ext_constructor - Operation extension allocation Failed\n");
}
+ /* targetfilter_cache toggle set during aclpb allocation
+ * to avoid accessing configuration during the evaluation
+ * of each aci
+ */
+ aclpb->targetfilter_cache_enabled = config_get_targetfilter_cache();
TNF_PROBE_0_DEBUG(acl_operation_ext_constructor_end, "ACL", "");
@@ -713,6 +718,7 @@ acl__free_aclpb(Acl_PBlock **aclpb_ptr)
slapi_ch_free((void **)&(aclpb->aclpb_curr_entryEval_context.acle_handles_matched_target));
slapi_ch_free((void **)&(aclpb->aclpb_prev_entryEval_context.acle_handles_matched_target));
slapi_ch_free((void **)&(aclpb->aclpb_prev_opEval_context.acle_handles_matched_target));
+ targetfilter_cache_free(aclpb);
slapi_sdn_free(&aclpb->aclpb_authorization_sdn);
slapi_sdn_free(&aclpb->aclpb_curr_entry_sdn);
if (aclpb->aclpb_macro_ht) {
@@ -921,6 +927,12 @@ acl__done_aclpb(struct acl_pblock *aclpb)
aclpb->aclpb_acleval ? (char *)aclpb->aclpb_acleval : "NULL");
}
+ /* This aclpb returns to the aclpb pool, make sure
+ * the cached evaluations are freed and that
+ * aclpb_curr_entry_targetfilters is NULL
+ */
+ targetfilter_cache_free(aclpb);
+
/* Now Free the contents or clean it */
slapi_sdn_done(aclpb->aclpb_curr_entry_sdn);
if (aclpb->aclpb_Evalattr)
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index db7d01bbc..2ea4cd760 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -221,6 +221,7 @@ slapi_onoff_t init_return_exact_case;
slapi_onoff_t init_result_tweak;
slapi_onoff_t init_plugin_track;
slapi_onoff_t init_moddn_aci;
+slapi_onoff_t init_targetfilter_cache;
slapi_onoff_t init_lastmod;
slapi_onoff_t init_readonly;
slapi_onoff_t init_accesscontrol;
@@ -903,6 +904,11 @@ static struct config_get_and_set
(void **)&global_slapdFrontendConfig.moddn_aci,
CONFIG_ON_OFF, (ConfigGetFunc)config_get_moddn_aci,
&init_moddn_aci, NULL},
+ {CONFIG_TARGETFILTER_CACHE_ATTRIBUTE, config_set_targetfilter_cache,
+ NULL, 0,
+ (void **)&global_slapdFrontendConfig.targetfilter_cache,
+ CONFIG_ON_OFF, (ConfigGetFunc)config_get_targetfilter_cache,
+ &init_targetfilter_cache, NULL},
{CONFIG_ATTRIBUTE_NAME_EXCEPTION_ATTRIBUTE, config_set_attrname_exceptions,
NULL, 0,
(void **)&global_slapdFrontendConfig.attrname_exceptions,
@@ -1688,6 +1694,7 @@ FrontendConfig_init(void)
init_syntaxcheck = cfg->syntaxcheck = LDAP_ON;
init_plugin_track = cfg->plugin_track = LDAP_OFF;
init_moddn_aci = cfg->moddn_aci = LDAP_ON;
+ init_targetfilter_cache = cfg->targetfilter_cache = LDAP_ON;
init_syntaxlogging = cfg->syntaxlogging = LDAP_OFF;
init_dn_validate_strict = cfg->dn_validate_strict = LDAP_OFF;
init_ds4_compatible_schema = cfg->ds4_compatible_schema = LDAP_OFF;
@@ -4053,6 +4060,21 @@ config_set_moddn_aci(const char *attrname, char *value, char *errorbuf, int appl
return retVal;
}
+int32_t
+config_set_targetfilter_cache(const char *attrname, char *value, char *errorbuf, int apply)
+{
+ int32_t retVal = LDAP_SUCCESS;
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+
+ retVal = config_set_onoff(attrname,
+ value,
+ &(slapdFrontendConfig->targetfilter_cache),
+ errorbuf,
+ apply);
+
+ return retVal;
+}
+
int32_t
config_set_dynamic_plugins(const char *attrname, char *value, char *errorbuf, int apply)
{
@@ -5903,6 +5925,13 @@ config_get_moddn_aci(void)
return slapi_atomic_load_32(&(slapdFrontendConfig->moddn_aci), __ATOMIC_ACQUIRE);
}
+int32_t
+config_get_targetfilter_cache(void)
+{
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ return slapi_atomic_load_32(&(slapdFrontendConfig->targetfilter_cache), __ATOMIC_ACQUIRE);
+}
+
int32_t
config_get_security(void)
{
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 2768d5a1d..c143f3772 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -263,6 +263,7 @@ int config_set_lastmod(const char *attrname, char *value, char *errorbuf, int ap
int config_set_nagle(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_accesscontrol(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_moddn_aci(const char *attrname, char *value, char *errorbuf, int apply);
+int32_t config_set_targetfilter_cache(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_security(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_readonly(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_schemacheck(const char *attrname, char *value, char *errorbuf, int apply);
@@ -469,6 +470,7 @@ int config_get_accesscontrol(void);
int config_get_return_exact_case(void);
int config_get_result_tweak(void);
int config_get_moddn_aci(void);
+int32_t config_get_targetfilter_cache(void);
int config_get_security(void);
int config_get_schemacheck(void);
int config_get_syntaxcheck(void);
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index c48516157..a3c0eff93 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -2229,6 +2229,7 @@ typedef struct _slapdEntryPoints
#define CONFIG_REWRITE_RFC1274_ATTRIBUTE "nsslapd-rewrite-rfc1274"
#define CONFIG_PLUGIN_BINDDN_TRACKING_ATTRIBUTE "nsslapd-plugin-binddn-tracking"
#define CONFIG_MODDN_ACI_ATTRIBUTE "nsslapd-moddn-aci"
+#define CONFIG_TARGETFILTER_CACHE_ATTRIBUTE "nsslapd-targetfilter-cache"
#define CONFIG_GLOBAL_BACKEND_LOCK "nsslapd-global-backend-lock"
#define CONFIG_ENABLE_NUNC_STANS "nsslapd-enable-nunc-stans"
#define CONFIG_ENABLE_UPGRADE_HASH "nsslapd-enable-upgrade-hash"
@@ -2401,6 +2402,7 @@ typedef struct _slapdFrontendConfig
char **plugin;
slapi_onoff_t plugin_track;
slapi_onoff_t moddn_aci;
+ slapi_onoff_t targetfilter_cache;
struct pw_scheme *pw_storagescheme;
slapi_onoff_t pwpolicy_local;
slapi_onoff_t pw_is_global_policy;
--
2.31.1
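The patch above also registers a cn=config toggle for this cache, nsslapd-targetfilter-cache, enabled by default in FrontendConfig_init. Because it is an ordinary on/off config attribute, it can be read or changed over LDAP; the python-ldap sketch below uses a placeholder URL and credentials, and the value is captured when the aclpb is set up (see the acl_ext.c comment above), so a change applies to subsequent operations.

# Sketch only: adjust the URL and bind credentials for the target instance.
import ldap

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

# Read the current setting (expected default: on)
entry = conn.search_s('cn=config', ldap.SCOPE_BASE,
                      attrlist=['nsslapd-targetfilter-cache'])
print(entry[0][1].get('nsslapd-targetfilter-cache'))

# Disable the per-entry targetfilter cache
conn.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-targetfilter-cache', [b'off'])])
conn.unbind_s()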


@@ -0,0 +1,468 @@
From 375c1aad59989fb418ab1ead6050f919cfa1ceea Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Fri, 5 Nov 2021 09:56:43 +0100
Subject: [PATCH 2/3] Issue 4972 - gecos with IA5 introduces a compatibility
issue with previous (#4981)
releases where it was DirectoryString
Bug description:
For years 'gecos' was DirectoryString (UTF-8); with #50933 it was restricted to IA5 (ASCII)
https://github.com/389ds/389-ds-base/commit/0683bcde1b667b6d0ca6e8d1ef605f17c51ea2f7#
The IA5 definition conforms to RFC 2307 but is a problem for existing deployments
where entries can have 'gecos' attribute values with UTF-8.
Fix description:
Revert the definition of 'gecos' to DirectoryString.
Additional fix to make test_replica_backup_and_restore more
robust in CI
relates: https://github.com/389ds/389-ds-base/issues/4972
Reviewed by: William Brown, Pierre Rogier, James Chapman (Thanks !)
Platforms tested: F34
---
.../tests/suites/schema/schema_test.py | 398 +++++++++++++++++-
ldap/schema/10rfc2307compat.ldif | 6 +-
2 files changed, 400 insertions(+), 4 deletions(-)
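The tests added below use lib389 to verify both the syntax and the accepted values; the same check can be done by hand. A minimal sketch, assuming an already-connected lib389 DirSrv instance named inst and a placeholder user DN under dc=example,dc=com:

# Sketch only: `inst` is assumed to be a connected lib389 DirSrv instance.
from lib389.schema import SchemaLegacy
from lib389.idm.user import UserAccount

# With the reverted definition, gecos is Directory String again
attributetypes = SchemaLegacy(inst).query_attributetype('gecos')
assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"

# ...so a UTF-8 value is accepted (this failed with an invalid-syntax error under IA5)
testuser = UserAccount(inst, 'uid=testuser,dc=example,dc=com')  # placeholder DN
testuser.replace('gecos', 'Hélène')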
diff --git a/dirsrvtests/tests/suites/schema/schema_test.py b/dirsrvtests/tests/suites/schema/schema_test.py
index d590624b6..5d62b8d59 100644
--- a/dirsrvtests/tests/suites/schema/schema_test.py
+++ b/dirsrvtests/tests/suites/schema/schema_test.py
@@ -18,8 +18,12 @@ import pytest
import six
from ldap.cidict import cidict
from ldap.schema import SubSchema
+from lib389.schema import SchemaLegacy
from lib389._constants import *
-from lib389.topologies import topology_st
+from lib389.topologies import topology_st, topology_m2 as topo_m2
+from lib389.idm.user import UserAccounts, UserAccount
+from lib389.replica import ReplicationManager
+from lib389.utils import ensure_bytes
pytestmark = pytest.mark.tier1
@@ -165,6 +169,398 @@ def test_schema_comparewithfiles(topology_st):
log.info('test_schema_comparewithfiles: PASSED')
+def test_gecos_directoryString(topology_st):
+ """Check that gecos supports directoryString value
+
+ :id: aee422bb-6299-4124-b5cd-d7393dac19d3
+
+ :setup: Standalone instance
+
+ :steps:
+ 1. Add a common user
+ 2. Replace gecos with a directoryString value
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ """
+
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
+
+ user_properties = {
+ 'uid': 'testuser',
+ 'cn' : 'testuser',
+ 'sn' : 'user',
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/testuser',
+ }
+ testuser = users.create(properties=user_properties)
+
+ # Add a gecos UTF value
+ testuser.replace('gecos', 'Hélène')
+
+def test_gecos_mixed_definition_topo(topo_m2, request):
+ """Check that replication is still working if schema contains
+ definitions that do not conform with a replicated entry
+
+ :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
+ :setup: Two suppliers replication setup
+ :steps:
+ 1. Create a testuser on M1
+ 2 Stop M1 and M2
+ 3 Change gecos def on M2 to be IA5
+ 4 Update testuser with gecos directoryString value
+ 5 Check replication is still working
+ :expectedresults:
+ 1. success
+ 2. success
+ 3. success
+ 4. success
+ 5. success
+
+ """
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+ m1 = topo_m2.ms["supplier1"]
+ m2 = topo_m2.ms["supplier2"]
+
+
+ # create a test user
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
+ testuser = UserAccount(m1, testuser_dn)
+ try:
+ testuser.create(properties={
+ 'uid': 'testuser',
+ 'cn': 'testuser',
+ 'sn': 'testuser',
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/testuser',
+ })
+ except ldap.ALREADY_EXISTS:
+ pass
+ repl.wait_for_replication(m1, m2)
+
+ # Stop suppliers to update the schema
+ m1.stop()
+ m2.stop()
+
+ # on M1: gecos is DirectoryString (default)
+ # on M2: gecos is IA5
+ schema_filename = (m2.schemadir + "/99user.ldif")
+ try:
+ with open(schema_filename, 'w') as schema_file:
+ schema_file.write("dn: cn=schema\n")
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
+ "'gecos' DESC 'The GECOS field; the common name' " +
+ "EQUALITY caseIgnoreIA5Match " +
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
+ "SINGLE-VALUE )\n")
+ os.chmod(schema_filename, 0o777)
+ except OSError as e:
+ log.fatal("Failed to update schema file: " +
+ "{} Error: {}".format(schema_filename, str(e)))
+
+ # start the instances
+ m1.start()
+ m2.start()
+
+ # Check that gecos is IA5 on M2
+ schema = SchemaLegacy(m2)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
+
+
+ # Add a gecos UTF value on M1
+ testuser.replace('gecos', 'Hélène')
+
+ # Check replication is still working
+ testuser.replace('displayName', 'ascii value')
+ repl.wait_for_replication(m1, m2)
+ testuser_m2 = UserAccount(m2, testuser_dn)
+ assert testuser_m2.exists()
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
+
+ def fin():
+ m1.start()
+ m2.start()
+ testuser.delete()
+ repl.wait_for_replication(m1, m2)
+
+ # on M2 restore a default 99user.ldif
+ m2.stop()
+ os.remove(m2.schemadir + "/99user.ldif")
+ schema_filename = (m2.schemadir + "/99user.ldif")
+ try:
+ with open(schema_filename, 'w') as schema_file:
+ schema_file.write("dn: cn=schema\n")
+ os.chmod(schema_filename, 0o777)
+ except OSError as e:
+ log.fatal("Failed to update schema file: " +
+ "{} Error: {}".format(schema_filename, str(e)))
+ m2.start()
+ m1.start()
+
+ request.addfinalizer(fin)
+
+def test_gecos_directoryString_wins_M1(topo_m2, request):
+ """Check that if inital syntax are IA5(M2) and DirectoryString(M1)
+ Then directoryString wins when nsSchemaCSN M1 is the greatest
+
+ :id: ad119fa5-7671-45c8-b2ef-0b28ffb68fdb
+ :setup: Two suppliers replication setup
+ :steps:
+ 1. Create a testuser on M1
+ 2 Stop M1 and M2
+ 3 Change gecos def on M2 to be IA5
+ 4 Start M1 and M2
+ 5 Update M1 schema so that M1 has greatest nsSchemaCSN
+ 6 Update testuser with gecos directoryString value
+ 7 Check replication is still working
+ 8 Check gecos is DirectoryString on M1 and M2
+ :expectedresults:
+ 1. success
+ 2. success
+ 3. success
+ 4. success
+ 5. success
+ 6. success
+ 7. success
+ 8. success
+
+ """
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+ m1 = topo_m2.ms["supplier1"]
+ m2 = topo_m2.ms["supplier2"]
+
+
+ # create a test user
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
+ testuser = UserAccount(m1, testuser_dn)
+ try:
+ testuser.create(properties={
+ 'uid': 'testuser',
+ 'cn': 'testuser',
+ 'sn': 'testuser',
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/testuser',
+ })
+ except ldap.ALREADY_EXISTS:
+ pass
+ repl.wait_for_replication(m1, m2)
+
+ # Stop suppliers to update the schema
+ m1.stop()
+ m2.stop()
+
+ # on M1: gecos is DirectoryString (default)
+ # on M2: gecos is IA5
+ schema_filename = (m2.schemadir + "/99user.ldif")
+ try:
+ with open(schema_filename, 'w') as schema_file:
+ schema_file.write("dn: cn=schema\n")
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
+ "'gecos' DESC 'The GECOS field; the common name' " +
+ "EQUALITY caseIgnoreIA5Match " +
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
+ "SINGLE-VALUE )\n")
+ os.chmod(schema_filename, 0o777)
+ except OSError as e:
+ log.fatal("Failed to update schema file: " +
+ "{} Error: {}".format(schema_filename, str(e)))
+
+ # start the instances
+ m1.start()
+ m2.start()
+
+ # Check that gecos is IA5 on M2
+ schema = SchemaLegacy(m2)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
+
+
+ # update M1 schema to increase its nsschemaCSN
+ new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )"
+ m1.schema.add_schema('attributetypes', ensure_bytes(new_at))
+
+ # Add a gecos UTF value on M1
+ testuser.replace('gecos', 'Hélène')
+
+ # Check replication is still working
+ testuser.replace('displayName', 'ascii value')
+ repl.wait_for_replication(m1, m2)
+ testuser_m2 = UserAccount(m2, testuser_dn)
+ assert testuser_m2.exists()
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
+
+ # Check that gecos is DirectoryString on M1
+ schema = SchemaLegacy(m1)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
+
+ # Check that gecos is DirectoryString on M2
+ schema = SchemaLegacy(m2)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
+
+ def fin():
+ m1.start()
+ m2.start()
+ testuser.delete()
+ m1.schema.del_schema('attributetypes', ensure_bytes(new_at))
+ repl.wait_for_replication(m1, m2)
+
+ # on M2 restore a default 99user.ldif
+ m2.stop()
+ os.remove(m2.schemadir + "/99user.ldif")
+ schema_filename = (m2.schemadir + "/99user.ldif")
+ try:
+ with open(schema_filename, 'w') as schema_file:
+ schema_file.write("dn: cn=schema\n")
+ os.chmod(schema_filename, 0o777)
+ except OSError as e:
+ log.fatal("Failed to update schema file: " +
+ "{} Error: {}".format(schema_filename, str(e)))
+ m2.start()
+ m1.start()
+
+ request.addfinalizer(fin)
+
+def test_gecos_directoryString_wins_M2(topo_m2, request):
+ """Check that if inital syntax are IA5(M2) and DirectoryString(M1)
+ Then directoryString wins when nsSchemaCSN M2 is the greatest
+
+ :id: 2da7f1b1-f86d-4072-a940-ba56d4bc8348
+ :setup: Two suppliers replication setup
+ :steps:
+ 1. Create a testuser on M1
+ 2 Stop M1 and M2
+ 3 Change gecos def on M2 to be IA5
+ 4 Start M1 and M2
+ 5 Update M2 schema so that M2 has greatest nsSchemaCSN
+ 6 Update testuser on M2 and trigger replication to M1
+ 7 Update testuser on M2 with gecos directoryString value
+ 8 Check replication is still working
+ 9 Check gecos is DirectoryString on M1 and M2
+ :expectedresults:
+ 1. success
+ 2. success
+ 3. success
+ 4. success
+ 5. success
+ 6. success
+ 7. success
+ 8. success
+ 9. success
+
+ """
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+ m1 = topo_m2.ms["supplier1"]
+ m2 = topo_m2.ms["supplier2"]
+
+
+ # create a test user
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
+ testuser = UserAccount(m1, testuser_dn)
+ try:
+ testuser.create(properties={
+ 'uid': 'testuser',
+ 'cn': 'testuser',
+ 'sn': 'testuser',
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/testuser',
+ })
+ except ldap.ALREADY_EXISTS:
+ pass
+ testuser.replace('displayName', 'to trigger replication M1-> M2')
+ repl.wait_for_replication(m1, m2)
+
+ # Stop suppliers to update the schema
+ m1.stop()
+ m2.stop()
+
+ # on M1: gecos is DirectoryString (default)
+ # on M2: gecos is IA5
+ schema_filename = (m2.schemadir + "/99user.ldif")
+ try:
+ with open(schema_filename, 'w') as schema_file:
+ schema_file.write("dn: cn=schema\n")
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
+ "'gecos' DESC 'The GECOS field; the common name' " +
+ "EQUALITY caseIgnoreIA5Match " +
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
+ "SINGLE-VALUE )\n")
+ os.chmod(schema_filename, 0o777)
+ except OSError as e:
+ log.fatal("Failed to update schema file: " +
+ "{} Error: {}".format(schema_filename, str(e)))
+
+ # start the instances
+ m1.start()
+ m2.start()
+
+ # Check that gecos is IA5 on M2
+ schema = SchemaLegacy(m2)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
+
+ # update M2 schema to increase its nsschemaCSN
+ new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )"
+ m2.schema.add_schema('attributetypes', ensure_bytes(new_at))
+
+ # update just to trigger replication M2->M1
+ # and update of M2 schema
+ testuser_m2 = UserAccount(m2, testuser_dn)
+ testuser_m2.replace('displayName', 'to trigger replication M2-> M1')
+
+ # Add a gecos UTF value on M1
+ testuser.replace('gecos', 'Hélène')
+
+ # Check replication is still working
+ testuser.replace('displayName', 'ascii value')
+ repl.wait_for_replication(m1, m2)
+ assert testuser_m2.exists()
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
+
+ # Check that gecos is DirectoryString on M1
+ schema = SchemaLegacy(m1)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
+
+ # Check that gecos is DirectoryString on M2
+ schema = SchemaLegacy(m2)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
+
+ def fin():
+ m1.start()
+ m2.start()
+ testuser.delete()
+ m1.schema.del_schema('attributetypes', ensure_bytes(new_at))
+ repl.wait_for_replication(m1, m2)
+
+ # on M2 restore a default 99user.ldif
+ m2.stop()
+ os.remove(m2.schemadir + "/99user.ldif")
+ schema_filename = (m2.schemadir + "/99user.ldif")
+ try:
+ with open(schema_filename, 'w') as schema_file:
+ schema_file.write("dn: cn=schema\n")
+ os.chmod(schema_filename, 0o777)
+ except OSError as e:
+ log.fatal("Failed to update schema file: " +
+ "{} Error: {}".format(schema_filename, str(e)))
+ m2.start()
+
+ request.addfinalizer(fin)
if __name__ == '__main__':
# Run isolated
diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif
index 8ba72e1e3..998b8983b 100644
--- a/ldap/schema/10rfc2307compat.ldif
+++ b/ldap/schema/10rfc2307compat.ldif
@@ -21,9 +21,9 @@ attributeTypes: (
attributeTypes: (
1.3.6.1.1.1.1.2 NAME 'gecos'
DESC 'The GECOS field; the common name'
- EQUALITY caseIgnoreIA5Match
- SUBSTR caseIgnoreIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ EQUALITY caseIgnoreMatch
+ SUBSTR caseIgnoreSubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributeTypes: (
--
2.31.1


@@ -0,0 +1,120 @@
From 096c95690a27c942d47b20a85fa3d7fe15ffe624 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 8 Sep 2021 10:31:19 -0400
Subject: [PATCH] Issue 4910 - db reindex corrupts RUV tombstone nsuiqueid
index
Bug Description: During a reindex task we skip the RUV tombstone entry,
which corrupts the nsuniqueid index.
Fix Description: Make sure we still update the nsuniqueid index for
the RUV tombstone entry.
relates: https://github.com/389ds/389-ds-base/issues/4910
Reviewed by: firstyear & progier389 (Thanks!!)
---
.../tests/suites/replication/ruvstore_test.py | 35 +++++++++++++++++++
.../slapd/back-ldbm/db-bdb/bdb_ldif2db.c | 12 +++++--
2 files changed, 44 insertions(+), 3 deletions(-)
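The regression test added below can be reproduced interactively with the same lib389 helpers; a minimal sketch, assuming a connected lib389 DirSrv instance named inst that uses the default backend:

# Sketch only: `inst` is assumed to be a connected lib389 DirSrv instance.
from lib389.backend import Backends
from lib389.idm.domain import Domain
from lib389._constants import DEFAULT_SUFFIX, DEFAULT_BENAME

# Reindex nsuniqueid; without the fix this skipped the RUV tombstone entry
backend = Backends(inst).get(DEFAULT_BENAME)
backend.reindex(attrs=['nsuniqueid'], wait=True)

# Apply a few updates so the RUV gets written back
suffix = Domain(inst, "ou=people," + DEFAULT_SUFFIX)
for idx in range(5):
    suffix.replace('description', str(idx))

# Stopping the instance flushes the RUV; the error log should stay clean
inst.stop()
assert not inst.searchErrorsLog("entryrdn_insert_key")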
diff --git a/dirsrvtests/tests/suites/replication/ruvstore_test.py b/dirsrvtests/tests/suites/replication/ruvstore_test.py
index c04fd079e..4e5326227 100644
--- a/dirsrvtests/tests/suites/replication/ruvstore_test.py
+++ b/dirsrvtests/tests/suites/replication/ruvstore_test.py
@@ -12,6 +12,8 @@ import ldap
import pytest
from ldif import LDIFParser
from lib389.replica import Replicas
+from lib389.backend import Backends
+from lib389.idm.domain import Domain
from lib389.idm.user import UserAccounts
from lib389.topologies import topology_m2 as topo
from lib389._constants import *
@@ -156,6 +158,39 @@ def test_memoryruv_sync_with_databaseruv(topo):
_compare_memoryruv_and_databaseruv(topo, 'delete')
+def test_ruv_after_reindex(topo):
+ """Test that the tombstone RUV entry is not corrupted after a reindex task
+
+ :id: 988c0fab-1905-4dc5-a45d-fbf195843a33
+ :setup: 2 suppliers
+ :steps:
+ 1. Reindex database
+ 2. Perform some updates
+ 3. Check error log does not have "_entryrdn_insert_key" errors
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+
+ inst = topo.ms['supplier1']
+ suffix = Domain(inst, "ou=people," + DEFAULT_SUFFIX)
+ backends = Backends(inst)
+ backend = backends.get(DEFAULT_BENAME)
+
+ # Reindex nsuniqueid
+ backend.reindex(attrs=['nsuniqueid'], wait=True)
+
+ # Do some updates
+ for idx in range(0, 5):
+ suffix.replace('description', str(idx))
+
+ # Check error log for RUV entryrdn errors. Stopping instance forces RUV
+ # to be written and quickly exposes the error
+ inst.stop()
+ assert not inst.searchErrorsLog("entryrdn_insert_key")
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
index 506c285a3..6100dbf77 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
@@ -25,6 +25,7 @@
#define DB2INDEX_ENTRYRDN 0x2 /* index entryrdn */
#define DB2LDIF_ENTRYRDN 0x4 /* export entryrdn */
#define DB2INDEX_OBJECTCLASS 0x10 /* for reindexing "objectclass: nstombstone" */
+#define DB2INDEX_NSUNIQUEID 0x20 /* for reindexing RUV tombstone */
#define LDIF2LDBM_EXTBITS(x) ((x)&0xf)
@@ -1543,6 +1544,9 @@ bdb_db2index(Slapi_PBlock *pb)
if (strcasecmp(attrs[i] + 1, SLAPI_ATTR_OBJECTCLASS) == 0) {
index_ext |= DB2INDEX_OBJECTCLASS;
}
+ if (strcasecmp(attrs[i] + 1, SLAPI_ATTR_UNIQUEID) == 0) {
+ index_ext |= DB2INDEX_NSUNIQUEID;
+ }
charray_add(&indexAttrs, attrs[i] + 1);
ai->ai_indexmask |= INDEX_OFFLINE;
slapi_task_log_notice(task, "%s: Indexing attribute: %s",
@@ -1895,7 +1899,7 @@ bdb_db2index(Slapi_PBlock *pb)
* Update the attribute indexes
*/
if (indexAttrs) {
- if (istombstone && !(index_ext & (DB2INDEX_ENTRYRDN | DB2INDEX_OBJECTCLASS))) {
+ if (istombstone && !(index_ext & (DB2INDEX_ENTRYRDN | DB2INDEX_OBJECTCLASS | DB2INDEX_NSUNIQUEID))) {
/* if it is a tombstone entry, just entryrdn or "objectclass: nstombstone"
* need to be reindexed. the to-be-indexed list does not contain them. */
backentry_free(&ep);
@@ -1915,8 +1919,10 @@ bdb_db2index(Slapi_PBlock *pb)
if (istombstone) {
if (!slapi_attr_type_cmp(indexAttrs[j], SLAPI_ATTR_OBJECTCLASS, SLAPI_TYPE_CMP_SUBTYPE)) {
is_tombstone_obj = 1; /* is tombstone && is objectclass. need to index "nstombstone"*/
- } else if (slapi_attr_type_cmp(indexAttrs[j], LDBM_ENTRYRDN_STR, SLAPI_TYPE_CMP_SUBTYPE)) {
- /* Entry is a tombstone && this index is not an entryrdn. */
+ } else if (slapi_attr_type_cmp(indexAttrs[j], LDBM_ENTRYRDN_STR, SLAPI_TYPE_CMP_SUBTYPE) &&
+ slapi_attr_type_cmp(indexAttrs[j], SLAPI_ATTR_UNIQUEID, SLAPI_TYPE_CMP_SUBTYPE))
+ {
+ /* Entry is a tombstone && this index is not entryrdn or nsuniqueid */
continue;
}
}
--
2.31.1


@@ -48,7 +48,7 @@ ExcludeArch: i686
Summary: 389 Directory Server (base)
Name: 389-ds-base
Version: 1.4.3.23
Release: %{?relprefix}10%{?prerel}%{?dist}
Release: %{?relprefix}12%{?prerel}%{?dist}
License: GPLv3+
URL: https://www.port389.org
Group: System Environment/Daemons
@@ -267,7 +267,9 @@ Patch27: 0027-Issue-4734-import-of-entry-with-no-parent-warning-47.patc
Patch28: 0028-Issue-4872-BUG-entryuuid-enabled-by-default-causes-r.patch
Patch29: 0029-Remove-GOST-YESCRYPT-password-sotrage-scheme.patch
Patch30: 0030-Issue-4884-server-crashes-when-dnaInterval-attribute.patch
Patch31: 0031-Issue-4925-Performance-ACI-targetfilter-evaluation-r.patch
Patch32: 0032-Issue-4972-gecos-with-IA5-introduces-a-compatibility.patch
Patch33: 0033-Issue-4910-db-reindex-corrupts-RUV-tombstone-nsuique.patch
%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
@@ -886,6 +888,15 @@ exit 0
%doc README.md
%changelog
* Thu Nov 18 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-12
- Bump version to 1.4.3.23-12
- Resolves: Bug 2024697 - DB corruption "_entryrdn_insert_key - Same DN (dn: nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff,<SUFFIX>) is already in the entryrdn file"
* Wed Nov 10 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-11
- Bump version to 1.4.3.23-11
- Resolves: Bug 2022005 - ipa user-add fails with "gecos: invalid per syntax: Invalid syntax"
- Resolves: Bug 2021998 - IPA server is very slow in execution of some searches
* Thu Aug 26 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-10
- Bump version to 1.4.3.23-10
- Resolves: Bug 1997138 - LDAP server crashes when dnaInterval attribute is set to 0