import 389-ds-base-1.4.3.8-6.module+el8.3.0+8995+c08169ba

Commit b342bd146c, authored by CentOS Sources on 2020-12-15 11:06:49 -05:00 and committed by Andrew Lukoshko.
38 changed files with 9915 additions and 0 deletions

.389-ds-base.metadata (new file)

@ -0,0 +1,2 @@
7e651c99e43265c678c98ac2d8e31b8c48522be6 SOURCES/389-ds-base-1.4.3.8.tar.bz2
9e06b5cc57fd185379d007696da153893cf73e30 SOURCES/jemalloc-5.2.1.tar.bz2

.gitignore (new file, vendored)

@ -0,0 +1,2 @@
SOURCES/389-ds-base-1.4.3.8.tar.bz2
SOURCES/jemalloc-5.2.1.tar.bz2


@ -0,0 +1,43 @@
From 97ecf0190f264a2d87750bc2d26ebf011542e3e1 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 8 May 2020 10:52:43 -0400
Subject: [PATCH 01/12] Issue 51076 - prevent unnecessary duplication of the
 target entry

Bug Description: For any update operation the MEP plugin was calling
slapi_search_internal_get_entry(), which duplicates the entry it
returns. In this case the entry is only read and then discarded, yet
the same entry is already in the pblock (the PRE OP ENTRY).

Fix Description: Just grab the PRE OP ENTRY from the pblock and read
the attribute values from that. This saves two entry duplications for
every update operation handled by MEP.

fixes: https://pagure.io/389-ds-base/issue/51076

Reviewed by: tbordaz & firstyear (Thanks!!)
---
ldap/servers/plugins/mep/mep.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/ldap/servers/plugins/mep/mep.c b/ldap/servers/plugins/mep/mep.c
index ca9a64b3b..401d95e3a 100644
--- a/ldap/servers/plugins/mep/mep.c
+++ b/ldap/servers/plugins/mep/mep.c
@@ -2165,9 +2165,8 @@ mep_pre_op(Slapi_PBlock *pb, int modop)
if (e && free_entry) {
slapi_entry_free(e);
}
-
- slapi_search_internal_get_entry(sdn, 0, &e, mep_get_plugin_id());
- free_entry = 1;
+ slapi_pblock_get(pb, SLAPI_ENTRY_PRE_OP, &e);
+ free_entry = 0;
}
if (e && mep_is_managed_entry(e)) {
--
2.26.2
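To illustrate the pattern this patch introduces, here is a minimal sketch of a pre-op callback fetching its target entry from the pblock instead of running an internal search. It assumes the in-tree slapi-plugin.h API; the helper name example_preop_get_target() is made up and error handling is omitted.

#include "slapi-plugin.h"

/* Hypothetical helper: obtain the target entry inside a MOD pre-op callback. */
static Slapi_Entry *
example_preop_get_target(Slapi_PBlock *pb)
{
    Slapi_Entry *e = NULL;

    /* Old pattern: run an internal search, which duplicates the entry,
     * only to read it and free it again:
     *
     *   Slapi_DN *sdn = NULL;
     *   slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn);
     *   slapi_search_internal_get_entry(sdn, 0, &e, plugin_id);
     *   ... read attributes ...
     *   slapi_entry_free(e);
     */

    /* New pattern: the server already stashed the pre-op entry in the
     * pblock, so just borrow it. The entry is owned by the operation,
     * so the caller must NOT free it. */
    slapi_pblock_get(pb, SLAPI_ENTRY_PRE_OP, &e);
    return e; /* may be NULL if no pre-op entry is available */
}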


@ -0,0 +1,116 @@
From 1426f086623404ab2eacb04de7e6414177c0993a Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Mon, 11 May 2020 17:11:49 +0200
Subject: [PATCH 02/12] Ticket 51082 - abort when an empty valueset is freed

Bug Description:
A large valueset (more than 10 values) manages a sorted array of values.
Replication purges old values from a valueset (valueset_array_purge); if it purges all the values,
the valueset is freed (slapi_valueset_done).
The problem is that the counter of values in the valueset still reflects the initial number
of values (before the purge). When the now-empty valueset is freed, a safety check
detects incoherent values based on the stale counter and aborts.

Fix Description:
When all the values have been purged, reset the counter before freeing the valueset.

https://pagure.io/389-ds-base/issue/51082

Reviewed by: Mark Reynolds
Platforms tested: F30
Flag Day: no
Doc impact: no
---
.../suites/replication/acceptance_test.py | 57 +++++++++++++++++++
ldap/servers/slapd/valueset.c | 4 ++
2 files changed, 61 insertions(+)
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
index c8e0a4c93..5009f4e7c 100644
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
@@ -500,6 +500,63 @@ def test_warining_for_invalid_replica(topo_m4):
assert topo_m4.ms["master1"].ds_error_log.match('.*nsds5ReplicaBackoffMax.*10.*invalid.*')
+@pytest.mark.ds51082
+def test_csnpurge_large_valueset(topo_m2):
+ """Test csn generator test
+
+ :id: 63e2bdb2-0a8f-4660-9465-7b80a9f72a74
+ :setup: MMR with 2 masters
+ :steps:
+ 1. Create a test_user
+ 2. add a large set of values (more than 10)
+ 3. delete all the values (more than 10)
+ 4. configure the replica to purge those values (purgedelay=5s)
+ 5. Waiting for 6 second
+ 6. do a series of update
+ :expectedresults:
+ 1. Should succeeds
+ 2. Should succeeds
+ 3. Should succeeds
+ 4. Should succeeds
+ 5. Should succeeds
+ 6. Should not crash
+ """
+ m1 = topo_m2.ms["master2"]
+
+ test_user = UserAccount(m1, TEST_ENTRY_DN)
+ if test_user.exists():
+ log.info('Deleting entry {}'.format(TEST_ENTRY_DN))
+ test_user.delete()
+ test_user.create(properties={
+ 'uid': TEST_ENTRY_NAME,
+ 'cn': TEST_ENTRY_NAME,
+ 'sn': TEST_ENTRY_NAME,
+ 'userPassword': TEST_ENTRY_NAME,
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/mmrepl_test',
+ })
+
+ # create a large value set so that it is sorted
+ for i in range(1,20):
+ test_user.add('description', 'value {}'.format(str(i)))
+
+ # delete all values of the valueset
+ for i in range(1,20):
+ test_user.remove('description', 'value {}'.format(str(i)))
+
+ # set purging delay to 5 second and wait more that 5second
+ replicas = Replicas(m1)
+ replica = replicas.list()[0]
+ log.info('nsds5ReplicaPurgeDelay to 5')
+ replica.set('nsds5ReplicaPurgeDelay', '5')
+ time.sleep(6)
+
+ # add some new values to the valueset containing entries that should be purged
+ for i in range(21,25):
+ test_user.add('description', 'value {}'.format(str(i)))
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/valueset.c b/ldap/servers/slapd/valueset.c
index 2af3ee18d..12027ecb8 100644
--- a/ldap/servers/slapd/valueset.c
+++ b/ldap/servers/slapd/valueset.c
@@ -801,6 +801,10 @@ valueset_array_purge(const Slapi_Attr *a, Slapi_ValueSet *vs, const CSN *csn)
}
}
} else {
+ /* empty valueset - reset the vs->num so that further
+ * checking will not abort
+ */
+ vs->num = 0;
slapi_valueset_done(vs);
}
--
2.26.2
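To make the failure mode concrete, the following toy program models the counter/array mismatch described above. It is not the real Slapi_ValueSet code, just a simplified illustration of why freeing a fully purged set with a stale counter aborts, and why resetting the counter fixes it.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Toy model (not the real Slapi_ValueSet layout): a value set keeps a
 * counter next to its array of values. */
typedef struct {
    size_t num;   /* how many values the set believes it holds */
    char **va;    /* NULL-terminated array of values           */
} toy_valueset;

/* Like slapi_valueset_done(), sanity-check that the counter matches the
 * array before tearing the set down. */
static void
toy_valueset_done(toy_valueset *vs)
{
    size_t actual = 0;
    while (vs->va && vs->va[actual] != NULL) {
        free(vs->va[actual++]);
    }
    assert(vs->num == actual);  /* a stale counter trips this check */
    free(vs->va);
    vs->va = NULL;
    vs->num = 0;
}

/* Mimic valueset_array_purge() purging every value. */
static void
toy_purge_all(toy_valueset *vs)
{
    for (size_t i = 0; i < vs->num; i++) {
        free(vs->va[i]);
        vs->va[i] = NULL;
    }
    vs->num = 0;            /* the fix: without this reset the check above aborts */
    toy_valueset_done(vs);  /* frees the now-empty set */
}

int
main(void)
{
    toy_valueset vs = { 3, calloc(4, sizeof(char *)) };
    for (size_t i = 0; i < vs.num; i++) {
        vs.va[i] = strdup("value");
    }
    toy_purge_all(&vs);
    return 0;
}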


@ -0,0 +1,45 @@
From 7a62e72b81d75ebb844835619ecc97dbf5e21058 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 14 May 2020 09:38:20 -0400
Subject: [PATCH 03/12] Issue 51091 - healthcheck json report fails when
 mapping tree is deleted

Description: We were passing the bename as bytes rather than as a utf-8
string, which caused the json dumping to fail.

relates: https://pagure.io/389-ds-base/issue/51091

Reviewed by: firstyear (Thanks!)
---
src/lib389/lib389/backend.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index e472d3de5..4f752f414 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -11,7 +11,7 @@ import copy
import ldap
from lib389._constants import *
from lib389.properties import *
-from lib389.utils import normalizeDN, ensure_str, ensure_bytes, assert_c
+from lib389.utils import normalizeDN, ensure_str, assert_c
from lib389 import Entry
# Need to fix this ....
@@ -488,10 +488,10 @@ class Backend(DSLdapObject):
# Check for the missing mapping tree.
suffix = self.get_attr_val_utf8('nsslapd-suffix')
- bename = self.get_attr_val_bytes('cn')
+ bename = self.get_attr_val_utf8('cn')
try:
mt = self._mts.get(suffix)
- if mt.get_attr_val_bytes('nsslapd-backend') != bename and mt.get_attr_val('nsslapd-state') != ensure_bytes('backend'):
+ if mt.get_attr_val_utf8('nsslapd-backend') != bename and mt.get_attr_val_utf8('nsslapd-state') != 'backend':
raise ldap.NO_SUCH_OBJECT("We have a matching suffix, but not a backend or correct database name.")
except ldap.NO_SUCH_OBJECT:
result = DSBLE0001
--
2.26.2


@ -0,0 +1,943 @@
From f13d630ff98eb5b5505f1db3e7f207175b51b237 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 12 May 2020 13:48:30 -0400
Subject: [PATCH 04/12] Issue 51076 - remove unnecessary slapi entry dups

Description: The problem is that slapi_search_internal_get_entry()
duplicates the entry twice. It does that as a convenience: it
allocates a pblock, does the search, copies the entry, frees the
search results from the pblock, and then frees the pblock itself.
I basically split this function into two functions. One function
allocates the pblock, does the search, and returns the entry. The
other function frees the entries and the pblock.

99% of the time when we call slapi_search_internal_get_entry() we are
just reading the entry and freeing it; it is not being consumed. In
these cases the two-function approach eliminates an extra
slapi_entry_dup(). Over the lifetime of an operation/connection this
saves quite a bit of mallocing/freeing, and it could also help with
memory fragmentation.

ASAN: passed

relates: https://pagure.io/389-ds-base/issue/51076

Reviewed by: firstyear & tbordaz (Thanks!)
---
ldap/servers/plugins/acctpolicy/acct_config.c | 6 +--
ldap/servers/plugins/acctpolicy/acct_plugin.c | 36 +++++++-------
ldap/servers/plugins/acctpolicy/acct_util.c | 6 +--
ldap/servers/plugins/automember/automember.c | 17 +++----
ldap/servers/plugins/dna/dna.c | 23 ++++-----
ldap/servers/plugins/memberof/memberof.c | 16 +++----
.../plugins/pam_passthru/pam_ptconfig.c | 10 ++--
.../servers/plugins/pam_passthru/pam_ptimpl.c | 7 +--
.../plugins/pam_passthru/pam_ptpreop.c | 9 ++--
.../plugins/replication/repl5_tot_protocol.c | 5 +-
ldap/servers/plugins/uiduniq/uid.c | 23 ++++-----
ldap/servers/slapd/daemon.c | 11 ++---
ldap/servers/slapd/modify.c | 12 +++--
ldap/servers/slapd/plugin_internal_op.c | 48 +++++++++++++++++++
ldap/servers/slapd/resourcelimit.c | 13 ++---
ldap/servers/slapd/schema.c | 7 ++-
ldap/servers/slapd/slapi-plugin.h | 23 ++++++++-
17 files changed, 161 insertions(+), 111 deletions(-)
diff --git a/ldap/servers/plugins/acctpolicy/acct_config.c b/ldap/servers/plugins/acctpolicy/acct_config.c
index fe35ba5a0..01e4f319f 100644
--- a/ldap/servers/plugins/acctpolicy/acct_config.c
+++ b/ldap/servers/plugins/acctpolicy/acct_config.c
@@ -37,6 +37,7 @@ static int acct_policy_entry2config(Slapi_Entry *e,
int
acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void *plugin_id)
{
+ Slapi_PBlock *entry_pb = NULL;
acctPluginCfg *newcfg;
Slapi_Entry *config_entry = NULL;
Slapi_DN *config_sdn = NULL;
@@ -44,8 +45,7 @@ acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void *
/* Retrieve the config entry */
config_sdn = slapi_sdn_new_normdn_byref(PLUGIN_CONFIG_DN);
- rc = slapi_search_internal_get_entry(config_sdn, NULL, &config_entry,
- plugin_id);
+ rc = slapi_search_get_entry(&entry_pb, config_sdn, NULL, &config_entry, plugin_id);
slapi_sdn_free(&config_sdn);
if (rc != LDAP_SUCCESS || config_entry == NULL) {
@@ -60,7 +60,7 @@ acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void *
rc = acct_policy_entry2config(config_entry, newcfg);
config_unlock();
- slapi_entry_free(config_entry);
+ slapi_search_get_entry_done(&entry_pb);
return (rc);
}
diff --git a/ldap/servers/plugins/acctpolicy/acct_plugin.c b/ldap/servers/plugins/acctpolicy/acct_plugin.c
index 2a876ad72..c3c32b074 100644
--- a/ldap/servers/plugins/acctpolicy/acct_plugin.c
+++ b/ldap/servers/plugins/acctpolicy/acct_plugin.c
@@ -209,6 +209,7 @@ done:
int
acct_bind_preop(Slapi_PBlock *pb)
{
+ Slapi_PBlock *entry_pb = NULL;
const char *dn = NULL;
Slapi_DN *sdn = NULL;
Slapi_Entry *target_entry = NULL;
@@ -236,8 +237,7 @@ acct_bind_preop(Slapi_PBlock *pb)
goto done;
}
- ldrc = slapi_search_internal_get_entry(sdn, NULL, &target_entry,
- plugin_id);
+ ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &target_entry, plugin_id);
/* There was a problem retrieving the entry */
if (ldrc != LDAP_SUCCESS) {
@@ -275,7 +275,7 @@ done:
slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL);
}
- slapi_entry_free(target_entry);
+ slapi_search_get_entry_done(&entry_pb);
free_acctpolicy(&policy);
@@ -293,6 +293,7 @@ done:
int
acct_bind_postop(Slapi_PBlock *pb)
{
+ Slapi_PBlock *entry_pb = NULL;
char *dn = NULL;
int ldrc, tracklogin = 0;
int rc = 0; /* Optimistic default */
@@ -327,8 +328,7 @@ acct_bind_postop(Slapi_PBlock *pb)
covered by an account policy to decide whether we should track */
if (tracklogin == 0) {
sdn = slapi_sdn_new_normdn_byref(dn);
- ldrc = slapi_search_internal_get_entry(sdn, NULL, &target_entry,
- plugin_id);
+ ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &target_entry, plugin_id);
if (ldrc != LDAP_SUCCESS) {
slapi_log_err(SLAPI_LOG_ERR, POST_PLUGIN_NAME,
@@ -355,7 +355,7 @@ done:
slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL);
}
- slapi_entry_free(target_entry);
+ slapi_search_get_entry_done(&entry_pb);
slapi_sdn_free(&sdn);
@@ -370,11 +370,11 @@ done:
static int
acct_pre_op(Slapi_PBlock *pb, int modop)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *sdn = 0;
Slapi_Entry *e = 0;
Slapi_Mods *smods = 0;
LDAPMod **mods;
- int free_entry = 0;
char *errstr = NULL;
int ret = SLAPI_PLUGIN_SUCCESS;
@@ -384,28 +384,25 @@ acct_pre_op(Slapi_PBlock *pb, int modop)
if (acct_policy_dn_is_config(sdn)) {
/* Validate config changes, but don't apply them.
- * This allows us to reject invalid config changes
- * here at the pre-op stage. Applying the config
- * needs to be done at the post-op stage. */
+ * This allows us to reject invalid config changes
+ * here at the pre-op stage. Applying the config
+ * needs to be done at the post-op stage. */
if (LDAP_CHANGETYPE_ADD == modop) {
slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &e);
- /* If the entry doesn't exist, just bail and
- * let the server handle it. */
+ /* If the entry doesn't exist, just bail and let the server handle it. */
if (e == NULL) {
goto bail;
}
} else if (LDAP_CHANGETYPE_MODIFY == modop) {
/* Fetch the entry being modified so we can
- * create the resulting entry for validation. */
+ * create the resulting entry for validation. */
if (sdn) {
- slapi_search_internal_get_entry(sdn, 0, &e, get_identity());
- free_entry = 1;
+ slapi_search_get_entry(&entry_pb, sdn, 0, &e, get_identity());
}
- /* If the entry doesn't exist, just bail and
- * let the server handle it. */
+ /* If the entry doesn't exist, just bail and let the server handle it. */
if (e == NULL) {
goto bail;
}
@@ -418,7 +415,7 @@ acct_pre_op(Slapi_PBlock *pb, int modop)
/* Apply the mods to create the resulting entry. */
if (mods && (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS)) {
/* The mods don't apply cleanly, so we just let this op go
- * to let the main server handle it. */
+ * to let the main server handle it. */
goto bailmod;
}
} else if (modop == LDAP_CHANGETYPE_DELETE) {
@@ -439,8 +436,7 @@ bailmod:
}
bail:
- if (free_entry && e)
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
if (ret) {
slapi_log_err(SLAPI_LOG_PLUGIN, PRE_PLUGIN_NAME,
diff --git a/ldap/servers/plugins/acctpolicy/acct_util.c b/ldap/servers/plugins/acctpolicy/acct_util.c
index f25a3202d..f432092fe 100644
--- a/ldap/servers/plugins/acctpolicy/acct_util.c
+++ b/ldap/servers/plugins/acctpolicy/acct_util.c
@@ -85,6 +85,7 @@ get_attr_string_val(Slapi_Entry *target_entry, char *attr_name)
int
get_acctpolicy(Slapi_PBlock *pb __attribute__((unused)), Slapi_Entry *target_entry, void *plugin_id, acctPolicy **policy)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *sdn = NULL;
Slapi_Entry *policy_entry = NULL;
Slapi_Attr *attr;
@@ -123,8 +124,7 @@ get_acctpolicy(Slapi_PBlock *pb __attribute__((unused)), Slapi_Entry *target_ent
}
sdn = slapi_sdn_new_dn_byref(policy_dn);
- ldrc = slapi_search_internal_get_entry(sdn, NULL, &policy_entry,
- plugin_id);
+ ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &policy_entry, plugin_id);
slapi_sdn_free(&sdn);
/* There should be a policy but it can't be retrieved; fatal error */
@@ -160,7 +160,7 @@ dopolicy:
done:
config_unlock();
slapi_ch_free_string(&policy_dn);
- slapi_entry_free(policy_entry);
+ slapi_search_get_entry_done(&entry_pb);
return (rc);
}
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
index 7c875c852..39350ad53 100644
--- a/ldap/servers/plugins/automember/automember.c
+++ b/ldap/servers/plugins/automember/automember.c
@@ -1629,13 +1629,12 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
char *member_value = NULL;
int rc = 0;
Slapi_DN *group_sdn;
- Slapi_Entry *group_entry = NULL;
/* First thing check that the group still exists */
group_sdn = slapi_sdn_new_dn_byval(group_dn);
- rc = slapi_search_internal_get_entry(group_sdn, NULL, &group_entry, automember_get_plugin_id());
+ rc = slapi_search_internal_get_entry(group_sdn, NULL, NULL, automember_get_plugin_id());
slapi_sdn_free(&group_sdn);
- if (rc != LDAP_SUCCESS || group_entry == NULL) {
+ if (rc != LDAP_SUCCESS) {
if (rc == LDAP_NO_SUCH_OBJECT) {
/* the automember group (default or target) does not exist, just skip this definition */
slapi_log_err(SLAPI_LOG_INFO, AUTOMEMBER_PLUGIN_SUBSYSTEM,
@@ -1647,10 +1646,8 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
"automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n",
group_dn, rc);
}
- slapi_entry_free(group_entry);
return rc;
}
- slapi_entry_free(group_entry);
/* If grouping_value is dn, we need to fetch the dn instead. */
if (slapi_attr_type_cmp(grouping_value, "dn", SLAPI_TYPE_CMP_EXACT) == 0) {
@@ -1752,11 +1749,11 @@ out:
static int
automember_pre_op(Slapi_PBlock *pb, int modop)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *sdn = 0;
Slapi_Entry *e = 0;
Slapi_Mods *smods = 0;
LDAPMod **mods;
- int free_entry = 0;
char *errstr = NULL;
int ret = SLAPI_PLUGIN_SUCCESS;
@@ -1784,8 +1781,7 @@ automember_pre_op(Slapi_PBlock *pb, int modop)
/* Fetch the entry being modified so we can
* create the resulting entry for validation. */
if (sdn) {
- slapi_search_internal_get_entry(sdn, 0, &e, automember_get_plugin_id());
- free_entry = 1;
+ slapi_search_get_entry(&entry_pb, sdn, 0, &e, automember_get_plugin_id());
}
/* If the entry doesn't exist, just bail and
@@ -1799,7 +1795,7 @@ automember_pre_op(Slapi_PBlock *pb, int modop)
smods = slapi_mods_new();
slapi_mods_init_byref(smods, mods);
- /* Apply the mods to create the resulting entry. */
+ /* Apply the mods to create the resulting entry. */
if (mods && (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS)) {
/* The mods don't apply cleanly, so we just let this op go
* to let the main server handle it. */
@@ -1831,8 +1827,7 @@ bailmod:
}
bail:
- if (free_entry && e)
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
if (ret) {
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index 1ee271359..16c625bb0 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1178,7 +1178,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
value = slapi_entry_attr_get_charptr(e, DNA_SHARED_CFG_DN);
if (value) {
- Slapi_Entry *shared_e = NULL;
Slapi_DN *sdn = NULL;
char *normdn = NULL;
char *attrs[2];
@@ -1197,10 +1196,8 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
/* We don't need attributes */
attrs[0] = "cn";
attrs[1] = NULL;
- slapi_search_internal_get_entry(sdn, attrs, &shared_e, getPluginID());
-
/* Make sure that the shared config entry exists. */
- if (!shared_e) {
+ if(slapi_search_internal_get_entry(sdn, attrs, NULL, getPluginID()) != LDAP_SUCCESS) {
/* We didn't locate the shared config container entry. Log
* a message and skip this config entry. */
slapi_log_err(SLAPI_LOG_ERR, DNA_PLUGIN_SUBSYSTEM,
@@ -1210,9 +1207,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
ret = DNA_FAILURE;
slapi_sdn_free(&sdn);
goto bail;
- } else {
- slapi_entry_free(shared_e);
- shared_e = NULL;
}
normdn = (char *)slapi_sdn_get_dn(sdn);
@@ -1539,6 +1533,7 @@ dna_delete_shared_servers(PRCList **servers)
static int
dna_load_host_port(void)
{
+ Slapi_PBlock *pb = NULL;
int status = DNA_SUCCESS;
Slapi_Entry *e = NULL;
Slapi_DN *config_dn = NULL;
@@ -1554,7 +1549,7 @@ dna_load_host_port(void)
config_dn = slapi_sdn_new_ndn_byref("cn=config");
if (config_dn) {
- slapi_search_internal_get_entry(config_dn, attrs, &e, getPluginID());
+ slapi_search_get_entry(&pb, config_dn, attrs, &e, getPluginID());
slapi_sdn_free(&config_dn);
}
@@ -1562,8 +1557,8 @@ dna_load_host_port(void)
hostname = slapi_entry_attr_get_charptr(e, "nsslapd-localhost");
portnum = slapi_entry_attr_get_charptr(e, "nsslapd-port");
secureportnum = slapi_entry_attr_get_charptr(e, "nsslapd-secureport");
- slapi_entry_free(e);
}
+ slapi_search_get_entry_done(&pb);
if (!hostname || !portnum) {
status = DNA_FAILURE;
@@ -2876,6 +2871,7 @@ bail:
static int
dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
{
+ Slapi_PBlock *entry_pb = NULL;
char *replica_dn = NULL;
Slapi_DN *replica_sdn = NULL;
Slapi_DN *range_sdn = NULL;
@@ -2912,8 +2908,7 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
attrs[2] = 0;
/* Find cn=replica entry via search */
- slapi_search_internal_get_entry(replica_sdn, attrs, &e, getPluginID());
-
+ slapi_search_get_entry(&entry_pb, replica_sdn, attrs, &e, getPluginID());
if (e) {
/* Check if the passed in bind dn matches any of the replica bind dns. */
Slapi_Value *bind_dn_sv = slapi_value_new_string(bind_dn);
@@ -2927,6 +2922,7 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
attrs[0] = "member";
attrs[1] = "uniquemember";
attrs[2] = 0;
+ slapi_search_get_entry_done(&entry_pb);
for (i = 0; bind_group_dn != NULL && bind_group_dn[i] != NULL; i++) {
if (ret) {
/* already found a member, just free group */
@@ -2934,14 +2930,14 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
continue;
}
bind_group_sdn = slapi_sdn_new_normdn_passin(bind_group_dn[i]);
- slapi_search_internal_get_entry(bind_group_sdn, attrs, &bind_group_entry, getPluginID());
+ slapi_search_get_entry(&entry_pb, bind_group_sdn, attrs, &bind_group_entry, getPluginID());
if (bind_group_entry) {
ret = slapi_entry_attr_has_syntax_value(bind_group_entry, "member", bind_dn_sv);
if (ret == 0) {
ret = slapi_entry_attr_has_syntax_value(bind_group_entry, "uniquemember", bind_dn_sv);
}
}
- slapi_entry_free(bind_group_entry);
+ slapi_search_get_entry_done(&entry_pb);
slapi_sdn_free(&bind_group_sdn);
}
slapi_ch_free((void **)&bind_group_dn);
@@ -2956,7 +2952,6 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
}
done:
- slapi_entry_free(e);
slapi_sdn_free(&range_sdn);
slapi_sdn_free(&replica_sdn);
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index 40bd4b380..e9e1ec4c7 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -884,7 +884,7 @@ memberof_postop_modrdn(Slapi_PBlock *pb)
pre_sdn = slapi_entry_get_sdn(pre_e);
post_sdn = slapi_entry_get_sdn(post_e);
}
-
+
if (pre_sdn && post_sdn && slapi_sdn_compare(pre_sdn, post_sdn) == 0) {
/* Regarding memberof plugin, this rename is a no-op
* but it can be expensive to process it. So skip it
@@ -1466,6 +1466,7 @@ memberof_modop_one_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_op, Slapi
int
memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_op, Slapi_DN *group_sdn, Slapi_DN *op_this_sdn, Slapi_DN *replace_with_sdn, Slapi_DN *op_to_sdn, memberofstringll *stack)
{
+ Slapi_PBlock *entry_pb = NULL;
int rc = 0;
LDAPMod mod;
LDAPMod replace_mod;
@@ -1515,8 +1516,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o
}
/* determine if this is a group op or single entry */
- slapi_search_internal_get_entry(op_to_sdn, config->groupattrs,
- &e, memberof_get_plugin_id());
+ slapi_search_get_entry(&entry_pb, op_to_sdn, config->groupattrs, &e, memberof_get_plugin_id());
if (!e) {
/* In the case of a delete, we need to worry about the
* missing entry being a nested group. There's a small
@@ -1751,7 +1751,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o
bail:
slapi_value_free(&to_dn_val);
slapi_value_free(&this_dn_val);
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
return rc;
}
@@ -2368,6 +2368,7 @@ bail:
int
memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Value *memberdn)
{
+ Slapi_PBlock *pb = NULL;
int rc = 0;
Slapi_DN *sdn = 0;
Slapi_Entry *group_e = 0;
@@ -2376,8 +2377,8 @@ memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Va
sdn = slapi_sdn_new_normdn_byref(slapi_value_get_string(groupdn));
- slapi_search_internal_get_entry(sdn, config->groupattrs,
- &group_e, memberof_get_plugin_id());
+ slapi_search_get_entry(&pb, sdn, config->groupattrs,
+ &group_e, memberof_get_plugin_id());
if (group_e) {
/* See if memberdn is referred to by any of the group attributes. */
@@ -2388,9 +2389,8 @@ memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Va
break;
}
}
-
- slapi_entry_free(group_e);
}
+ slapi_search_get_entry_done(&pb);
slapi_sdn_free(&sdn);
return rc;
diff --git a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
index 46a76d884..cbec2ec40 100644
--- a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
+++ b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
@@ -749,22 +749,22 @@ pam_passthru_get_config(Slapi_DN *bind_sdn)
if (pam_passthru_check_suffix(cfg, bind_sdn) == LDAP_SUCCESS) {
if (cfg->slapi_filter) {
/* A filter is configured, so see if the bind entry is a match. */
+ Slapi_PBlock *entry_pb = NULL;
Slapi_Entry *test_e = NULL;
/* Fetch the bind entry */
- slapi_search_internal_get_entry(bind_sdn, NULL, &test_e,
- pam_passthruauth_get_plugin_identity());
+ slapi_search_get_entry(&entry_pb, bind_sdn, NULL, &test_e,
+ pam_passthruauth_get_plugin_identity());
/* If the entry doesn't exist, just fall through to the main server code */
if (test_e) {
/* Evaluate the filter. */
if (LDAP_SUCCESS == slapi_filter_test_simple(test_e, cfg->slapi_filter)) {
/* This is a match. */
- slapi_entry_free(test_e);
+ slapi_search_get_entry_done(&entry_pb);
goto done;
}
-
- slapi_entry_free(test_e);
+ slapi_search_get_entry_done(&entry_pb);
}
} else {
/* There is no filter to check, so this is a match. */
diff --git a/ldap/servers/plugins/pam_passthru/pam_ptimpl.c b/ldap/servers/plugins/pam_passthru/pam_ptimpl.c
index 7f5fb02c4..5b43f8d1f 100644
--- a/ldap/servers/plugins/pam_passthru/pam_ptimpl.c
+++ b/ldap/servers/plugins/pam_passthru/pam_ptimpl.c
@@ -81,11 +81,12 @@ derive_from_bind_dn(Slapi_PBlock *pb __attribute__((unused)), const Slapi_DN *bi
static char *
derive_from_bind_entry(Slapi_PBlock *pb, const Slapi_DN *bindsdn, MyStrBuf *pam_id, char *map_ident_attr, int *locked)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_Entry *entry = NULL;
char *attrs[] = {NULL, NULL};
attrs[0] = map_ident_attr;
- int rc = slapi_search_internal_get_entry((Slapi_DN *)bindsdn, attrs, &entry,
- pam_passthruauth_get_plugin_identity());
+ int32_t rc = slapi_search_get_entry(&entry_pb, (Slapi_DN *)bindsdn, attrs, &entry,
+ pam_passthruauth_get_plugin_identity());
if (rc != LDAP_SUCCESS) {
slapi_log_err(SLAPI_LOG_ERR, PAM_PASSTHRU_PLUGIN_SUBSYSTEM,
@@ -108,7 +109,7 @@ derive_from_bind_entry(Slapi_PBlock *pb, const Slapi_DN *bindsdn, MyStrBuf *pam_
init_my_str_buf(pam_id, val);
}
- slapi_entry_free(entry);
+ slapi_search_get_entry_done(&entry_pb);
return pam_id->str;
}
diff --git a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
index 3d0067531..5bca823ff 100644
--- a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
+++ b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
@@ -526,6 +526,7 @@ done:
static int
pam_passthru_preop(Slapi_PBlock *pb, int modtype)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *sdn = NULL;
Slapi_Entry *e = NULL;
LDAPMod **mods;
@@ -555,8 +556,8 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype)
case LDAP_CHANGETYPE_MODIFY:
/* Fetch the entry being modified so we can
* create the resulting entry for validation. */
- slapi_search_internal_get_entry(sdn, 0, &e,
- pam_passthruauth_get_plugin_identity());
+ slapi_search_get_entry(&entry_pb, sdn, 0, &e,
+ pam_passthruauth_get_plugin_identity());
/* If the entry doesn't exist, just bail and
* let the server handle it. */
@@ -576,9 +577,6 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype)
/* Don't bail here, as we need to free the entry. */
}
}
-
- /* Free the entry. */
- slapi_entry_free(e);
break;
case LDAP_CHANGETYPE_DELETE:
case LDAP_CHANGETYPE_MODDN:
@@ -591,6 +589,7 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype)
}
bail:
+ slapi_search_get_entry_done(&entry_pb);
/* If we are refusing the operation, return the result to the client. */
if (ret) {
slapi_send_ldap_result(pb, ret, NULL, returntext, 0, NULL);
diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
index 3b65d6b20..a25839f21 100644
--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
@@ -469,7 +469,8 @@ retry:
*/
/* Get suffix */
Slapi_Entry *suffix = NULL;
- rc = slapi_search_internal_get_entry(area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION));
+ Slapi_PBlock *suffix_pb = NULL;
+ rc = slapi_search_get_entry(&suffix_pb, area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION));
if (rc) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "repl5_tot_run - Unable to "
"get the suffix entry \"%s\".\n",
@@ -517,7 +518,7 @@ retry:
LDAP_SCOPE_SUBTREE, "(parentid>=1)", NULL, 0, ctrls, NULL,
repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), OP_FLAG_BULK_IMPORT);
cb_data.num_entries = 0UL;
- slapi_entry_free(suffix);
+ slapi_search_get_entry_done(&suffix_pb);
} else {
/* Original total update */
/* we need to provide managedsait control so that referral entries can
diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
index d7ccf0e07..e69012204 100644
--- a/ldap/servers/plugins/uiduniq/uid.c
+++ b/ldap/servers/plugins/uiduniq/uid.c
@@ -1254,6 +1254,7 @@ preop_modify(Slapi_PBlock *pb)
static int
preop_modrdn(Slapi_PBlock *pb)
{
+ Slapi_PBlock *entry_pb = NULL;
int result = LDAP_SUCCESS;
Slapi_Entry *e = NULL;
Slapi_Value *sv_requiredObjectClass = NULL;
@@ -1351,7 +1352,7 @@ preop_modrdn(Slapi_PBlock *pb)
/* Get the entry that is being renamed so we can make a dummy copy
* of what it will look like after the rename. */
- err = slapi_search_internal_get_entry(sdn, NULL, &e, plugin_identity);
+ err = slapi_search_get_entry(&entry_pb, sdn, NULL, &e, plugin_identity);
if (err != LDAP_SUCCESS) {
result = uid_op_error(35);
/* We want to return a no such object error if the target doesn't exist. */
@@ -1371,24 +1372,24 @@ preop_modrdn(Slapi_PBlock *pb)
/*
- * Check if it has the required object class
- */
+ * Check if it has the required object class
+ */
if (requiredObjectClass &&
!slapi_entry_attr_has_syntax_value(e, SLAPI_ATTR_OBJECTCLASS, sv_requiredObjectClass)) {
break;
}
/*
- * Find any unique attribute data in the new RDN
- */
+ * Find any unique attribute data in the new RDN
+ */
for (i = 0; attrNames && attrNames[i]; i++) {
err = slapi_entry_attr_find(e, attrNames[i], &attr);
if (!err) {
/*
- * Passed all the requirements - this is an operation we
- * need to enforce uniqueness on. Now find all parent entries
- * with the marker object class, and do a search for each one.
- */
+ * Passed all the requirements - this is an operation we
+ * need to enforce uniqueness on. Now find all parent entries
+ * with the marker object class, and do a search for each one.
+ */
if (NULL != markerObjectClass) {
/* Subtree defined by location of marker object class */
result = findSubtreeAndSearch(slapi_entry_get_sdn(e), attrNames, attr, NULL,
@@ -1407,8 +1408,8 @@ preop_modrdn(Slapi_PBlock *pb)
END
/* Clean-up */
slapi_value_free(&sv_requiredObjectClass);
- if (e)
- slapi_entry_free(e);
+
+ slapi_search_get_entry_done(&entry_pb);
if (result) {
slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name,
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 65f23363a..a70f40316 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1916,18 +1916,13 @@ slapd_bind_local_user(Connection *conn)
char *root_dn = config_get_ldapi_root_dn();
if (root_dn) {
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *edn = slapi_sdn_new_dn_byref(
slapi_dn_normalize(root_dn));
Slapi_Entry *e = 0;
/* root might be locked too! :) */
- ret = slapi_search_internal_get_entry(
- edn, 0,
- &e,
- (void *)plugin_get_default_component_id()
-
- );
-
+ ret = slapi_search_get_entry(&entry_pb, edn, 0, &e, (void *)plugin_get_default_component_id());
if (0 == ret && e) {
ret = slapi_check_account_lock(
0, /* pb not req */
@@ -1955,7 +1950,7 @@ slapd_bind_local_user(Connection *conn)
root_map_free:
/* root_dn consumed by bind creds set */
slapi_sdn_free(&edn);
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
ret = 0;
}
}
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
index bbc0ab71a..259bedfff 100644
--- a/ldap/servers/slapd/modify.c
+++ b/ldap/servers/slapd/modify.c
@@ -592,6 +592,7 @@ modify_internal_pb(Slapi_PBlock *pb)
static void
op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_Backend *be = NULL;
Slapi_Entry *pse;
Slapi_Entry *referral;
@@ -723,7 +724,7 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
* 2. If yes, then if the mods contain any passwdpolicy specific attributes.
* 3. If yes, then it invokes corrosponding checking function.
*/
- if (!repl_op && !internal_op && normdn && (e = get_entry(pb, normdn))) {
+ if (!repl_op && !internal_op && normdn && slapi_search_get_entry(&entry_pb, sdn, NULL, &e, NULL) == LDAP_SUCCESS) {
Slapi_Value target;
slapi_value_init(&target);
slapi_value_set_string(&target, "passwordpolicy");
@@ -1072,7 +1073,7 @@ free_and_return : {
slapi_entry_free(epre);
slapi_entry_free(epost);
}
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
if (be)
slapi_be_Unlock(be);
@@ -1202,12 +1203,13 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M
if (!internal_op) {
/* slapi_acl_check_mods needs an array of LDAPMods, but
* we're really only interested in the one password mod. */
+ Slapi_PBlock *entry_pb = NULL;
LDAPMod *mods[2];
mods[0] = mod;
mods[1] = NULL;
/* We need to actually fetch the target here to use for ACI checking. */
- slapi_search_internal_get_entry(&sdn, NULL, &e, (void *)plugin_get_default_component_id());
+ slapi_search_get_entry(&entry_pb, &sdn, NULL, &e, NULL);
/* Create a bogus entry with just the target dn if we were unable to
* find the actual entry. This will only be used for checking the ACIs. */
@@ -1238,9 +1240,12 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M
}
send_ldap_result(pb, res, NULL, errtxt, 0, NULL);
slapi_ch_free_string(&errtxt);
+ slapi_search_get_entry_done(&entry_pb);
rc = -1;
goto done;
}
+ /* done with slapi entry e */
+ slapi_search_get_entry_done(&entry_pb);
/*
* If this mod is being performed by a password administrator/rootDN,
@@ -1353,7 +1358,6 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M
valuearray_free(&values);
done:
- slapi_entry_free(e);
slapi_sdn_done(&sdn);
slapi_ch_free_string(&proxydn);
slapi_ch_free_string(&proxystr);
diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c
index 9da266b61..a140e7988 100644
--- a/ldap/servers/slapd/plugin_internal_op.c
+++ b/ldap/servers/slapd/plugin_internal_op.c
@@ -882,3 +882,51 @@ slapi_search_internal_get_entry(Slapi_DN *dn, char **attrs, Slapi_Entry **ret_en
int_search_pb = NULL;
return rc;
}
+
+int32_t
+slapi_search_get_entry(Slapi_PBlock **pb, Slapi_DN *dn, char **attrs, Slapi_Entry **ret_entry, void *component_identity)
+{
+ Slapi_Entry **entries = NULL;
+ int32_t rc = 0;
+ void *component = component_identity;
+
+ if (ret_entry) {
+ *ret_entry = NULL;
+ }
+
+ if (component == NULL) {
+ component = (void *)plugin_get_default_component_id();
+ }
+
+ if (*pb == NULL) {
+ *pb = slapi_pblock_new();
+ }
+ slapi_search_internal_set_pb(*pb, slapi_sdn_get_dn(dn), LDAP_SCOPE_BASE,
+ "(|(objectclass=*)(objectclass=ldapsubentry))",
+ attrs, 0, NULL, NULL, component, 0 );
+ slapi_search_internal_pb(*pb);
+ slapi_pblock_get(*pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+ if (LDAP_SUCCESS == rc) {
+ slapi_pblock_get(*pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
+ if (NULL != entries && NULL != entries[0]) {
+ /* Only need to dup the entry if the caller passed ret_entry in. */
+ if (ret_entry) {
+ *ret_entry = entries[0];
+ }
+ } else {
+ rc = LDAP_NO_SUCH_OBJECT;
+ }
+ }
+
+ return rc;
+}
+
+void
+slapi_search_get_entry_done(Slapi_PBlock **pb)
+{
+ if (pb && *pb) {
+ slapi_free_search_results_internal(*pb);
+ slapi_pblock_destroy(*pb);
+ *pb = NULL;
+ }
+}
diff --git a/ldap/servers/slapd/resourcelimit.c b/ldap/servers/slapd/resourcelimit.c
index 705344c84..9c2619716 100644
--- a/ldap/servers/slapd/resourcelimit.c
+++ b/ldap/servers/slapd/resourcelimit.c
@@ -305,22 +305,17 @@ reslimit_get_ext(Slapi_Connection *conn, const char *logname, SLAPIResLimitConnD
int
reslimit_update_from_dn(Slapi_Connection *conn, Slapi_DN *dn)
{
- Slapi_Entry *e;
+ Slapi_PBlock *pb = NULL;
+ Slapi_Entry *e = NULL;
int rc;
- e = NULL;
if (dn != NULL) {
-
char **attrs = reslimit_get_registered_attributes();
- (void)slapi_search_internal_get_entry(dn, attrs, &e, reslimit_componentid);
+ slapi_search_get_entry(&pb, dn, attrs, &e, reslimit_componentid);
charray_free(attrs);
}
-
rc = reslimit_update_from_entry(conn, e);
-
- if (NULL != e) {
- slapi_entry_free(e);
- }
+ slapi_search_get_entry_done(&pb);
return (rc);
}
diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
index d44b03b0e..bf7e59f75 100644
--- a/ldap/servers/slapd/schema.c
+++ b/ldap/servers/slapd/schema.c
@@ -341,6 +341,7 @@ schema_policy_add_action(Slapi_Entry *entry, char *attrName, schema_item_t **lis
static void
schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica)
{
+ Slapi_PBlock *pb = NULL;
Slapi_DN sdn;
Slapi_Entry *entry = NULL;
schema_item_t *schema_item, *next;
@@ -369,8 +370,7 @@ schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica)
/* Load the replication policy of the schema */
slapi_sdn_init_dn_byref(&sdn, dn);
- if (slapi_search_internal_get_entry(&sdn, NULL, &entry, plugin_get_default_component_id()) == LDAP_SUCCESS) {
-
+ if (slapi_search_get_entry(&pb, &sdn, NULL, &entry, plugin_get_default_component_id()) == LDAP_SUCCESS) {
/* fill the policies (accept/reject) regarding objectclass */
schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_OBJECTCLASS_ACCEPT, &replica->objectclasses);
schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_OBJECTCLASS_REJECT, &replica->objectclasses);
@@ -378,9 +378,8 @@ schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica)
/* fill the policies (accept/reject) regarding attribute */
schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_ATTRIBUTE_ACCEPT, &replica->attributes);
schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_ATTRIBUTE_REJECT, &replica->attributes);
-
- slapi_entry_free(entry);
}
+ slapi_search_get_entry_done(&pb);
slapi_sdn_done(&sdn);
}
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 0e3857068..be1e52e4d 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -5972,7 +5972,7 @@ void slapi_seq_internal_set_pb(Slapi_PBlock *pb, char *ibase, int type, char *at
/*
* slapi_search_internal_get_entry() finds an entry given a dn. It returns
- * an LDAP error code (LDAP_SUCCESS if all goes well).
+ * an LDAP error code (LDAP_SUCCESS if all goes well). Caller must free ret_entry
*/
int slapi_search_internal_get_entry(Slapi_DN *dn, char **attrlist, Slapi_Entry **ret_entry, void *caller_identity);
@@ -8296,6 +8296,27 @@ uint64_t slapi_atomic_decr_64(uint64_t *ptr, int memorder);
/* helper function */
const char * slapi_fetch_attr(Slapi_Entry *e, const char *attrname, char *default_val);
+/**
+ * Get a Slapi_Entry via an internal search. The caller then needs to call
+ * slapi_get_entry_done() to free any resources allocated to get the entry
+ *
+ * \param pb - slapi_pblock pointer (the function will allocate if necessary)
+ * \param dn - Slapi_DN of the entry to retrieve
+ * \param attrs - char list of attributes to get
+ * \param ret_entry - pointer to a Slapi_entry wer the returned entry is stored
+ * \param component_identity - plugin component
+ *
+ * \return - ldap result code
+ */
+int32_t slapi_search_get_entry(Slapi_PBlock **pb, Slapi_DN *dn, char **attrs, Slapi_Entry **ret_entry, void *component_identity);
+
+/**
+ * Free the resources allocated by slapi_search_get_entry()
+ *
+ * \param pb - slapi_pblock pointer
+ */
+void slapi_search_get_entry_done(Slapi_PBlock **pb);
+
#ifdef __cplusplus
}
#endif
--
2.26.2
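A minimal usage sketch of the new API pair added above, assuming the in-tree slapi-plugin.h header; the helper name and the choice of the description attribute are illustrative only.

#include "slapi-plugin.h"

/* Hypothetical helper: read one attribute of an entry without taking
 * ownership of it, using the slapi_search_get_entry()/_done() pair. */
static char *
example_get_description(Slapi_DN *sdn, void *plugin_id)
{
    Slapi_PBlock *entry_pb = NULL; /* allocated by slapi_search_get_entry() */
    Slapi_Entry *e = NULL;         /* borrowed, owned by entry_pb           */
    char *attrs[] = { "description", NULL };
    char *value = NULL;

    if (slapi_search_get_entry(&entry_pb, sdn, attrs, &e, plugin_id) == LDAP_SUCCESS && e) {
        /* The entry is not duplicated for us, so copy out what we need
         * before releasing the search results. */
        value = slapi_entry_attr_get_charptr(e, "description");
    }

    /* Frees the internal search results and the pblock; do NOT call
     * slapi_entry_free(e) here, the entry belongs to entry_pb. */
    slapi_search_get_entry_done(&entry_pb);
    return value; /* caller frees with slapi_ch_free_string() */
}

The key difference from slapi_search_internal_get_entry() is that the returned entry is borrowed from the search pblock rather than duplicated, so cleanup collapses to a single slapi_search_get_entry_done() call.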


@ -0,0 +1,96 @@
From 9710c327b3034d7a9d112306961c9cec98083df5 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <simon.pichugin@gmail.com>
Date: Mon, 18 May 2020 22:33:45 +0200
Subject: [PATCH 05/12] Issue 51086 - Improve dscreate instance name validation

Bug Description: When creating an instance using dscreate, it doesn't enforce
a maximum name length. The ldapi socket name contains the name of the instance;
if it's too long we can hit system limits and the file name will be truncated.
It also doesn't sanitize the instance name, so it's possible to create an
instance with non-ascii symbols in its name.

Fix Description: Add more checks to 'dscreate from-file' installation.
Add a length limit for the nsslapd-ldapifilepath string because the value is
copied into a buffer limited by sizeof((*ports_info.i_listenaddr)->local.path).

https://pagure.io/389-ds-base/issue/51086

Reviewed by: firstyear, mreynolds (Thanks!)
---
ldap/servers/slapd/libglobs.c | 12 ++++++++++++
src/cockpit/389-console/src/ds.jsx | 8 ++++++--
src/lib389/lib389/instance/setup.py | 9 +++++++++
3 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 0d3d9a924..fbf90d92d 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -2390,11 +2390,23 @@ config_set_ldapi_filename(const char *attrname, char *value, char *errorbuf, int
{
int retVal = LDAP_SUCCESS;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ /*
+ * LDAPI file path length is limited by sizeof((*ports_info.i_listenaddr)->local.path))
+ * which is set in main.c inside of "#if defined(ENABLE_LDAPI)" block
+ * ports_info.i_listenaddr is sizeof(PRNetAddr) and our required sizes is 8 bytes less
+ */
+ size_t result_size = sizeof(PRNetAddr) - 8;
if (config_value_is_null(attrname, value, errorbuf, 0)) {
return LDAP_OPERATIONS_ERROR;
}
+ if (strlen(value) >= result_size) {
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: \"%s\" is invalid, its length must be less than %d",
+ attrname, value, result_size);
+ return LDAP_OPERATIONS_ERROR;
+ }
+
if (apply) {
CFG_LOCK_WRITE(slapdFrontendConfig);
diff --git a/src/cockpit/389-console/src/ds.jsx b/src/cockpit/389-console/src/ds.jsx
index 90d9e5abd..53aa5cb79 100644
--- a/src/cockpit/389-console/src/ds.jsx
+++ b/src/cockpit/389-console/src/ds.jsx
@@ -793,10 +793,14 @@ class CreateInstanceModal extends React.Component {
return;
}
newServerId = newServerId.replace(/^slapd-/i, ""); // strip "slapd-"
- if (newServerId.length > 128) {
+ if (newServerId === "admin") {
+ addNotification("warning", "Instance Name 'admin' is reserved, please choose a different name");
+ return;
+ }
+ if (newServerId.length > 80) {
addNotification(
"warning",
- "Instance name is too long, it must not exceed 128 characters"
+ "Instance name is too long, it must not exceed 80 characters"
);
return;
}
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 803992275..f5fc5495d 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -567,6 +567,15 @@ class SetupDs(object):
# We need to know the prefix before we can do the instance checks
assert_c(slapd['instance_name'] is not None, "Configuration instance_name in section [slapd] not found")
+ assert_c(len(slapd['instance_name']) <= 80, "Server identifier should not be longer than 80 symbols")
+ assert_c(all(ord(c) < 128 for c in slapd['instance_name']), "Server identifier can not contain non ascii characters")
+ assert_c(' ' not in slapd['instance_name'], "Server identifier can not contain a space")
+ assert_c(slapd['instance_name'] != 'admin', "Server identifier \"admin\" is reserved, please choose a different identifier")
+
+ # Check that valid characters are used
+ safe = re.compile(r'^[#%:\w@_-]+$').search
+ assert_c(bool(safe(slapd['instance_name'])), "Server identifier has invalid characters, please choose a different value")
+
# Check if the instance exists or not.
# Should I move this import? I think this prevents some recursion
from lib389 import DirSrv
--
2.26.2
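The server-side length check added to config_set_ldapi_filename() exists because the configured path is later copied into a fixed-size socket address buffer. The sketch below models that validation in isolation; LDAPI_PATH_MAX is a placeholder for the real limit, which the server derives from sizeof(PRNetAddr) - 8, and the function name is made up.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Placeholder for sizeof((*ports_info.i_listenaddr)->local.path), i.e. the
 * room available for a unix domain socket path in the address structure. */
#define LDAPI_PATH_MAX 104

/* Hypothetical stand-alone version of the new check. */
static bool
ldapi_path_is_valid(const char *value, char *errorbuf, size_t errorbuf_len)
{
    if (value == NULL || *value == '\0') {
        snprintf(errorbuf, errorbuf_len, "nsslapd-ldapifilepath: value is missing");
        return false;
    }
    if (strlen(value) >= LDAPI_PATH_MAX) {
        /* >= because one byte is needed for the trailing NUL */
        snprintf(errorbuf, errorbuf_len,
                 "nsslapd-ldapifilepath: \"%s\" is invalid, its length must be less than %d",
                 value, LDAPI_PATH_MAX);
        return false;
    }
    return true;
}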


@ -0,0 +1,254 @@
From c0cb15445c1434b3d317b1c06ab1a0ba8dbc6f04 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 19 May 2020 15:11:53 -0400
Subject: [PATCH 06/12] Issue 51102 - RFE - ds-replcheck - make online timeout
 configurable

Bug Description: When doing an online check with replicas that are very
far apart, the connection can time out because the timeout is hardcoded
to 5 seconds.

Fix Description: Change the default so the connection never times out, and
add a CLI option to specify a specific timeout.
Also catch all the possible LDAP exceptions so we can fail cleanly, fix
some python syntax issues, and improve the entry inconsistency report.

relates: https://pagure.io/389-ds-base/issue/51102

Reviewed by: firstyear & spichugi (Thanks!)
---
ldap/admin/src/scripts/ds-replcheck | 90 ++++++++++++++++++-----------
1 file changed, 57 insertions(+), 33 deletions(-)
diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
index 30bcfd65d..5bb7dfce3 100755
--- a/ldap/admin/src/scripts/ds-replcheck
+++ b/ldap/admin/src/scripts/ds-replcheck
@@ -1,7 +1,7 @@
#!/usr/bin/python3
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -21,10 +21,9 @@ import getpass
import signal
from ldif import LDIFRecordList
from ldap.ldapobject import SimpleLDAPObject
-from ldap.cidict import cidict
from ldap.controls import SimplePagedResultsControl
from lib389._entry import Entry
-from lib389.utils import ensure_str, ensure_list_str, ensure_int
+from lib389.utils import ensure_list_str, ensure_int
VERSION = "2.0"
RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))'
@@ -185,11 +184,11 @@ def report_conflict(entry, attr, opts):
report = True
if 'nscpentrywsi' in entry.data:
- found = False
for val in entry.data['nscpentrywsi']:
if val.lower().startswith(attr + ';'):
if (opts['starttime'] - extract_time(val)) <= opts['lag']:
report = False
+ break
return report
@@ -321,6 +320,9 @@ def ldif_search(LDIF, dn):
count = 0
ignore_list = ['conflictcsn', 'modifytimestamp', 'modifiersname']
val = ""
+ attr = ""
+ state_attr = ""
+ part_dn = ""
result['entry'] = None
result['conflict'] = None
result['tombstone'] = False
@@ -570,6 +572,7 @@ def cmp_entry(mentry, rentry, opts):
if val.lower().startswith(mattr + ';'):
if not found:
diff['diff'].append(" Master:")
+ diff['diff'].append(" - Value: %s" % (val.split(':')[1].lstrip()))
diff['diff'].append(" - State Info: %s" % (val))
diff['diff'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
found = True
@@ -588,6 +591,7 @@ def cmp_entry(mentry, rentry, opts):
if val.lower().startswith(mattr + ';'):
if not found:
diff['diff'].append(" Replica:")
+ diff['diff'].append(" - Value: %s" % (val.split(':')[1].lstrip()))
diff['diff'].append(" - State Info: %s" % (val))
diff['diff'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
found = True
@@ -654,7 +658,6 @@ def do_offline_report(opts, output_file=None):
rconflicts = []
rtombstones = 0
mtombstones = 0
- idx = 0
# Open LDIF files
try:
@@ -926,7 +929,7 @@ def validate_suffix(ldapnode, suffix, hostname):
:return - True if suffix exists, otherwise False
"""
try:
- master_basesuffix = ldapnode.search_s(suffix, ldap.SCOPE_BASE )
+ ldapnode.search_s(suffix, ldap.SCOPE_BASE)
except ldap.NO_SUCH_OBJECT:
print("Error: Failed to validate suffix in {}. {} does not exist.".format(hostname, suffix))
return False
@@ -968,12 +971,12 @@ def connect_to_replicas(opts):
replica = SimpleLDAPObject(ruri)
# Set timeouts
- master.set_option(ldap.OPT_NETWORK_TIMEOUT,5.0)
- master.set_option(ldap.OPT_TIMEOUT,5.0)
- replica.set_option(ldap.OPT_NETWORK_TIMEOUT,5.0)
- replica.set_option(ldap.OPT_TIMEOUT,5.0)
+ master.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
+ master.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
+ replica.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
+ replica.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
- # Setup Secure Conenction
+ # Setup Secure Connection
if opts['certdir'] is not None:
# Setup Master
if opts['mprotocol'] != LDAPI:
@@ -1003,7 +1006,7 @@ def connect_to_replicas(opts):
try:
master.simple_bind_s(opts['binddn'], opts['bindpw'])
except ldap.SERVER_DOWN as e:
- print("Cannot connect to %r" % muri)
+ print(f"Cannot connect to {muri} ({str(e)})")
sys.exit(1)
except ldap.LDAPError as e:
print("Error: Failed to authenticate to Master: ({}). "
@@ -1014,7 +1017,7 @@ def connect_to_replicas(opts):
try:
replica.simple_bind_s(opts['binddn'], opts['bindpw'])
except ldap.SERVER_DOWN as e:
- print("Cannot connect to %r" % ruri)
+ print(f"Cannot connect to {ruri} ({str(e)})")
sys.exit(1)
except ldap.LDAPError as e:
print("Error: Failed to authenticate to Replica: ({}). "
@@ -1218,7 +1221,6 @@ def do_online_report(opts, output_file=None):
"""
m_done = False
r_done = False
- done = False
report = {}
report['diff'] = []
report['m_missing'] = []
@@ -1257,15 +1259,22 @@ def do_online_report(opts, output_file=None):
# Read the results and start comparing
while not m_done or not r_done:
- if not m_done:
- m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid)
- elif not r_done:
- m_rdata = []
-
- if not r_done:
- r_rtype, r_rdata, r_rmsgid, r_rctrls = replica.result3(replica_msgid)
- elif not m_done:
- r_rdata = []
+ try:
+ if not m_done:
+ m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid)
+ elif not r_done:
+ m_rdata = []
+ except ldap.LDAPError as e:
+ print("Error: Problem getting the results from the master: %s", str(e))
+ sys.exit(1)
+ try:
+ if not r_done:
+ r_rtype, r_rdata, r_rmsgid, r_rctrls = replica.result3(replica_msgid)
+ elif not m_done:
+ r_rdata = []
+ except ldap.LDAPError as e:
+ print("Error: Problem getting the results from the replica: %s", str(e))
+ sys.exit(1)
# Convert entries
mresult = convert_entries(m_rdata)
@@ -1291,11 +1300,15 @@ def do_online_report(opts, output_file=None):
]
if m_pctrls:
if m_pctrls[0].cookie:
- # Copy cookie from response control to request control
- req_pr_ctrl.cookie = m_pctrls[0].cookie
- master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
- "(|(objectclass=*)(objectclass=ldapsubentry))",
- ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
+ try:
+ # Copy cookie from response control to request control
+ req_pr_ctrl.cookie = m_pctrls[0].cookie
+ master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+ "(|(objectclass=*)(objectclass=ldapsubentry))",
+ ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
+ except ldap.LDAPError as e:
+ print("Error: Problem searching the master: %s", str(e))
+ sys.exit(1)
else:
m_done = True # No more pages available
else:
@@ -1311,11 +1324,15 @@ def do_online_report(opts, output_file=None):
if r_pctrls:
if r_pctrls[0].cookie:
- # Copy cookie from response control to request control
- req_pr_ctrl.cookie = r_pctrls[0].cookie
- replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
- "(|(objectclass=*)(objectclass=ldapsubentry))",
- ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
+ try:
+ # Copy cookie from response control to request control
+ req_pr_ctrl.cookie = r_pctrls[0].cookie
+ replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+ "(|(objectclass=*)(objectclass=ldapsubentry))",
+ ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
+ except ldap.LDAPError as e:
+ print("Error: Problem searching the replica: %s", str(e))
+ sys.exit(1)
else:
r_done = True # No more pages available
else:
@@ -1426,6 +1443,9 @@ def init_online_params(args):
# prompt for password
opts['bindpw'] = getpass.getpass('Enter password: ')
+ # lastly handle the timeout
+ opts['timeout'] = int(args.timeout)
+
return opts
@@ -1553,6 +1573,8 @@ def main():
state_parser.add_argument('-y', '--pass-file', help='A text file containing the clear text password for the bind dn', dest='pass_file', default=None)
state_parser.add_argument('-Z', '--cert-dir', help='The certificate database directory for secure connections',
dest='certdir', default=None)
+ state_parser.add_argument('-t', '--timeout', help='The timeout for the LDAP connections. Default is no timeout.',
+ type=int, dest='timeout', default=-1)
# Online mode
online_parser = subparsers.add_parser('online', help="Compare two online replicas for differences")
@@ -1577,6 +1599,8 @@ def main():
online_parser.add_argument('-p', '--page-size', help='The paged-search result grouping size (default 500 entries)',
dest='pagesize', default=500)
online_parser.add_argument('-o', '--out-file', help='The output file', dest='file', default=None)
+ online_parser.add_argument('-t', '--timeout', help='The timeout for the LDAP connections. Default is no timeout.',
+ type=int, dest='timeout', default=-1)
# Offline LDIF mode
offline_parser = subparsers.add_parser('offline', help="Compare two replication LDIF files for differences (LDIF file generated by 'db2ldif -r')")
--
2.26.2
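ds-replcheck itself is python-ldap code, but the same timeout policy can be illustrated against the OpenLDAP C API: apply the user-supplied value only when it is positive, otherwise keep the library default of no client-side timeout. A minimal sketch with a hypothetical helper name:

#include <sys/time.h>
#include <ldap.h>

/* Hypothetical helper mirroring the ds-replcheck change: a timeout of
 * <= 0 means "never time out"; anything else is applied, in seconds, to
 * both the connect (network) and operation timeouts. */
static int
apply_connection_timeout(LDAP *ld, int timeout_seconds)
{
    struct timeval tv = { .tv_sec = timeout_seconds, .tv_usec = 0 };
    int rc;

    if (timeout_seconds <= 0) {
        return LDAP_OPT_SUCCESS; /* keep the library defaults: no timeout */
    }
    rc = ldap_set_option(ld, LDAP_OPT_NETWORK_TIMEOUT, &tv);
    if (rc != LDAP_OPT_SUCCESS) {
        return rc;
    }
    return ldap_set_option(ld, LDAP_OPT_TIMEOUT, &tv);
}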


@ -0,0 +1,428 @@
From a1cd3cf8e8b6b33ab21d5338921187a76dd9dcd0 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 22 May 2020 15:41:45 -0400
Subject: [PATCH 07/12] Issue 51110 - Fix ASAN ODR warnings

Description: Fixed ODR (One Definition Rule) issues with global attribute
name variables that were duplicated from the core server into the
replication and retrocl plugins.

relates: https://pagure.io/389-ds-base/issue/51110

Reviewed by: firstyear (Thanks!)
---
ldap/servers/plugins/replication/repl5.h | 17 +++---
.../plugins/replication/repl_globals.c | 17 +++---
ldap/servers/plugins/replication/replutil.c | 16 +++---
ldap/servers/plugins/retrocl/retrocl.h | 22 ++++----
ldap/servers/plugins/retrocl/retrocl_cn.c | 12 ++---
ldap/servers/plugins/retrocl/retrocl_po.c | 52 +++++++++----------
ldap/servers/plugins/retrocl/retrocl_trim.c | 30 +++++------
7 files changed, 82 insertions(+), 84 deletions(-)
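For context, ASAN's ODR warning fires when the same global symbol is defined in more than one loaded module, which is what the duplicated attr_* globals caused once the core server and the plugins were all mapped into ns-slapd. A rough illustration follows; the file names and the attribute value are illustrative, not taken from the patch.

/* Before the patch, two different modules each defined the same global,
 * for example:
 *
 *   // replication plugin
 *   char *attr_changenumber = ATTR_CHANGENUMBER;
 *
 *   // retro changelog plugin
 *   const char *attr_changenumber = ATTR_CHANGENUMBER;
 *
 * Loading both shared objects into the same process gives AddressSanitizer's
 * "One Definition Rule" report, because one symbol name ends up with two
 * (even differently typed) definitions. The patch renames each copy with a
 * module prefix so every definition is unique: */
char *repl_changenumber = "changenumber";          /* replication plugin */
const char *retrocl_changenumber = "changenumber"; /* retro changelog plugin */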
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 873dd8a16..72b7089e3 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -280,15 +280,14 @@ struct berval *NSDS90StartReplicationRequest_new(const char *protocol_oid,
int multimaster_extop_NSDS50ReplicationEntry(Slapi_PBlock *pb);
/* From repl_globals.c */
-extern char *attr_changenumber;
-extern char *attr_targetdn;
-extern char *attr_changetype;
-extern char *attr_newrdn;
-extern char *attr_deleteoldrdn;
-extern char *attr_changes;
-extern char *attr_newsuperior;
-extern char *attr_changetime;
-extern char *attr_dataversion;
+extern char *repl_changenumber;
+extern char *repl_targetdn;
+extern char *repl_changetype;
+extern char *repl_newrdn;
+extern char *repl_deleteoldrdn;
+extern char *repl_changes;
+extern char *repl_newsuperior;
+extern char *repl_changetime;
extern char *attr_csn;
extern char *changetype_add;
extern char *changetype_delete;
diff --git a/ldap/servers/plugins/replication/repl_globals.c b/ldap/servers/plugins/replication/repl_globals.c
index 355a0ffa1..c615c77da 100644
--- a/ldap/servers/plugins/replication/repl_globals.c
+++ b/ldap/servers/plugins/replication/repl_globals.c
@@ -48,15 +48,14 @@ char *changetype_delete = CHANGETYPE_DELETE;
char *changetype_modify = CHANGETYPE_MODIFY;
char *changetype_modrdn = CHANGETYPE_MODRDN;
char *changetype_moddn = CHANGETYPE_MODDN;
-char *attr_changenumber = ATTR_CHANGENUMBER;
-char *attr_targetdn = ATTR_TARGETDN;
-char *attr_changetype = ATTR_CHANGETYPE;
-char *attr_newrdn = ATTR_NEWRDN;
-char *attr_deleteoldrdn = ATTR_DELETEOLDRDN;
-char *attr_changes = ATTR_CHANGES;
-char *attr_newsuperior = ATTR_NEWSUPERIOR;
-char *attr_changetime = ATTR_CHANGETIME;
-char *attr_dataversion = ATTR_DATAVERSION;
+char *repl_changenumber = ATTR_CHANGENUMBER;
+char *repl_targetdn = ATTR_TARGETDN;
+char *repl_changetype = ATTR_CHANGETYPE;
+char *repl_newrdn = ATTR_NEWRDN;
+char *repl_deleteoldrdn = ATTR_DELETEOLDRDN;
+char *repl_changes = ATTR_CHANGES;
+char *repl_newsuperior = ATTR_NEWSUPERIOR;
+char *repl_changetime = ATTR_CHANGETIME;
char *attr_csn = ATTR_CSN;
char *type_copyingFrom = TYPE_COPYINGFROM;
char *type_copiedFrom = TYPE_COPIEDFROM;
diff --git a/ldap/servers/plugins/replication/replutil.c b/ldap/servers/plugins/replication/replutil.c
index de1e77880..39f821d12 100644
--- a/ldap/servers/plugins/replication/replutil.c
+++ b/ldap/servers/plugins/replication/replutil.c
@@ -64,14 +64,14 @@ get_cleattrs()
{
if (cleattrs[0] == NULL) {
cleattrs[0] = type_objectclass;
- cleattrs[1] = attr_changenumber;
- cleattrs[2] = attr_targetdn;
- cleattrs[3] = attr_changetype;
- cleattrs[4] = attr_newrdn;
- cleattrs[5] = attr_deleteoldrdn;
- cleattrs[6] = attr_changes;
- cleattrs[7] = attr_newsuperior;
- cleattrs[8] = attr_changetime;
+ cleattrs[1] = repl_changenumber;
+ cleattrs[2] = repl_targetdn;
+ cleattrs[3] = repl_changetype;
+ cleattrs[4] = repl_newrdn;
+ cleattrs[5] = repl_deleteoldrdn;
+ cleattrs[6] = repl_changes;
+ cleattrs[7] = repl_newsuperior;
+ cleattrs[8] = repl_changetime;
cleattrs[9] = NULL;
}
return cleattrs;
diff --git a/ldap/servers/plugins/retrocl/retrocl.h b/ldap/servers/plugins/retrocl/retrocl.h
index 06482a14c..2ce76fcec 100644
--- a/ldap/servers/plugins/retrocl/retrocl.h
+++ b/ldap/servers/plugins/retrocl/retrocl.h
@@ -94,17 +94,17 @@ extern int retrocl_nattributes;
extern char **retrocl_attributes;
extern char **retrocl_aliases;
-extern const char *attr_changenumber;
-extern const char *attr_targetdn;
-extern const char *attr_changetype;
-extern const char *attr_newrdn;
-extern const char *attr_newsuperior;
-extern const char *attr_deleteoldrdn;
-extern const char *attr_changes;
-extern const char *attr_changetime;
-extern const char *attr_objectclass;
-extern const char *attr_nsuniqueid;
-extern const char *attr_isreplicated;
+extern const char *retrocl_changenumber;
+extern const char *retrocl_targetdn;
+extern const char *retrocl_changetype;
+extern const char *retrocl_newrdn;
+extern const char *retrocl_newsuperior;
+extern const char *retrocl_deleteoldrdn;
+extern const char *retrocl_changes;
+extern const char *retrocl_changetime;
+extern const char *retrocl_objectclass;
+extern const char *retrocl_nsuniqueid;
+extern const char *retrocl_isreplicated;
extern PRLock *retrocl_internal_lock;
extern Slapi_RWLock *retrocl_cn_lock;
diff --git a/ldap/servers/plugins/retrocl/retrocl_cn.c b/ldap/servers/plugins/retrocl/retrocl_cn.c
index 709d7a857..5fc5f586d 100644
--- a/ldap/servers/plugins/retrocl/retrocl_cn.c
+++ b/ldap/servers/plugins/retrocl/retrocl_cn.c
@@ -62,7 +62,7 @@ handle_cnum_entry(Slapi_Entry *e, void *callback_data)
Slapi_Attr *chattr = NULL;
sval = NULL;
value = NULL;
- if (slapi_entry_attr_find(e, attr_changenumber, &chattr) == 0) {
+ if (slapi_entry_attr_find(e, retrocl_changenumber, &chattr) == 0) {
slapi_attr_first_value(chattr, &sval);
if (NULL != sval) {
value = slapi_value_get_berval(sval);
@@ -79,7 +79,7 @@ handle_cnum_entry(Slapi_Entry *e, void *callback_data)
chattr = NULL;
sval = NULL;
value = NULL;
- if (slapi_entry_attr_find(e, attr_changetime, &chattr) == 0) {
+ if (slapi_entry_attr_find(e, retrocl_changetime, &chattr) == 0) {
slapi_attr_first_value(chattr, &sval);
if (NULL != sval) {
value = slapi_value_get_berval(sval);
@@ -134,7 +134,7 @@ retrocl_get_changenumbers(void)
cr.cr_time = 0;
slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_FIRST,
- (char *)attr_changenumber, /* cast away const */
+ (char *)retrocl_changenumber, /* cast away const */
NULL, NULL, 0, &cr, NULL, handle_cnum_result,
handle_cnum_entry, NULL);
@@ -144,7 +144,7 @@ retrocl_get_changenumbers(void)
slapi_ch_free((void **)&cr.cr_time);
slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_LAST,
- (char *)attr_changenumber, /* cast away const */
+ (char *)retrocl_changenumber, /* cast away const */
NULL, NULL, 0, &cr, NULL, handle_cnum_result,
handle_cnum_entry, NULL);
@@ -185,7 +185,7 @@ retrocl_getchangetime(int type, int *err)
return NO_TIME;
}
slapi_seq_callback(RETROCL_CHANGELOG_DN, type,
- (char *)attr_changenumber, /* cast away const */
+ (char *)retrocl_changenumber, /* cast away const */
NULL,
NULL, 0, &cr, NULL,
handle_cnum_result, handle_cnum_entry, NULL);
@@ -353,7 +353,7 @@ retrocl_update_lastchangenumber(void)
cr.cr_cnum = 0;
cr.cr_time = 0;
slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_LAST,
- (char *)attr_changenumber, /* cast away const */
+ (char *)retrocl_changenumber, /* cast away const */
NULL, NULL, 0, &cr, NULL, handle_cnum_result,
handle_cnum_entry, NULL);
diff --git a/ldap/servers/plugins/retrocl/retrocl_po.c b/ldap/servers/plugins/retrocl/retrocl_po.c
index d2af79b31..e1488f56b 100644
--- a/ldap/servers/plugins/retrocl/retrocl_po.c
+++ b/ldap/servers/plugins/retrocl/retrocl_po.c
@@ -25,17 +25,17 @@ modrdn2reple(Slapi_Entry *e, const char *newrdn, int deloldrdn, LDAPMod **ldm, c
/******************************/
-const char *attr_changenumber = "changenumber";
-const char *attr_targetdn = "targetdn";
-const char *attr_changetype = "changetype";
-const char *attr_newrdn = "newrdn";
-const char *attr_deleteoldrdn = "deleteoldrdn";
-const char *attr_changes = "changes";
-const char *attr_newsuperior = "newsuperior";
-const char *attr_changetime = "changetime";
-const char *attr_objectclass = "objectclass";
-const char *attr_nsuniqueid = "nsuniqueid";
-const char *attr_isreplicated = "isreplicated";
+const char *retrocl_changenumber = "changenumber";
+const char *retrocl_targetdn = "targetdn";
+const char *retrocl_changetype = "changetype";
+const char *retrocl_newrdn = "newrdn";
+const char *retrocl_deleteoldrdn = "deleteoldrdn";
+const char *retrocl_changes = "changes";
+const char *retrocl_newsuperior = "newsuperior";
+const char *retrocl_changetime = "changetime";
+const char *retrocl_objectclass = "objectclass";
+const char *retrocl_nsuniqueid = "nsuniqueid";
+const char *retrocl_isreplicated = "isreplicated";
/*
* Function: make_changes_string
@@ -185,7 +185,7 @@ write_replog_db(
changenum, dn);
/* Construct the dn of this change record */
- edn = slapi_ch_smprintf("%s=%lu,%s", attr_changenumber, changenum, RETROCL_CHANGELOG_DN);
+ edn = slapi_ch_smprintf("%s=%lu,%s", retrocl_changenumber, changenum, RETROCL_CHANGELOG_DN);
/*
* Create the entry struct, and fill in fields common to all types
@@ -214,7 +214,7 @@ write_replog_db(
attributeAlias = attributeName;
}
- if (strcasecmp(attributeName, attr_nsuniqueid) == 0) {
+ if (strcasecmp(attributeName, retrocl_nsuniqueid) == 0) {
Slapi_Entry *entry = NULL;
const char *uniqueId = NULL;
@@ -236,7 +236,7 @@ write_replog_db(
extensibleObject = 1;
- } else if (strcasecmp(attributeName, attr_isreplicated) == 0) {
+ } else if (strcasecmp(attributeName, retrocl_isreplicated) == 0) {
int isReplicated = 0;
char *attributeValue = NULL;
@@ -298,17 +298,17 @@ write_replog_db(
sprintf(chnobuf, "%lu", changenum);
val.bv_val = chnobuf;
val.bv_len = strlen(chnobuf);
- slapi_entry_add_values(e, attr_changenumber, vals);
+ slapi_entry_add_values(e, retrocl_changenumber, vals);
/* Set the targetentrydn attribute */
val.bv_val = dn;
val.bv_len = strlen(dn);
- slapi_entry_add_values(e, attr_targetdn, vals);
+ slapi_entry_add_values(e, retrocl_targetdn, vals);
/* Set the changeTime attribute */
val.bv_val = format_genTime(curtime);
val.bv_len = strlen(val.bv_val);
- slapi_entry_add_values(e, attr_changetime, vals);
+ slapi_entry_add_values(e, retrocl_changetime, vals);
slapi_ch_free((void **)&val.bv_val);
/*
@@ -344,7 +344,7 @@ write_replog_db(
/* Set the changetype attribute */
val.bv_val = "delete";
val.bv_len = 6;
- slapi_entry_add_values(e, attr_changetype, vals);
+ slapi_entry_add_values(e, retrocl_changetype, vals);
}
break;
@@ -422,7 +422,7 @@ entry2reple(Slapi_Entry *e, Slapi_Entry *oe, int optype)
} else {
return (1);
}
- slapi_entry_add_values(e, attr_changetype, vals);
+ slapi_entry_add_values(e, retrocl_changetype, vals);
estr = slapi_entry2str(oe, &len);
p = estr;
@@ -435,7 +435,7 @@ entry2reple(Slapi_Entry *e, Slapi_Entry *oe, int optype)
}
val.bv_val = p;
val.bv_len = len - (p - estr); /* length + terminating \0 */
- slapi_entry_add_values(e, attr_changes, vals);
+ slapi_entry_add_values(e, retrocl_changes, vals);
slapi_ch_free_string(&estr);
return 0;
}
@@ -471,7 +471,7 @@ mods2reple(Slapi_Entry *e, LDAPMod **ldm)
if (NULL != l) {
val.bv_val = l->ls_buf;
val.bv_len = l->ls_len + 1; /* string + terminating \0 */
- slapi_entry_add_values(e, attr_changes, vals);
+ slapi_entry_add_values(e, retrocl_changes, vals);
lenstr_free(&l);
}
}
@@ -511,12 +511,12 @@ modrdn2reple(
val.bv_val = "modrdn";
val.bv_len = 6;
- slapi_entry_add_values(e, attr_changetype, vals);
+ slapi_entry_add_values(e, retrocl_changetype, vals);
if (newrdn) {
val.bv_val = (char *)newrdn; /* cast away const */
val.bv_len = strlen(newrdn);
- slapi_entry_add_values(e, attr_newrdn, vals);
+ slapi_entry_add_values(e, retrocl_newrdn, vals);
}
if (deloldrdn == 0) {
@@ -526,12 +526,12 @@ modrdn2reple(
val.bv_val = "TRUE";
val.bv_len = 4;
}
- slapi_entry_add_values(e, attr_deleteoldrdn, vals);
+ slapi_entry_add_values(e, retrocl_deleteoldrdn, vals);
if (newsuperior) {
val.bv_val = (char *)newsuperior; /* cast away const */
val.bv_len = strlen(newsuperior);
- slapi_entry_add_values(e, attr_newsuperior, vals);
+ slapi_entry_add_values(e, retrocl_newsuperior, vals);
}
if (NULL != ldm) {
@@ -540,7 +540,7 @@ modrdn2reple(
if (l->ls_len) {
val.bv_val = l->ls_buf;
val.bv_len = l->ls_len;
- slapi_entry_add_values(e, attr_changes, vals);
+ slapi_entry_add_values(e, retrocl_changes, vals);
}
lenstr_free(&l);
}
diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c
index 0378eb7f6..d031dc3f8 100644
--- a/ldap/servers/plugins/retrocl/retrocl_trim.c
+++ b/ldap/servers/plugins/retrocl/retrocl_trim.c
@@ -49,15 +49,15 @@ static const char **
get_cleattrs(void)
{
if (cleattrs[0] == NULL) {
- cleattrs[0] = attr_objectclass;
- cleattrs[1] = attr_changenumber;
- cleattrs[2] = attr_targetdn;
- cleattrs[3] = attr_changetype;
- cleattrs[4] = attr_newrdn;
- cleattrs[5] = attr_deleteoldrdn;
- cleattrs[6] = attr_changes;
- cleattrs[7] = attr_newsuperior;
- cleattrs[8] = attr_changetime;
+ cleattrs[0] = retrocl_objectclass;
+ cleattrs[1] = retrocl_changenumber;
+ cleattrs[2] = retrocl_targetdn;
+ cleattrs[3] = retrocl_changetype;
+ cleattrs[4] = retrocl_newrdn;
+ cleattrs[5] = retrocl_deleteoldrdn;
+ cleattrs[6] = retrocl_changes;
+ cleattrs[7] = retrocl_newsuperior;
+ cleattrs[8] = retrocl_changetime;
cleattrs[9] = NULL;
}
return cleattrs;
@@ -81,7 +81,7 @@ delete_changerecord(changeNumber cnum)
char *dnbuf;
int delrc;
- dnbuf = slapi_ch_smprintf("%s=%ld, %s", attr_changenumber, cnum,
+ dnbuf = slapi_ch_smprintf("%s=%ld, %s", retrocl_changenumber, cnum,
RETROCL_CHANGELOG_DN);
pb = slapi_pblock_new();
slapi_delete_internal_set_pb(pb, dnbuf, NULL /*controls*/, NULL /* uniqueid */,
@@ -154,7 +154,7 @@ handle_getchangetime_search(Slapi_Entry *e, void *callback_data)
if (NULL != e) {
Slapi_Value *sval = NULL;
const struct berval *val = NULL;
- rc = slapi_entry_attr_find(e, attr_changetime, &attr);
+ rc = slapi_entry_attr_find(e, retrocl_changetime, &attr);
/* Bug 624442: Logic checking for lack of timestamp was
reversed. */
if (0 != rc || slapi_attr_first_value(attr, &sval) == -1 ||
@@ -174,14 +174,14 @@ handle_getchangetime_search(Slapi_Entry *e, void *callback_data)
/*
* Function: get_changetime
* Arguments: cnum - number of change record to retrieve
- * Returns: Taking the attr_changetime of the 'cnum' entry,
+ * Returns: Taking the retrocl_changetime of the 'cnum' entry,
* it converts it into time_t (parse_localTime) and returns this time value.
* It returns 0 in the following cases:
- * - changerecord entry has not attr_changetime
+ * - changerecord entry has not retrocl_changetime
* - attr_changetime attribute has no value
* - attr_changetime attribute value is empty
*
- * Description: Retrieve attr_changetime ("changetime") from a changerecord whose number is "cnum".
+ * Description: Retrieve retrocl_changetime ("changetime") from a changerecord whose number is "cnum".
*/
static time_t
get_changetime(changeNumber cnum, int *err)
@@ -198,7 +198,7 @@ get_changetime(changeNumber cnum, int *err)
}
crtp->crt_nentries = crtp->crt_err = 0;
crtp->crt_time = 0;
- PR_snprintf(fstr, sizeof(fstr), "%s=%ld", attr_changenumber, cnum);
+ PR_snprintf(fstr, sizeof(fstr), "%s=%ld", retrocl_changenumber, cnum);
pb = slapi_pblock_new();
slapi_search_internal_set_pb(pb, RETROCL_CHANGELOG_DN,
--
2.26.2
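
A minimal sketch, not taken from the patch itself, of the clash the rename avoids: before the change the replication plugin and the retro changelog plugin both exported a global named attr_changenumber, one as char * and one as const char *, which AddressSanitizer reports as an ODR violation when both shared objects are loaded into the same ns-slapd process. Giving each plugin its own prefixed symbol removes the collision:

/* odr_sketch.c - illustration only; the names mirror the patch above.
 *
 * Before the patch the two plugins each defined the SAME symbol name:
 *
 *     repl_globals.c (replication plugin):  char       *attr_changenumber = "changenumber";
 *     retrocl_po.c   (retro changelog):     const char *attr_changenumber = "changenumber";
 *
 * With an ASAN-instrumented build, loading both plugins produces
 * "One Definition Rule violation" reports for attr_changenumber because the
 * one symbol name has two definitions with different types.
 *
 * After the patch each plugin owns a uniquely prefixed symbol, so the
 * definitions no longer collide inside the same process image:
 */
char *repl_changenumber = "changenumber";           /* replication plugin copy */
const char *retrocl_changenumber = "changenumber";  /* retro changelog copy */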

View File

@ -0,0 +1,466 @@
From 8d14ff153e9335b09739438344f9c3c78a496548 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 22 May 2020 10:42:11 -0400
Subject: [PATCH 08/12] Issue 51095 - abort operation if CSN can not be
generated
Bug Description: If we failed to get the system time we used an
uninitialized timespec struct, which could lead to bizarre
times in CSNs.
Fix Description: Check if the system time function fails, and if it does
then abort the update operation.
relates: https://pagure.io/389-ds-base/issue/51095
Reviewed by: firstyear & tbordaz(Thanks!!)
---
ldap/servers/plugins/replication/repl5.h | 2 +-
.../plugins/replication/repl5_replica.c | 33 ++++++++------
ldap/servers/slapd/back-ldbm/ldbm_add.c | 8 +++-
ldap/servers/slapd/back-ldbm/ldbm_delete.c | 9 +++-
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 10 ++++-
ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 8 +++-
ldap/servers/slapd/csngen.c | 18 +++++++-
ldap/servers/slapd/entrywsi.c | 15 ++++---
ldap/servers/slapd/slap.h | 2 +-
ldap/servers/slapd/slapi-plugin.h | 8 ++++
ldap/servers/slapd/slapi-private.h | 5 ++-
ldap/servers/slapd/time.c | 43 +++++++++++++------
12 files changed, 118 insertions(+), 43 deletions(-)
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 72b7089e3..638471744 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -776,7 +776,7 @@ void replica_disable_replication(Replica *r);
int replica_start_agreement(Replica *r, Repl_Agmt *ra);
int windows_replica_start_agreement(Replica *r, Repl_Agmt *ra);
-CSN *replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn);
+int32_t replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn);
int replica_get_attr(Slapi_PBlock *pb, const char *type, void *value);
/* mapping tree extensions manipulation */
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index 02caa88d9..f01782330 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -3931,11 +3931,9 @@ windows_replica_start_agreement(Replica *r, Repl_Agmt *ra)
* A callback function registered as op->o_csngen_handler and
* called by backend ops to generate opcsn.
*/
-CSN *
-replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn)
+int32_t
+replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn)
{
- CSN *opcsn = NULL;
-
Replica *replica = replica_get_replica_for_op(pb);
if (NULL != replica) {
Slapi_Operation *op;
@@ -3946,17 +3944,26 @@ replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn)
CSNGen *gen = (CSNGen *)object_get_data(gen_obj);
if (NULL != gen) {
/* The new CSN should be greater than the base CSN */
- csngen_new_csn(gen, &opcsn, PR_FALSE /* don't notify */);
- if (csn_compare(opcsn, basecsn) <= 0) {
- char opcsnstr[CSN_STRSIZE], basecsnstr[CSN_STRSIZE];
+ if (csngen_new_csn(gen, opcsn, PR_FALSE /* don't notify */) != CSN_SUCCESS) {
+ /* Failed to generate CSN we must abort */
+ object_release(gen_obj);
+ return -1;
+ }
+ if (csn_compare(*opcsn, basecsn) <= 0) {
+ char opcsnstr[CSN_STRSIZE];
+ char basecsnstr[CSN_STRSIZE];
char opcsn2str[CSN_STRSIZE];
- csn_as_string(opcsn, PR_FALSE, opcsnstr);
+ csn_as_string(*opcsn, PR_FALSE, opcsnstr);
csn_as_string(basecsn, PR_FALSE, basecsnstr);
- csn_free(&opcsn);
+ csn_free(opcsn);
csngen_adjust_time(gen, basecsn);
- csngen_new_csn(gen, &opcsn, PR_FALSE /* don't notify */);
- csn_as_string(opcsn, PR_FALSE, opcsn2str);
+ if (csngen_new_csn(gen, opcsn, PR_FALSE) != CSN_SUCCESS) {
+ /* Failed to generate CSN we must abort */
+ object_release(gen_obj);
+ return -1;
+ }
+ csn_as_string(*opcsn, PR_FALSE, opcsn2str);
slapi_log_err(SLAPI_LOG_WARNING, repl_plugin_name,
"replica_generate_next_csn - "
"opcsn=%s <= basecsn=%s, adjusted opcsn=%s\n",
@@ -3966,14 +3973,14 @@ replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn)
* Insert opcsn into the csn pending list.
* This is the notify effect in csngen_new_csn().
*/
- assign_csn_callback(opcsn, (void *)replica);
+ assign_csn_callback(*opcsn, (void *)replica);
}
object_release(gen_obj);
}
}
}
- return opcsn;
+ return 0;
}
/*
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
index d0d88bf16..ee366c74c 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
@@ -645,7 +645,13 @@ ldbm_back_add(Slapi_PBlock *pb)
* Current op is a user request. Opcsn will be assigned
* if the dn is in an updatable replica.
*/
- opcsn = entry_assign_operation_csn(pb, e, parententry ? parententry->ep_entry : NULL);
+ if (entry_assign_operation_csn(pb, e, parententry ? parententry->ep_entry : NULL, &opcsn) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_add",
+ "failed to generate add CSN for entry (%s), aborting operation\n",
+ slapi_entry_get_dn(e));
+ ldap_result_code = LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
}
if (opcsn != NULL) {
entry_set_csn(e, opcsn);
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index 873b5b00e..fbcb57310 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -464,7 +464,14 @@ replace_entry:
* by entry_assign_operation_csn() if the dn is in an
* updatable replica.
*/
- opcsn = entry_assign_operation_csn ( pb, e->ep_entry, NULL );
+ if (entry_assign_operation_csn(pb, e->ep_entry, NULL, &opcsn) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_delete",
+ "failed to generate delete CSN for entry (%s), aborting operation\n",
+ slapi_entry_get_dn(e->ep_entry));
+ retval = -1;
+ ldap_result_code = LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
}
if (opcsn != NULL) {
if (!is_fixup_operation) {
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index b0c477e3f..e9d7e87e3 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -598,12 +598,18 @@ ldbm_back_modify(Slapi_PBlock *pb)
goto error_return;
}
opcsn = operation_get_csn(operation);
- if (NULL == opcsn && operation->o_csngen_handler) {
+ if (opcsn == NULL && operation->o_csngen_handler) {
/*
* Current op is a user request. Opcsn will be assigned
* if the dn is in an updatable replica.
*/
- opcsn = entry_assign_operation_csn(pb, e->ep_entry, NULL);
+ if (entry_assign_operation_csn(pb, e->ep_entry, NULL, &opcsn) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modify",
+ "failed to generate modify CSN for entry (%s), aborting operation\n",
+ slapi_entry_get_dn(e->ep_entry));
+ ldap_result_code = LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
}
if (opcsn) {
entry_set_maxcsn(e->ep_entry, opcsn);
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
index 26698012a..fde83c99f 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
@@ -543,7 +543,13 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
* Current op is a user request. Opcsn will be assigned
* if the dn is in an updatable replica.
*/
- opcsn = entry_assign_operation_csn(pb, e->ep_entry, parententry ? parententry->ep_entry : NULL);
+ if (entry_assign_operation_csn(pb, e->ep_entry, parententry ? parententry->ep_entry : NULL, &opcsn) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modrdn",
+ "failed to generate modrdn CSN for entry (%s), aborting operation\n",
+ slapi_entry_get_dn(e->ep_entry));
+ ldap_result_code = LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
}
if (opcsn != NULL) {
entry_set_maxcsn(e->ep_entry, opcsn);
diff --git a/ldap/servers/slapd/csngen.c b/ldap/servers/slapd/csngen.c
index 68dbbda8e..b08d8b25c 100644
--- a/ldap/servers/slapd/csngen.c
+++ b/ldap/servers/slapd/csngen.c
@@ -164,6 +164,7 @@ csngen_free(CSNGen **gen)
int
csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
{
+ struct timespec now = {0};
int rc = CSN_SUCCESS;
time_t cur_time;
int delta;
@@ -179,12 +180,25 @@ csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
return CSN_MEMORY_ERROR;
}
- slapi_rwlock_wrlock(gen->lock);
+ if ((rc = slapi_clock_gettime(&now)) != 0) {
+ /* Failed to get system time, we must abort */
+ slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn",
+ "Failed to get system time (%s)\n",
+ slapd_system_strerror(rc));
+ return CSN_TIME_ERROR;
+ }
+ cur_time = now.tv_sec;
- cur_time = slapi_current_utc_time();
+ slapi_rwlock_wrlock(gen->lock);
/* check if the time should be adjusted */
delta = cur_time - gen->state.sampled_time;
+ if (delta > _SEC_PER_DAY || delta < (-1 * _SEC_PER_DAY)) {
+ /* We had a jump larger than a day */
+ slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn",
+ "Detected large jump in CSN time. Delta: %d (current time: %ld vs previous time: %ld)\n",
+ delta, cur_time, gen->state.sampled_time);
+ }
if (delta > 0) {
rc = _csngen_adjust_local_time(gen, cur_time);
if (rc != CSN_SUCCESS) {
diff --git a/ldap/servers/slapd/entrywsi.c b/ldap/servers/slapd/entrywsi.c
index 5d1d7238a..31bf65d8e 100644
--- a/ldap/servers/slapd/entrywsi.c
+++ b/ldap/servers/slapd/entrywsi.c
@@ -224,13 +224,12 @@ entry_add_rdn_csn(Slapi_Entry *e, const CSN *csn)
slapi_rdn_free(&rdn);
}
-CSN *
-entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry)
+int32_t
+entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry, CSN **opcsn)
{
Slapi_Operation *op;
const CSN *basecsn = NULL;
const CSN *parententry_dncsn = NULL;
- CSN *opcsn = NULL;
slapi_pblock_get(pb, SLAPI_OPERATION, &op);
@@ -252,14 +251,16 @@ entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parent
basecsn = parententry_dncsn;
}
}
- opcsn = op->o_csngen_handler(pb, basecsn);
+ if(op->o_csngen_handler(pb, basecsn, opcsn) != 0) {
+ return -1;
+ }
- if (NULL != opcsn) {
- operation_set_csn(op, opcsn);
+ if (*opcsn) {
+ operation_set_csn(op, *opcsn);
}
}
- return opcsn;
+ return 0;
}
/*
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index a4cae784a..cef8c789c 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1480,7 +1480,7 @@ struct op;
typedef void (*result_handler)(struct conn *, struct op *, int, char *, char *, int, struct berval **);
typedef int (*search_entry_handler)(Slapi_Backend *, struct conn *, struct op *, struct slapi_entry *);
typedef int (*search_referral_handler)(Slapi_Backend *, struct conn *, struct op *, struct berval **);
-typedef CSN *(*csngen_handler)(Slapi_PBlock *pb, const CSN *basecsn);
+typedef int32_t *(*csngen_handler)(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn);
typedef int (*replica_attr_handler)(Slapi_PBlock *pb, const char *type, void **value);
/*
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index be1e52e4d..834a98742 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -6743,6 +6743,14 @@ int slapi_reslimit_get_integer_limit(Slapi_Connection *conn, int handle, int *li
*/
time_t slapi_current_time(void) __attribute__((deprecated));
+/**
+ * Get the system time and check for errors. Return
+ *
+ * \param tp - a timespec struct where the system time is set
+ * \return result code, upon success tp is set to the system time
+ */
+int32_t slapi_clock_gettime(struct timespec *tp);
+
/**
* Returns the current system time as a hr clock relative to uptime
* This means the clock is not affected by timezones
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index d85ee43e5..c98c1947c 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -233,7 +233,8 @@ enum
CSN_INVALID_PARAMETER, /* invalid function argument */
CSN_INVALID_FORMAT, /* invalid state format */
CSN_LDAP_ERROR, /* LDAP operation failed */
- CSN_NSPR_ERROR /* NSPR API failure */
+ CSN_NSPR_ERROR, /* NSPR API failure */
+ CSN_TIME_ERROR /* Error generating new CSN due to clock failure */
};
typedef struct csngen CSNGen;
@@ -326,7 +327,7 @@ int slapi_entries_diff(Slapi_Entry **old_entries, Slapi_Entry **new_entries, int
void set_attr_to_protected_list(char *attr, int flag);
/* entrywsi.c */
-CSN *entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry);
+int32_t entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry, CSN **opcsn);
const CSN *entry_get_maxcsn(const Slapi_Entry *entry);
void entry_set_maxcsn(Slapi_Entry *entry, const CSN *csn);
const CSN *entry_get_dncsn(const Slapi_Entry *entry);
diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c
index 8048a3359..545538404 100644
--- a/ldap/servers/slapd/time.c
+++ b/ldap/servers/slapd/time.c
@@ -61,6 +61,25 @@ poll_current_time()
return 0;
}
+/*
+ * Check if the time function returns an error. If so return the errno
+ */
+int32_t
+slapi_clock_gettime(struct timespec *tp)
+{
+ int32_t rc = 0;
+
+ PR_ASSERT(tp && tp->tv_nsec == 0 && tp->tv_sec == 0);
+
+ if (clock_gettime(CLOCK_REALTIME, tp) != 0) {
+ rc = errno;
+ }
+
+ PR_ASSERT(rc == 0);
+
+ return rc;
+}
+
time_t
current_time(void)
{
@@ -69,7 +88,7 @@ current_time(void)
* but this should be removed in favour of the
* more accurately named slapi_current_utc_time
*/
- struct timespec now;
+ struct timespec now = {0};
clock_gettime(CLOCK_REALTIME, &now);
return now.tv_sec;
}
@@ -83,7 +102,7 @@ slapi_current_time(void)
struct timespec
slapi_current_rel_time_hr(void)
{
- struct timespec now;
+ struct timespec now = {0};
clock_gettime(CLOCK_MONOTONIC, &now);
return now;
}
@@ -91,7 +110,7 @@ slapi_current_rel_time_hr(void)
struct timespec
slapi_current_utc_time_hr(void)
{
- struct timespec ltnow;
+ struct timespec ltnow = {0};
clock_gettime(CLOCK_REALTIME, &ltnow);
return ltnow;
}
@@ -99,7 +118,7 @@ slapi_current_utc_time_hr(void)
time_t
slapi_current_utc_time(void)
{
- struct timespec ltnow;
+ struct timespec ltnow = {0};
clock_gettime(CLOCK_REALTIME, &ltnow);
return ltnow.tv_sec;
}
@@ -108,8 +127,8 @@ void
slapi_timestamp_utc_hr(char *buf, size_t bufsize)
{
PR_ASSERT(bufsize >= SLAPI_TIMESTAMP_BUFSIZE);
- struct timespec ltnow;
- struct tm utctm;
+ struct timespec ltnow = {0};
+ struct tm utctm = {0};
clock_gettime(CLOCK_REALTIME, &ltnow);
gmtime_r(&(ltnow.tv_sec), &utctm);
strftime(buf, bufsize, "%Y%m%d%H%M%SZ", &utctm);
@@ -140,7 +159,7 @@ format_localTime_log(time_t t, int initsize __attribute__((unused)), char *buf,
{
long tz;
- struct tm *tmsp, tms;
+ struct tm *tmsp, tms = {0};
char tbuf[*bufsize];
char sign;
/* make sure our buffer will be big enough. Need at least 29 */
@@ -191,7 +210,7 @@ format_localTime_hr_log(time_t t, long nsec, int initsize __attribute__((unused)
{
long tz;
- struct tm *tmsp, tms;
+ struct tm *tmsp, tms = {0};
char tbuf[*bufsize];
char sign;
/* make sure our buffer will be big enough. Need at least 39 */
@@ -278,7 +297,7 @@ slapi_timespec_expire_check(struct timespec *expire)
if (expire->tv_sec == 0 && expire->tv_nsec == 0) {
return TIMER_CONTINUE;
}
- struct timespec now;
+ struct timespec now = {0};
clock_gettime(CLOCK_MONOTONIC, &now);
if (now.tv_sec > expire->tv_sec ||
(expire->tv_sec == now.tv_sec && now.tv_sec > expire->tv_nsec)) {
@@ -293,7 +312,7 @@ format_localTime(time_t from)
in the syntax of a generalizedTime, except without the time zone. */
{
char *into;
- struct tm t;
+ struct tm t = {0};
localtime_r(&from, &t);
@@ -362,7 +381,7 @@ format_genTime(time_t from)
in the syntax of a generalizedTime. */
{
char *into;
- struct tm t;
+ struct tm t = {0};
gmtime_r(&from, &t);
into = slapi_ch_malloc(SLAPI_TIMESTAMP_BUFSIZE);
@@ -382,7 +401,7 @@ time_t
read_genTime(struct berval *from)
{
struct tm t = {0};
- time_t retTime;
+ time_t retTime = {0};
time_t diffsec = 0;
int i, gflag = 0, havesec = 0;
--
2.26.2
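
A small self-contained sketch of the defensive pattern this patch introduces around the system clock: zero-initialize the timespec, check the clock_gettime() return code, and hand the error back to the caller so the operation can be aborted instead of generating a CSN from garbage time. The helper and constant names here (get_wall_time, CLOCK_OK, CLOCK_ERR) are illustrative only, not the server API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define CLOCK_OK   0
#define CLOCK_ERR  (-1)

/* Illustrative helper: fetch wall-clock time, reporting failure to the
 * caller rather than leaving *tp uninitialized (the bug the patch fixes). */
static int32_t
get_wall_time(struct timespec *tp)
{
    *tp = (struct timespec){0};            /* never hand back garbage */
    if (clock_gettime(CLOCK_REALTIME, tp) != 0) {
        return errno ? errno : CLOCK_ERR;  /* let the caller abort the op */
    }
    return CLOCK_OK;
}

int
main(void)
{
    struct timespec now;
    int32_t rc = get_wall_time(&now);

    if (rc != CLOCK_OK) {
        /* In the server this is where the update operation would be
         * aborted instead of building a CSN from a bogus timestamp. */
        fprintf(stderr, "failed to read system time: %d\n", (int)rc);
        return 1;
    }
    printf("seconds since epoch: %lld\n", (long long)now.tv_sec);
    return 0;
}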

View File

@ -0,0 +1,179 @@
From 52ce524f7672563b543e84401665765cfa72dea5 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 26 May 2020 17:03:11 -0400
Subject: [PATCH 09/12] Issue 51113 - Allow using uid for replication manager
entry
Bug Description: The rdn attribute of the replication manager entry was
hardcoded to allow only "cn".
Fix Description: Allow setting the rdn attribute of the replication
manager DS LDAP object, and include the schema that
allows "uid".
relates: https://pagure.io/389-ds-base/issue/51113
Reviewed by: spichugi & firstyear(Thanks!!)
---
src/lib389/lib389/cli_conf/replication.py | 53 ++++++++++++-----------
src/lib389/lib389/replica.py | 11 +++--
2 files changed, 35 insertions(+), 29 deletions(-)
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
index 09cb9b435..b9bc3d291 100644
--- a/src/lib389/lib389/cli_conf/replication.py
+++ b/src/lib389/lib389/cli_conf/replication.py
@@ -199,19 +199,21 @@ def enable_replication(inst, basedn, log, args):
# Create replication manager if password was provided
if args.bind_dn and args.bind_passwd:
- cn_rdn = args.bind_dn.split(",", 1)[0]
- cn_val = cn_rdn.split("=", 1)[1]
- manager = BootstrapReplicationManager(inst, dn=args.bind_dn)
+ rdn = args.bind_dn.split(",", 1)[0]
+ rdn_attr, rdn_val = rdn.split("=", 1)
+ manager = BootstrapReplicationManager(inst, dn=args.bind_dn, rdn_attr=rdn_attr)
try:
manager.create(properties={
- 'cn': cn_val,
+ 'cn': rdn_val,
+ 'uid': rdn_val,
'userPassword': args.bind_passwd
})
except ldap.ALREADY_EXISTS:
# Already there, but could have different password. Delete and recreate
manager.delete()
manager.create(properties={
- 'cn': cn_val,
+ 'cn': rdn_val,
+ 'uid': rdn_val,
'userPassword': args.bind_passwd
})
except ldap.NO_SUCH_OBJECT:
@@ -511,22 +513,23 @@ def get_cl(inst, basedn, log, args):
def create_repl_manager(inst, basedn, log, args):
- manager_cn = "replication manager"
+ manager_name = "replication manager"
repl_manager_password = ""
repl_manager_password_confirm = ""
if args.name:
- manager_cn = args.name
-
- if is_a_dn(manager_cn):
- # A full DN was provided, make sure it uses "cn" for the RDN
- if manager_cn.split("=", 1)[0].lower() != "cn":
- raise ValueError("Replication manager DN must use \"cn\" for the rdn attribute")
- manager_dn = manager_cn
- manager_rdn = manager_dn.split(",", 1)[0]
- manager_cn = manager_rdn.split("=", 1)[1]
+ manager_name = args.name
+
+ if is_a_dn(manager_name):
+ # A full DN was provided
+ manager_dn = manager_name
+ manager_rdn = manager_name.split(",", 1)[0]
+ manager_attr, manager_name = manager_rdn.split("=", 1)
+ if manager_attr.lower() not in ['cn', 'uid']:
+ raise ValueError(f'The RDN attribute "{manager_attr}" is not allowed, you must use "cn" or "uid"')
else:
- manager_dn = "cn={},cn=config".format(manager_cn)
+ manager_dn = "cn={},cn=config".format(manager_name)
+ manager_attr = "cn"
if args.passwd:
repl_manager_password = args.passwd
@@ -544,10 +547,11 @@ def create_repl_manager(inst, basedn, log, args):
repl_manager_password = ""
repl_manager_password_confirm = ""
- manager = BootstrapReplicationManager(inst, dn=manager_dn)
+ manager = BootstrapReplicationManager(inst, dn=manager_dn, rdn_attr=manager_attr)
try:
manager.create(properties={
- 'cn': manager_cn,
+ 'cn': manager_name,
+ 'uid': manager_name,
'userPassword': repl_manager_password
})
if args.suffix:
@@ -564,7 +568,8 @@ def create_repl_manager(inst, basedn, log, args):
# Already there, but could have different password. Delete and recreate
manager.delete()
manager.create(properties={
- 'cn': manager_cn,
+ 'cn': manager_name,
+ 'uid': manager_name,
'userPassword': repl_manager_password
})
if args.suffix:
@@ -954,6 +959,7 @@ def get_winsync_agmt_status(inst, basedn, log, args):
status = agmt.status(winsync=True, use_json=args.json)
log.info(status)
+
#
# Tasks
#
@@ -1347,8 +1353,7 @@ def create_parser(subparsers):
agmt_set_parser.add_argument('--wait-async-results', help="The amount of time in milliseconds the server waits if "
"the consumer is not ready before resending data")
agmt_set_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after "
- "a consumer sends back a busy response before making another "
- "attempt to acquire access.")
+ "a consumer sends back a busy response before making another attempt to acquire access.")
agmt_set_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.")
agmt_set_parser.add_argument('--flow-control-window', help="Sets the maximum number of entries and updates sent by a supplier, which are not acknowledged by the consumer.")
agmt_set_parser.add_argument('--flow-control-pause', help="The time in milliseconds to pause after reaching the number of entries and updates set in \"--flow-control-window\"")
@@ -1438,8 +1443,7 @@ def create_parser(subparsers):
winsync_agmt_add_parser.add_argument('--subtree-pair', help="Set the subtree pair: <DS_SUBTREE>:<WINDOWS_SUBTREE>")
winsync_agmt_add_parser.add_argument('--conn-timeout', help="The timeout used for replicaton connections")
winsync_agmt_add_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after "
- "a consumer sends back a busy response before making another "
- "attempt to acquire access.")
+ "a consumer sends back a busy response before making another attempt to acquire access.")
winsync_agmt_add_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.")
winsync_agmt_add_parser.add_argument('--init', action='store_true', default=False, help="Initialize the agreement after creating it.")
@@ -1468,8 +1472,7 @@ def create_parser(subparsers):
winsync_agmt_set_parser.add_argument('--subtree-pair', help="Set the subtree pair: <DS_SUBTREE>:<WINDOWS_SUBTREE>")
winsync_agmt_set_parser.add_argument('--conn-timeout', help="The timeout used for replicaton connections")
winsync_agmt_set_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after "
- "a consumer sends back a busy response before making another "
- "attempt to acquire access.")
+ "a consumer sends back a busy response before making another attempt to acquire access.")
winsync_agmt_set_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.")
# Get
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index e3fc7fe1f..f8adb3ce2 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -1779,15 +1779,18 @@ class BootstrapReplicationManager(DSLdapObject):
:type instance: lib389.DirSrv
:param dn: The dn to create
:type dn: str
+ :param rdn_attr: The attribute to use for the RDN
+ :type rdn_attr: str
"""
- def __init__(self, instance, dn='cn=replication manager,cn=config'):
+ def __init__(self, instance, dn='cn=replication manager,cn=config', rdn_attr='cn'):
super(BootstrapReplicationManager, self).__init__(instance, dn)
- self._rdn_attribute = 'cn'
+ self._rdn_attribute = rdn_attr
self._must_attributes = ['cn', 'userPassword']
self._create_objectclasses = [
'top',
- 'netscapeServer',
- 'nsAccount'
+ 'inetUser', # for uid
+ 'netscapeServer', # for cn
+ 'nsAccount', # for authentication attributes
]
if ds_is_older('1.4.0'):
self._create_objectclasses.remove('nsAccount')
--
2.26.2

View File

@ -0,0 +1,34 @@
From ec85e986ec5710682de883f0f40f539b2f9945fa Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Wed, 27 May 2020 15:22:18 +0200
Subject: [PATCH 10/12] Issue 50931 - RFE AD filter rewriter for ObjectCategory
Bug Description:
The ASAN build fails on RHEL due to linking issues.
Fix Description:
Add the missing libslapd.la dependency to librewriters.la.
Relates: https://pagure.io/389-ds-base/issue/50931
Reviewed by: tbordaz (Thanks!)
---
Makefile.am | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile.am b/Makefile.am
index 2309f3010..0e5f04f91 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1159,7 +1159,7 @@ librewriters_la_SOURCES = \
librewriters_la_LDFLAGS = $(AM_LDFLAGS)
librewriters_la_CPPFLAGS = $(AM_CPPFLAGS) $(REWRITERS_INCLUDES) $(DSPLUGIN_CPPFLAGS)
-librewriters_la_LIBADD = $(NSS_LINK) $(NSPR_LINK)
+librewriters_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK)
#------------------------
# libsvrcore
--
2.26.2

File diff suppressed because it is too large

View File

@ -0,0 +1,54 @@
From 2540354b7eb6fa03db7d36a5b755001b0852aa1b Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Thu, 26 Mar 2020 19:33:47 +0100
Subject: [PATCH] Issue 50984 - Memory leaks in disk monitoring
Description: Memory leaks are reported by the disk monitoring test suite.
The direct leak is related to the char **dirs array, which is not freed at all.
Free the array when we clean up or go to shutdown.
Fix disk_monitoring_test.py::test_below_half_of_the_threshold_not_starting_after_shutdown.
It should accept a different exception when the instance is not started.
https://pagure.io/389-ds-base/issue/50984
Reviewed by: firstyear (Thanks!)
---
ldap/servers/slapd/daemon.c | 2 --
ldap/servers/slapd/main.c | 1 -
2 files changed, 3 deletions(-)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index a70f40316..542d31037 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -613,7 +613,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
slapi_ch_array_free(dirs);
- dirs = NULL;
return;
}
/*
@@ -713,7 +712,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
slapi_ch_array_free(dirs);
- dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */
g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL);
return;
}
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index e54b8e1c5..1f8b01959 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -958,7 +958,6 @@ main(int argc, char **argv)
goto cleanup;
}
slapi_ch_array_free(dirs);
- dirs = NULL;
}
/* log the max fd limit as it is typically set in env/systemd */
slapi_log_err(SLAPI_LOG_INFO, "main",
--
2.26.2

View File

@ -0,0 +1,52 @@
From a720e002751815323a295e11e77c56d7ce38314e Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 27 Mar 2020 11:35:55 +0100
Subject: [PATCH] Issue 50984 - Memory leaks in disk monitoring
Description: Reset the dirs pointer every time we free it.
The code may change in the future, so we should make it
more robust.
https://pagure.io/389-ds-base/issue/50984
Reviewed by: spichugi, tbordaz (one line commit rule)
---
ldap/servers/slapd/daemon.c | 2 ++
ldap/servers/slapd/main.c | 1 +
2 files changed, 3 insertions(+)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 542d31037..a70f40316 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -613,6 +613,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
slapi_ch_array_free(dirs);
+ dirs = NULL;
return;
}
/*
@@ -712,6 +713,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
slapi_ch_array_free(dirs);
+ dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */
g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL);
return;
}
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 1f8b01959..e54b8e1c5 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -958,6 +958,7 @@ main(int argc, char **argv)
goto cleanup;
}
slapi_ch_array_free(dirs);
+ dirs = NULL;
}
/* log the max fd limit as it is typically set in env/systemd */
slapi_log_err(SLAPI_LOG_INFO, "main",
--
2.26.2
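
Read together, the two patches above leave the daemon freeing the dirs array and then resetting the pointer. A small standalone sketch of that free-and-reset idiom (using plain malloc/free here rather than the slapi_ch_* allocators) shows why the reset is worth keeping: a later stray use of the pointer becomes an obvious NULL access or a safe no-op instead of a use-after-free or double free:

#include <stdlib.h>
#include <string.h>

/* Free a NULL-terminated array of strings and reset the caller's pointer,
 * mirroring the "slapi_ch_array_free(dirs); dirs = NULL;" pattern above.
 * Plain free() is used here; the real code uses the slapi_ch_* allocators. */
static void
free_string_array(char ***arrayp)
{
    if (arrayp == NULL || *arrayp == NULL) {
        return;
    }
    for (char **p = *arrayp; *p != NULL; p++) {
        free(*p);
    }
    free(*arrayp);
    *arrayp = NULL; /* later code cannot use the stale pointer by mistake */
}

int
main(void)
{
    char **dirs = calloc(3, sizeof(char *));
    dirs[0] = strdup("/var/lib/dirsrv/slapd-example/db");
    dirs[1] = strdup("/var/log/dirsrv/slapd-example");
    dirs[2] = NULL;

    free_string_array(&dirs);
    /* dirs is now NULL, so a repeated free or accidental reuse is caught
     * immediately rather than corrupting the heap. */
    free_string_array(&dirs); /* safe no-op */
    return 0;
}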

View File

@ -0,0 +1,569 @@
From f60364cd9472edc61e7d327d13dca67eadf0c5b2 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <simon.pichugin@gmail.com>
Date: Tue, 28 Apr 2020 23:44:20 +0200
Subject: [PATCH] Issue 50201 - nsIndexIDListScanLimit accepts any value
Bug Description: Malformed settings of nsIndexIDListScanLimit such as
'limit=2 limit=3' are detected and logged in the error log,
but the invalid value is still successfully applied to the config entry
and the operation itself succeeds.
The impact is limited because the index is used following the
idlistscanlimit setting rather than the invalid nsIndexIDListScanLimit definition.
Fix Description: Print the errors to the user when they try to add
or modify an index config entry with malformed values.
Change tests accordingly.
https://pagure.io/389-ds-base/issue/50201
Reviewed by: mreynolds, tbordaz (Thanks!)
---
.../suites/filter/filterscanlimit_test.py | 87 ++++++++-----------
ldap/servers/slapd/back-ldbm/instance.c | 4 +-
ldap/servers/slapd/back-ldbm/ldbm_attr.c | 33 ++++++-
.../slapd/back-ldbm/ldbm_index_config.c | 59 +++++++++----
ldap/servers/slapd/back-ldbm/ldif2ldbm.c | 2 +-
.../servers/slapd/back-ldbm/proto-back-ldbm.h | 2 +-
6 files changed, 114 insertions(+), 73 deletions(-)
diff --git a/dirsrvtests/tests/suites/filter/filterscanlimit_test.py b/dirsrvtests/tests/suites/filter/filterscanlimit_test.py
index dd9c6ee4e..0198f6533 100644
--- a/dirsrvtests/tests/suites/filter/filterscanlimit_test.py
+++ b/dirsrvtests/tests/suites/filter/filterscanlimit_test.py
@@ -11,6 +11,7 @@ This script will test different type of Filers.
"""
import os
+import ldap
import pytest
from lib389._constants import DEFAULT_SUFFIX, PW_DM
@@ -19,11 +20,10 @@ from lib389.idm.user import UserAccounts
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.index import Index
from lib389.idm.account import Accounts
-from lib389.idm.group import UniqueGroups, Group
+from lib389.idm.group import UniqueGroups
pytestmark = pytest.mark.tier1
-
GIVEN_NAME = 'cn=givenname,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
CN_NAME = 'cn=sn,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
UNIQMEMBER = 'cn=uniquemember,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
@@ -39,7 +39,6 @@ LIST_OF_USER_ACCOUNTING = [
"Judy Wallace",
"Marcus Ward",
"Judy McFarland",
- "Anuj Hall",
"Gern Triplett",
"Emanuel Johnson",
"Brad Walker",
@@ -57,7 +56,6 @@ LIST_OF_USER_ACCOUNTING = [
"Randy Ulrich",
"Richard Francis",
"Morgan White",
- "Anuj Maddox",
"Jody Jensen",
"Mike Carter",
"Gern Tyler",
@@ -77,8 +75,6 @@ LIST_OF_USER_HUMAN = [
"Robert Daugherty",
"Torrey Mason",
"Brad Talbot",
- "Anuj Jablonski",
- "Harry Miller",
"Jeffrey Campaigne",
"Stephen Triplett",
"John Falena",
@@ -107,8 +103,7 @@ LIST_OF_USER_HUMAN = [
"Tobias Schmith",
"Jon Goldstein",
"Janet Lutz",
- "Karl Cope",
-]
+ "Karl Cope"]
LIST_OF_USER_TESTING = [
"Andy Bergin",
@@ -122,8 +117,7 @@ LIST_OF_USER_TESTING = [
"Alan White",
"Daniel Ward",
"Lee Stockton",
- "Matthew Vaughan"
-]
+ "Matthew Vaughan"]
LIST_OF_USER_DEVELOPMENT = [
"Kelly Winters",
@@ -143,7 +137,6 @@ LIST_OF_USER_DEVELOPMENT = [
"Timothy Kelly",
"Sue Mason",
"Chris Alexander",
- "Anuj Jensen",
"Martin Talbot",
"Scott Farmer",
"Allison Jensen",
@@ -152,9 +145,7 @@ LIST_OF_USER_DEVELOPMENT = [
"Dan Langdon",
"Ashley Knutson",
"Jon Bourke",
- "Pete Hunt",
-
-]
+ "Pete Hunt"]
LIST_OF_USER_PAYROLL = [
"Ashley Chassin",
@@ -164,12 +155,17 @@ LIST_OF_USER_PAYROLL = [
"Patricia Shelton",
"Dietrich Swain",
"Allison Hunter",
- "Anne-Louise Barnes"
+ "Anne-Louise Barnes"]
-]
+LIST_OF_USER_PEOPLE = [
+ 'Sam Carter',
+ 'Tom Morris',
+ 'Kevin Vaughan',
+ 'Rich Daugherty',
+ 'Harry Miller',
+ 'Sam Schmith']
-@pytest.mark.skip(reason="https://pagure.io/389-ds-base/issue/50201")
def test_invalid_configuration(topo):
""""
Error handling for invalid configuration
@@ -190,10 +186,7 @@ def test_invalid_configuration(topo):
'limit=0 flags=AND flags=AND',
'limit=0 type=eq values=foo values=foo',
'limit=0 type=eq values=foo,foo',
- 'limit=0 type=sub',
- 'limit=0 type=eq values=notvalid',
'limit',
- 'limit=0 type=eq values=notavaliddn',
'limit=0 type=pres values=bogus',
'limit=0 type=eq,sub values=bogus',
'limit=',
@@ -203,7 +196,8 @@ def test_invalid_configuration(topo):
'limit=-2',
'type=eq',
'limit=0 type=bogus']:
- Index(topo.standalone, GIVEN_NAME).replace('nsIndexIDListScanLimit', i)
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+ Index(topo.standalone, GIVEN_NAME).replace('nsIndexIDListScanLimit', i)
def test_idlistscanlimit(topo):
@@ -247,28 +241,24 @@ def test_idlistscanlimit(topo):
(LIST_OF_USER_HUMAN, users_human),
(LIST_OF_USER_TESTING, users_testing),
(LIST_OF_USER_DEVELOPMENT, users_development),
- (LIST_OF_USER_PAYROLL, users_payroll)]:
+ (LIST_OF_USER_PAYROLL, users_payroll),
+ (LIST_OF_USER_PEOPLE, users_people)]:
for demo1 in data[0]:
+ fn = demo1.split()[0]
+ sn = demo1.split()[1]
+ uid = ''.join([fn[:1], sn]).lower()
data[1].create(properties={
- 'uid': demo1,
+ 'uid': uid,
'cn': demo1,
- 'sn': demo1.split()[1],
+ 'sn': sn,
'uidNumber': str(1000),
'gidNumber': '2000',
- 'homeDirectory': '/home/' + demo1,
- 'givenname': demo1.split()[0],
- 'userpassword': PW_DM
+ 'homeDirectory': f'/home/{uid}',
+ 'givenname': fn,
+ 'userpassword': PW_DM,
+ 'mail': f'{uid}@test.com'
})
- users_people.create(properties={
- 'uid': 'scarter',
- 'cn': 'Sam Carter',
- 'sn': 'Carter',
- 'uidNumber': str(1000),
- 'gidNumber': '2000',
- 'homeDirectory': '/home/' + 'scarter',
- 'mail': 'scarter@anuj.com',
- })
try:
# Change log levels
errorlog_value = topo.standalone.config.get_attr_val_utf8('nsslapd-errorlog-level')
@@ -297,16 +287,12 @@ def test_idlistscanlimit(topo):
Index(topo.standalone, UNIQMEMBER).\
replace('nsIndexIDListScanLimit',
- 'limit=0 type=eq values=uid=kvaughan,ou=People,'
- 'dc=example,dc=com,uid=rdaugherty,ou=People,dc=example,dc=com')
+ 'limit=0 type=eq values=uid=kvaughan\2Cou=People\2Cdc=example\2Cdc=com,'
+ 'uid=rdaugherty\2Cou=People\2Cdc=example\2Cdc=com')
Index(topo.standalone, OBJECTCLASS).\
replace('nsIndexIDListScanLimit', 'limit=0 type=eq flags=AND values=inetOrgPerson')
- Index(topo.standalone, MAIL).\
- replace('nsIndexIDListScanLimit',
- 'cn=mail,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config')
-
# Search with filter
for i in ['(sn=Lutz)',
'(sn=*ter)',
@@ -321,22 +307,24 @@ def test_idlistscanlimit(topo):
'(&(sn=*)(cn=*))',
'(sn=Hunter)',
'(&(givenname=Richard)(objectclass=organizationalPerson))',
- '(givenname=Anuj)',
+ '(givenname=Morgan)',
'(&(givenname=*)(cn=*))',
'(givenname=*)']:
assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(f'{i}')
- # Creating Group
- Group(topo.standalone, 'cn=Accounting Managers,ou=groups,dc=example,dc=com').\
- add('uniquemember',
+ # Creating Groups and adding members
+ groups = UniqueGroups(topo.standalone, DEFAULT_SUFFIX)
+ accounting_managers = groups.ensure_state(properties={'cn': 'Accounting Managers'})
+ hr_managers = groups.ensure_state(properties={'cn': 'HR Managers'})
+
+ accounting_managers.add('uniquemember',
['uid=scarter, ou=People, dc=example,dc=com',
'uid=tmorris, ou=People, dc=example,dc=com',
'uid=kvaughan, ou=People, dc=example,dc=com',
'uid=rdaugherty, ou=People, dc=example,dc=com',
'uid=hmiller, ou=People, dc=example,dc=com'])
- Group(topo.standalone, 'cn=HR Managers,ou=groups,dc=example,dc=com').\
- add('uniquemember',
+ hr_managers.add('uniquemember',
['uid=kvaughan, ou=People, dc=example,dc=com',
'uid=cschmith, ou=People, dc=example,dc=com'])
@@ -403,10 +391,9 @@ def test_idlistscanlimit(topo):
'(&(sn=*)(cn=*))',
'(sn=Hunter)',
'(&(givenname=Richard)(objectclass=organizationalPerson))',
- '(givenname=Anuj)',
+ '(givenname=Morgan)',
'(&(givenname=*)(cn=*))',
'(givenname=*)']:
-
assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(value)
finally:
diff --git a/ldap/servers/slapd/back-ldbm/instance.c b/ldap/servers/slapd/back-ldbm/instance.c
index 04c28ff39..07655a8ec 100644
--- a/ldap/servers/slapd/back-ldbm/instance.c
+++ b/ldap/servers/slapd/back-ldbm/instance.c
@@ -231,7 +231,7 @@ ldbm_instance_create_default_indexes(backend *be)
/* ldbm_instance_config_add_index_entry(inst, 2, argv); */
e = ldbm_instance_init_config_entry(LDBM_PSEUDO_ATTR_DEFAULT, "none", 0, 0, 0);
- attr_index_config(be, "ldbm index init", 0, e, 1, 0);
+ attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL);
slapi_entry_free(e);
if (!entryrdn_get_noancestorid()) {
@@ -240,7 +240,7 @@ ldbm_instance_create_default_indexes(backend *be)
* but we still want to use the attr index file APIs.
*/
e = ldbm_instance_init_config_entry(LDBM_ANCESTORID_STR, "eq", 0, 0, 0);
- attr_index_config(be, "ldbm index init", 0, e, 1, 0);
+ attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL);
slapi_entry_free(e);
}
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
index b9e130d77..f0d418572 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
@@ -633,6 +633,18 @@ attr_index_idlistsize_config(Slapi_Entry *e, struct attrinfo *ai, char *returnte
return rc;
}
+/*
+ * Function that process index attributes and modifies attrinfo structure
+ *
+ * Called while adding default indexes, during db2index execution and
+ * when we add/modify/delete index config entry
+ *
+ * If char *err_buf is not NULL, it will additionally print all error messages to STDERR
+ * It is used when we add/modify/delete index config entry, so the user would have a better verbose
+ *
+ * returns -1, 1 on a failure
+ * 0 on success
+ */
int
attr_index_config(
backend *be,
@@ -640,7 +652,8 @@ attr_index_config(
int lineno,
Slapi_Entry *e,
int init __attribute__((unused)),
- int indextype_none)
+ int indextype_none,
+ char *err_buf)
{
ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
int j = 0;
@@ -662,6 +675,7 @@ attr_index_config(
slapi_attr_first_value(attr, &sval);
attrValue = slapi_value_get_berval(sval);
} else {
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: missing indexing arguments\n");
slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "Missing indexing arguments\n");
return -1;
}
@@ -705,6 +719,10 @@ attr_index_config(
}
a->ai_indexmask = INDEX_OFFLINE; /* note that the index isn't available */
} else {
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: %s: line %d: unknown index type \"%s\" (ignored) in entry (%s), "
+ "valid index types are \"pres\", \"eq\", \"approx\", or \"sub\"\n",
+ fname, lineno, attrValue->bv_val, slapi_entry_get_dn(e));
slapi_log_err(SLAPI_LOG_ERR, "attr_index_config",
"%s: line %d: unknown index type \"%s\" (ignored) in entry (%s), "
"valid index types are \"pres\", \"eq\", \"approx\", or \"sub\"\n",
@@ -715,6 +733,7 @@ attr_index_config(
}
if (hasIndexType == 0) {
/* indexType missing, error out */
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: missing index type\n");
slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "Missing index type\n");
attrinfo_delete(&a);
return -1;
@@ -873,16 +892,26 @@ attr_index_config(
slapi_ch_free((void **)&official_rules);
}
}
-
if ((return_value = attr_index_idlistsize_config(e, a, myreturntext))) {
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: %s: Failed to parse idscanlimit info: %d:%s\n",
+ fname, return_value, myreturntext);
slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "%s: Failed to parse idscanlimit info: %d:%s\n",
fname, return_value, myreturntext);
+ if (err_buf != NULL) {
+ /* we are inside of a callback, we shouldn't allow malformed attributes in index entries */
+ attrinfo_delete(&a);
+ return return_value;
+ }
}
/* initialize the IDL code's private data */
return_value = idl_init_private(be, a);
if (0 != return_value) {
/* fatal error, exit */
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: %s: line %d:Fatal Error: Failed to initialize attribute structure\n",
+ fname, lineno);
slapi_log_err(SLAPI_LOG_CRIT, "attr_index_config",
"%s: line %d:Fatal Error: Failed to initialize attribute structure\n",
fname, lineno);
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
index 45f0034f0..720f93036 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
@@ -25,26 +25,34 @@ int ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, Slapi_Entry *en
#define INDEXTYPE_NONE 1
static int
-ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name)
+ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, char *err_buf)
{
Slapi_Attr *attr;
const struct berval *attrValue;
Slapi_Value *sval;
+ char *edn = slapi_entry_get_dn(e);
/* Get the name of the attribute to index which will be the value
* of the cn attribute. */
if (slapi_entry_attr_find(e, "cn", &attr) != 0) {
- slapi_log_err(SLAPI_LOG_ERR, "ldbm_index_parse_entry", "Malformed index entry %s\n",
- slapi_entry_get_dn(e));
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s\n",
+ edn);
+ slapi_log_err(SLAPI_LOG_ERR,
+ "ldbm_index_parse_entry", "Malformed index entry %s\n",
+ edn);
return LDAP_OPERATIONS_ERROR;
}
slapi_attr_first_value(attr, &sval);
attrValue = slapi_value_get_berval(sval);
if (NULL == attrValue->bv_val || 0 == attrValue->bv_len) {
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s -- empty index name\n",
+ edn);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_index_parse_entry", "Malformed index entry %s -- empty index name\n",
- slapi_entry_get_dn(e));
+ edn);
return LDAP_OPERATIONS_ERROR;
}
@@ -59,16 +67,19 @@ ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_st
attrValue = slapi_value_get_berval(sval);
if (NULL == attrValue->bv_val || attrValue->bv_len == 0) {
/* missing the index type, error out */
- slapi_log_err(SLAPI_LOG_ERR,
- "ldbm_index_parse_entry", "Malformed index entry %s -- empty nsIndexType\n",
- slapi_entry_get_dn(e));
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s -- empty nsIndexType\n",
+ edn);
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_index_parse_entry",
+ "Malformed index entry %s -- empty nsIndexType\n",
+ edn);
slapi_ch_free_string(index_name);
return LDAP_OPERATIONS_ERROR;
}
}
/* ok the entry is good to process, pass it to attr_index_config */
- if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0)) {
+ if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0, err_buf)) {
slapi_ch_free_string(index_name);
return LDAP_OPERATIONS_ERROR;
}
@@ -92,7 +103,7 @@ ldbm_index_init_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
ldbm_instance *inst = (ldbm_instance *)arg;
returntext[0] = '\0';
- *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL);
+ *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, NULL);
if (*returncode == LDAP_SUCCESS) {
return SLAPI_DSE_CALLBACK_OK;
} else {
@@ -117,7 +128,7 @@ ldbm_instance_index_config_add_callback(Slapi_PBlock *pb __attribute__((unused))
char *index_name = NULL;
returntext[0] = '\0';
- *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name);
+ *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, returntext);
if (*returncode == LDAP_SUCCESS) {
struct attrinfo *ai = NULL;
/* if the index is a "system" index, we assume it's being added by
@@ -179,7 +190,7 @@ ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb,
slapi_attr_first_value(attr, &sval);
attrValue = slapi_value_get_berval(sval);
- attr_index_config(inst->inst_be, "From DSE delete", 0, e, 0, INDEXTYPE_NONE);
+ attr_index_config(inst->inst_be, "From DSE delete", 0, e, 0, INDEXTYPE_NONE, returntext);
ainfo_get(inst->inst_be, attrValue->bv_val, &ainfo);
if (NULL == ainfo) {
@@ -213,14 +224,19 @@ ldbm_instance_index_config_modify_callback(Slapi_PBlock *pb __attribute__((unuse
Slapi_Value *sval;
const struct berval *attrValue;
struct attrinfo *ainfo = NULL;
+ char *edn = slapi_entry_get_dn(e);
+ char *edn_after = slapi_entry_get_dn(entryAfter);
returntext[0] = '\0';
*returncode = LDAP_SUCCESS;
if (slapi_entry_attr_find(entryAfter, "cn", &attr) != 0) {
+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s - missing cn attribute\n",
+ edn_after);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing cn attribute\n",
- slapi_entry_get_dn(entryAfter));
+ edn_after);
*returncode = LDAP_OBJECT_CLASS_VIOLATION;
return SLAPI_DSE_CALLBACK_ERROR;
}
@@ -228,31 +244,40 @@ ldbm_instance_index_config_modify_callback(Slapi_PBlock *pb __attribute__((unuse
attrValue = slapi_value_get_berval(sval);
if (NULL == attrValue->bv_val || 0 == attrValue->bv_len) {
+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s - missing index name\n",
+ edn);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_instance_index_config_modify_callback", "Malformed index entry %s, missing index name\n",
- slapi_entry_get_dn(e));
+ edn);
*returncode = LDAP_UNWILLING_TO_PERFORM;
return SLAPI_DSE_CALLBACK_ERROR;
}
ainfo_get(inst->inst_be, attrValue->bv_val, &ainfo);
if (NULL == ainfo) {
+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s - missing cn attribute info\n",
+ edn);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing cn attribute info\n",
- slapi_entry_get_dn(e));
+ edn);
*returncode = LDAP_UNWILLING_TO_PERFORM;
return SLAPI_DSE_CALLBACK_ERROR;
}
if (slapi_entry_attr_find(entryAfter, "nsIndexType", &attr) != 0) {
+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s - missing nsIndexType attribute\n",
+ edn_after);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing nsIndexType attribute\n",
- slapi_entry_get_dn(entryAfter));
+ edn_after);
*returncode = LDAP_OBJECT_CLASS_VIOLATION;
return SLAPI_DSE_CALLBACK_ERROR;
}
- if (attr_index_config(inst->inst_be, "from DSE modify", 0, entryAfter, 0, 0)) {
+ if (attr_index_config(inst->inst_be, "from DSE modify", 0, entryAfter, 0, 0, returntext)) {
*returncode = LDAP_UNWILLING_TO_PERFORM;
return SLAPI_DSE_CALLBACK_ERROR;
}
@@ -364,7 +389,7 @@ ldbm_instance_index_config_enable_index(ldbm_instance *inst, Slapi_Entry *e)
ainfo_get(inst->inst_be, index_name, &ai);
}
if (!ai) {
- rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name);
+ rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, NULL);
}
if (rc == LDAP_SUCCESS) {
/* Assume the caller knows if it is OK to go online immediately */
diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
index 9d82c8228..f2ef5ecd4 100644
--- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
+++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
@@ -291,7 +291,7 @@ db2index_add_indexed_attr(backend *be, char *attrString)
}
}
- attr_index_config(be, "from db2index()", 0, e, 0, 0);
+ attr_index_config(be, "from db2index()", 0, e, 0, 0, NULL);
slapi_entry_free(e);
return (0);
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index 9a86c752b..a07acee5e 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -24,7 +24,7 @@ void attrinfo_delete(struct attrinfo **pp);
void ainfo_get(backend *be, char *type, struct attrinfo **at);
void attr_masks(backend *be, char *type, int *indexmask, int *syntaxmask);
void attr_masks_ex(backend *be, char *type, int *indexmask, int *syntaxmask, struct attrinfo **at);
-int attr_index_config(backend *be, char *fname, int lineno, Slapi_Entry *e, int init, int none);
+int attr_index_config(backend *be, char *fname, int lineno, Slapi_Entry *e, int init, int none, char *err_buf);
int db2index_add_indexed_attr(backend *be, char *attrString);
int ldbm_compute_init(void);
void attrinfo_deletetree(ldbm_instance *inst);
--
2.26.2

View File

@ -0,0 +1,213 @@
From 3b3faee01e645577ad77ff4f38429a9e0806231b Mon Sep 17 00:00:00 2001
From: Simon Pichugin <simon.pichugin@gmail.com>
Date: Tue, 16 Jun 2020 20:35:05 +0200
Subject: [PATCH] Issue 51157 - Reindex task may create abandoned index file
Bug Description: Recreating an index for the same attribute but changing
the case of, for example, one letter results in an abandoned index file.
Fix Description: Add a test case to a newly created 'indexes' test suite.
When we remove the index config from the backend, also remove the attribute
info from the LDBM instance attributes.
https://pagure.io/389-ds-base/issue/51157
Reviewed by: firstyear, mreynolds (Thanks!)
---
dirsrvtests/tests/suites/indexes/__init__.py | 3 +
.../tests/suites/indexes/regression_test.py | 125 ++++++++++++++++++
ldap/servers/slapd/back-ldbm/ldbm_attr.c | 7 +
.../slapd/back-ldbm/ldbm_index_config.c | 3 +
.../servers/slapd/back-ldbm/proto-back-ldbm.h | 1 +
5 files changed, 139 insertions(+)
create mode 100644 dirsrvtests/tests/suites/indexes/__init__.py
create mode 100644 dirsrvtests/tests/suites/indexes/regression_test.py
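As an aside, the leftover file described above can be spotted by comparing the .db files on disk with the indexes the backend actually has configured. A rough sketch using the same lib389 objects as the regression test below (the path layout and the name comparison are simplified, and system files such as id2entry.db would also show up, so this is only illustrative):

    import os
    from lib389._constants import DEFAULT_BENAME
    from lib389.backend import Backends

    def stale_index_files(inst):
        """List .db files in the backend directory with no matching index entry."""
        backend = Backends(inst).get(DEFAULT_BENAME)
        configured = {idx.get_attr_val_utf8('cn') for idx in backend.get_indexes().list()}
        db_dir = f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}"
        return [name for name in os.listdir(db_dir)
                if name.endswith('.db') and name[:-len('.db')] not in configured]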
diff --git a/dirsrvtests/tests/suites/indexes/__init__.py b/dirsrvtests/tests/suites/indexes/__init__.py
new file mode 100644
index 000000000..04441667e
--- /dev/null
+++ b/dirsrvtests/tests/suites/indexes/__init__.py
@@ -0,0 +1,3 @@
+"""
+ :Requirement: 389-ds-base: Indexes
+"""
diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
new file mode 100644
index 000000000..1a71f16e9
--- /dev/null
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
@@ -0,0 +1,125 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import time
+import os
+import pytest
+import ldap
+from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX
+from lib389.index import Indexes
+from lib389.backend import Backends
+from lib389.idm.user import UserAccounts
+from lib389.topologies import topology_st as topo
+
+pytestmark = pytest.mark.tier1
+
+
+def test_reindex_task_creates_abandoned_index_file(topo):
+ """
+ Recreating an index for the same attribute but changing
+ the case of for example 1 letter, results in abandoned indexfile
+
+ :id: 07ae5274-481a-4fa8-8074-e0de50d89ac6
+ :setup: Standalone instance
+ :steps:
+ 1. Create a user object with additional attributes:
+ objectClass: mozillaabpersonalpha
+ mozillaCustom1: xyz
+ 2. Add an index entry mozillacustom1
+ 3. Reindex the backend
+ 4. Check the content of the index (after it has been flushed to disk) mozillacustom1.db
+ 5. Remove the index
+ 6. Notice the mozillacustom1.db is removed
+ 7. Recreate the index but now use the exact case as mentioned in the schema
+ 8. Reindex the backend
+ 9. Check the content of the index (after it has been flushed to disk) mozillaCustom1.db
+ 10. Check that an ldapsearch does not return a result (mozillacustom1=xyz)
+ 11. Check that an ldapsearch returns the results (mozillaCustom1=xyz)
+ 12. Restart the instance
+ 13. Notice that an ldapsearch does not return a result(mozillacustom1=xyz)
+ 15. Check that an ldapsearch does not return a result (mozillacustom1=xyz)
+ 16. Check that an ldapsearch returns the results (mozillaCustom1=xyz)
+ 17. Reindex the backend
+ 18. Notice the second indexfile for this attribute
+ 19. Check the content of the index (after it has been flushed to disk) no mozillacustom1.db
+ 20. Check the content of the index (after it has been flushed to disk) mozillaCustom1.db
+ :expectedresults:
+ 1. Should Success.
+ 2. Should Success.
+ 3. Should Success.
+ 4. Should Success.
+ 5. Should Success.
+ 6. Should Success.
+ 7. Should Success.
+ 8. Should Success.
+ 9. Should Success.
+ 10. Should Success.
+ 11. Should Success.
+ 12. Should Success.
+ 13. Should Success.
+ 14. Should Success.
+ 15. Should Success.
+ 16. Should Success.
+ 17. Should Success.
+ 18. Should Success.
+ 19. Should Success.
+ 20. Should Success.
+ """
+
+ inst = topo.standalone
+ attr_name = "mozillaCustom1"
+ attr_value = "xyz"
+
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ user = users.create_test_user()
+ user.add("objectClass", "mozillaabpersonalpha")
+ user.add(attr_name, attr_value)
+
+ backends = Backends(inst)
+ backend = backends.get(DEFAULT_BENAME)
+ indexes = backend.get_indexes()
+ index = indexes.create(properties={
+ 'cn': attr_name.lower(),
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': ['eq', 'pres']
+ })
+
+ backend.reindex()
+ time.sleep(3)
+ assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
+ index.delete()
+ assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
+
+ index = indexes.create(properties={
+ 'cn': attr_name,
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': ['eq', 'pres']
+ })
+
+ backend.reindex()
+ time.sleep(3)
+ assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
+ assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name}.db")
+
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f"{attr_name}={attr_value}")
+ assert len(entries) > 0
+ inst.restart()
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f"{attr_name}={attr_value}")
+ assert len(entries) > 0
+
+ backend.reindex()
+ time.sleep(3)
+ assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
+ assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name}.db")
+
+
+if __name__ == "__main__":
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
index f0d418572..688c4f137 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
@@ -98,6 +98,13 @@ ainfo_cmp(
return (strcasecmp(a->ai_type, b->ai_type));
}
+void
+attrinfo_delete_from_tree(backend *be, struct attrinfo *ai)
+{
+ ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
+ avl_delete(&inst->inst_attrs, ai, ainfo_cmp);
+}
+
/*
* Called when a duplicate "index" line is encountered.
*
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
index 720f93036..9722d0ce7 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
@@ -201,7 +201,10 @@ ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb,
*returncode = LDAP_UNWILLING_TO_PERFORM;
rc = SLAPI_DSE_CALLBACK_ERROR;
}
+ attrinfo_delete_from_tree(inst->inst_be, ainfo);
}
+ /* Free attrinfo structure */
+ attrinfo_delete(&ainfo);
bail:
return rc;
}
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index a07acee5e..4d2524fd9 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -21,6 +21,7 @@
*/
struct attrinfo *attrinfo_new(void);
void attrinfo_delete(struct attrinfo **pp);
+void attrinfo_delete_from_tree(backend *be, struct attrinfo *ai);
void ainfo_get(backend *be, char *type, struct attrinfo **at);
void attr_masks(backend *be, char *type, int *indexmask, int *syntaxmask);
void attr_masks_ex(backend *be, char *type, int *indexmask, int *syntaxmask, struct attrinfo **at);
--
2.26.2

View File

@ -0,0 +1,668 @@
From 282edde7950ceb2515d74fdbcc0a188131769d74 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 23 Jun 2020 16:38:55 -0400
Subject: [PATCH] Issue 51165 - add new access log keywords for wtime and
optime
Description: In addition to the "etime" stat in the access log we can also
add the time the operation spent in the work queue, and
how long the actual operation took. We now have "wtime"
and "optime" to track these stats in the access log.
Also updated logconv.pl for notes=F (related to a different
ticket), and stats for wtime and optime.
relates: https://pagure.io/389-ds-base/issue/51165
Reviewed by: ?
---
ldap/admin/src/logconv.pl | 187 +++++++++++++++++++++++++++---
ldap/servers/slapd/add.c | 3 +
ldap/servers/slapd/bind.c | 4 +
ldap/servers/slapd/delete.c | 3 +
ldap/servers/slapd/modify.c | 3 +
ldap/servers/slapd/modrdn.c | 3 +
ldap/servers/slapd/operation.c | 24 ++++
ldap/servers/slapd/opshared.c | 3 +
ldap/servers/slapd/result.c | 49 ++++----
ldap/servers/slapd/slap.h | 13 ++-
ldap/servers/slapd/slapi-plugin.h | 26 ++++-
11 files changed, 269 insertions(+), 49 deletions(-)
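For reference, a RESULT line in the new format and a minimal sketch of pulling the three stats out of it (the sample line and its values are made up; only the field names and their ordering come from this patch):

    import re

    line = ('conn=15 op=3 RESULT err=0 tag=101 nentries=1 '
            'wtime=0.000044066 optime=0.000803298 etime=0.000847193')

    stats = {k: float(v) for k, v in re.findall(r'(wtime|optime|etime)=([0-9.]+)', line)}
    print(stats)
    # etime covers the whole operation, so it should be roughly wtime + optime
    assert abs(stats['etime'] - (stats['wtime'] + stats['optime'])) < 0.001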
diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
index f4808a101..1ed44a888 100755
--- a/ldap/admin/src/logconv.pl
+++ b/ldap/admin/src/logconv.pl
@@ -3,7 +3,7 @@
#
# BEGIN COPYRIGHT BLOCK
# Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
-# Copyright (C) 2013 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -55,7 +55,7 @@ my $reportStats = "";
my $dataLocation = "/tmp";
my $startTLSoid = "1.3.6.1.4.1.1466.20037";
my @statnames=qw(last last_str results srch add mod modrdn moddn cmp del abandon
- conns sslconns bind anonbind unbind notesA notesU etime);
+ conns sslconns bind anonbind unbind notesA notesU notesF etime);
my $s_stats;
my $m_stats;
my $verb = "no";
@@ -211,6 +211,7 @@ my $sslClientBindCount = 0;
my $sslClientFailedCount = 0;
my $objectclassTopCount= 0;
my $pagedSearchCount = 0;
+my $invalidFilterCount = 0;
my $bindCount = 0;
my $filterCount = 0;
my $baseCount = 0;
@@ -258,7 +259,7 @@ map {$conn{$_} = $_} @conncodes;
# hash db-backed hashes
my @hashnames = qw(attr rc src rsrc excount conn_hash ip_hash conncount nentries
filter base ds6xbadpwd saslmech saslconnop bindlist etime oid
- start_time_of_connection end_time_of_connection
+ start_time_of_connection end_time_of_connection notesf_conn_op
notesa_conn_op notesu_conn_op etime_conn_op nentries_conn_op
optype_conn_op time_conn_op srch_conn_op del_conn_op mod_conn_op
mdn_conn_op cmp_conn_op bind_conn_op unbind_conn_op ext_conn_op
@@ -926,7 +927,7 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){
}
while($op > 0){
# The bind op is not the same as the search op that triggered the notes=A.
- # We have adjust the key by decrementing the op count until we find the last bind op.
+ # We have to adjust the key by decrementing the op count until we find the last bind op.
$op--;
$binddn_key = "$srvRstCnt,$conn,$op";
if (exists($bind_conn_op->{$binddn_key}) && defined($bind_conn_op->{$binddn_key})) {
@@ -1049,9 +1050,60 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){
}
}
}
-} # end of unindexed search report
+ print "\n";
+}
+
+print "Invalid Attribute Filters: $invalidFilterCount\n";
+if ($invalidFilterCount > 0 && $verb eq "yes"){
+ my $conn_hash = $hashes->{conn_hash};
+ my $notesf_conn_op = $hashes->{notesf_conn_op};
+ my $time_conn_op = $hashes->{time_conn_op};
+ my $etime_conn_op = $hashes->{etime_conn_op};
+ my $nentries_conn_op = $hashes->{nentries_conn_op};
+ my $filter_conn_op = $hashes->{filter_conn_op};
+ my $bind_conn_op = $hashes->{bind_conn_op};
+ my $notesCount = 1;
+ my $unindexedIp;
+ my $binddn_key;
+ my %uniqFilt = (); # hash of unique filters
+ my %uniqFilter = (); # hash of unique filters bind dn
+ my %uniqBindDNs = (); # hash of unique bind dn's
+ my %uniqBindFilters = (); # hash of filters for a bind DN
+
+ while (my ($srcnt_conn_op, $count) = each %{$notesf_conn_op}) {
+ my ($srvRstCnt, $conn, $op) = split(",", $srcnt_conn_op);
+ my $attrIp = getIPfromConn($conn, $srvRstCnt);
+ print "\n Invalid Attribute Filter #".$notesCount." (notes=F)\n";
+ print " - Date/Time: $time_conn_op->{$srcnt_conn_op}\n";
+ print " - Connection Number: $conn\n";
+ print " - Operation Number: $op\n";
+ print " - Etime: $etime_conn_op->{$srcnt_conn_op}\n";
+ print " - Nentries: $nentries_conn_op->{$srcnt_conn_op}\n";
+ print " - IP Address: $attrIp\n";
+ if (exists($filter_conn_op->{$srcnt_conn_op}) && defined($filter_conn_op->{$srcnt_conn_op})) {
+ print " - Search Filter: $filter_conn_op->{$srcnt_conn_op}\n";
+ $uniqFilt{$filter_conn_op->{$srcnt_conn_op}}++;
+ }
+ while($op > 0){
+ # The bind op is not the same as the search op that triggered the notes=A.
+ # We have to adjust the key by decrementing the op count until we find the last bind op.
+ $op--;
+ $binddn_key = "$srvRstCnt,$conn,$op";
+ if (exists($bind_conn_op->{$binddn_key}) && defined($bind_conn_op->{$binddn_key})) {
+ print " - Bind DN: $bind_conn_op->{$binddn_key}\n";
+ $uniqBindDNs{$bind_conn_op->{$binddn_key}}++;
+ if( $uniqFilt{$filter_conn_op->{$srcnt_conn_op}} && defined($filter_conn_op->{$srcnt_conn_op})) {
+ $uniqBindFilters{$bind_conn_op->{$binddn_key}}{$filter_conn_op->{$srcnt_conn_op}}++;
+ $uniqFilter{$filter_conn_op->{$srcnt_conn_op}}{$bind_conn_op->{$binddn_key}}++;
+ }
+ last;
+ }
+ }
+ $notesCount++;
+ }
+ print "\n";
+}
-print "\n";
print "FDs Taken: $fdTaken\n";
print "FDs Returned: $fdReturned\n";
print "Highest FD Taken: $highestFdTaken\n\n";
@@ -1386,20 +1438,20 @@ if ($usage =~ /l/ || $verb eq "yes"){
}
}
-#########################################
-# #
-# Gather and Process the unique etimes #
-# #
-#########################################
+##############################################################
+# #
+# Gather and Process the unique etimes, wtimes, and optimes #
+# #
+##############################################################
my $first;
if ($usage =~ /t/i || $verb eq "yes"){
+ # Print the elapsed times (etime)
+
my $etime = $hashes->{etime};
my @ekeys = keys %{$etime};
- #
# print most often etimes
- #
- print "\n\n----- Top $sizeCount Most Frequent etimes -----\n\n";
+ print "\n\n----- Top $sizeCount Most Frequent etimes (elapsed times) -----\n\n";
my $eloop = 0;
my $retime = 0;
foreach my $et (sort { $etime->{$b} <=> $etime->{$a} } @ekeys) {
@@ -1411,16 +1463,84 @@ if ($usage =~ /t/i || $verb eq "yes"){
printf "%-8s %-12s\n", $etime->{ $et }, "etime=$et";
$eloop++;
}
- #
+ if ($eloop == 0) {
+ print "None";
+ }
# print longest etimes
- #
- print "\n\n----- Top $sizeCount Longest etimes -----\n\n";
+ print "\n\n----- Top $sizeCount Longest etimes (elapsed times) -----\n\n";
$eloop = 0;
foreach my $et (sort { $b <=> $a } @ekeys) {
if ($eloop == $sizeCount) { last; }
printf "%-12s %-10s\n","etime=$et",$etime->{ $et };
$eloop++;
}
+ if ($eloop == 0) {
+ print "None";
+ }
+
+ # Print the wait times (wtime)
+
+ my $wtime = $hashes->{wtime};
+ my @wkeys = keys %{$wtime};
+ # print most often wtimes
+ print "\n\n----- Top $sizeCount Most Frequent wtimes (wait times) -----\n\n";
+ $eloop = 0;
+ $retime = 0;
+ foreach my $et (sort { $wtime->{$b} <=> $wtime->{$a} } @wkeys) {
+ if ($eloop == $sizeCount) { last; }
+ if ($retime ne "2"){
+ $first = $et;
+ $retime = "2";
+ }
+ printf "%-8s %-12s\n", $wtime->{ $et }, "wtime=$et";
+ $eloop++;
+ }
+ if ($eloop == 0) {
+ print "None";
+ }
+ # print longest wtimes
+ print "\n\n----- Top $sizeCount Longest wtimes (wait times) -----\n\n";
+ $eloop = 0;
+ foreach my $et (sort { $b <=> $a } @wkeys) {
+ if ($eloop == $sizeCount) { last; }
+ printf "%-12s %-10s\n","wtime=$et",$wtime->{ $et };
+ $eloop++;
+ }
+ if ($eloop == 0) {
+ print "None";
+ }
+
+ # Print the operation times (optime)
+
+ my $optime = $hashes->{optime};
+ my @opkeys = keys %{$optime};
+ # print most often optimes
+ print "\n\n----- Top $sizeCount Most Frequent optimes (actual operation times) -----\n\n";
+ $eloop = 0;
+ $retime = 0;
+ foreach my $et (sort { $optime->{$b} <=> $optime->{$a} } @opkeys) {
+ if ($eloop == $sizeCount) { last; }
+ if ($retime ne "2"){
+ $first = $et;
+ $retime = "2";
+ }
+ printf "%-8s %-12s\n", $optime->{ $et }, "optime=$et";
+ $eloop++;
+ }
+ if ($eloop == 0) {
+ print "None";
+ }
+ # print longest optimes
+ print "\n\n----- Top $sizeCount Longest optimes (actual operation times) -----\n\n";
+ $eloop = 0;
+ foreach my $et (sort { $b <=> $a } @opkeys) {
+ if ($eloop == $sizeCount) { last; }
+ printf "%-12s %-10s\n","optime=$et",$optime->{ $et };
+ $eloop++;
+ }
+ if ($eloop == 0) {
+ print "None";
+ }
}
#######################################
@@ -2152,6 +2272,26 @@ sub parseLineNormal
if (m/ RESULT err=/ && m/ notes=[A-Z,]*P/){
$pagedSearchCount++;
}
+ if (m/ RESULT err=/ && m/ notes=[A-Z,]*F/){
+ $invalidFilterCount++;
+ $con = "";
+ if ($_ =~ /conn= *([0-9A-Z]+)/i){
+ $con = $1;
+ if ($_ =~ /op= *([0-9\-]+)/i){ $op = $1;}
+ }
+
+ if($reportStats){ inc_stats('notesF',$s_stats,$m_stats); }
+ if ($usage =~ /u/ || $usage =~ /U/ || $verb eq "yes"){
+ if($_ =~ /etime= *([0-9.]+)/i ){
+ if($1 >= $minEtime){
+ $hashes->{etime_conn_op}->{"$serverRestartCount,$con,$op"} = $1;
+ $hashes->{notesf_conn_op}->{"$serverRestartCount,$con,$op"}++;
+ if ($_ =~ / *([0-9a-z:\/]+)/i){ $hashes->{time_conn_op}->{"$serverRestartCount,$con,$op"} = $1; }
+ if ($_ =~ /nentries= *([0-9]+)/i ){ $hashes->{nentries_conn_op}->{"$serverRestartCount,$con,$op"} = $1; }
+ }
+ }
+ }
+ }
if (m/ notes=[A-Z,]*A/){
$con = "";
if ($_ =~ /conn= *([0-9A-Z]+)/i){
@@ -2435,6 +2575,16 @@ sub parseLineNormal
if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{etime}->{$etime_val}++; }
if ($reportStats){ inc_stats_val('etime',$etime_val,$s_stats,$m_stats); }
}
+ if ($_ =~ /wtime= *([0-9.]+)/ ) {
+ my $wtime_val = $1;
+ if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{wtime}->{$wtime_val}++; }
+ if ($reportStats){ inc_stats_val('wtime',$wtime_val,$s_stats,$m_stats); }
+ }
+ if ($_ =~ /optime= *([0-9.]+)/ ) {
+ my $optime_val = $1;
+ if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{optime}->{$optime_val}++; }
+ if ($reportStats){ inc_stats_val('optime',$optime_val,$s_stats,$m_stats); }
+ }
if ($_ =~ / tag=101 / || $_ =~ / tag=111 / || $_ =~ / tag=100 / || $_ =~ / tag=115 /){
if ($_ =~ / nentries= *([0-9]+)/i ){
my $nents = $1;
@@ -2555,7 +2705,7 @@ sub parseLineNormal
}
}
}
- if (/ RESULT err=/ && / tag=97 nentries=0 etime=/ && $_ =~ /dn=\"(.*)\"/i){
+ if (/ RESULT err=/ && / tag=97 nentries=0 / && $_ =~ /dn=\"(.*)\"/i){
# Check if this is a sasl bind, if see we need to add the RESULT's dn as a bind dn
my $binddn = $1;
my ($conn, $op);
@@ -2680,6 +2830,7 @@ print_stats_block
$stats->{'unbind'},
$stats->{'notesA'},
$stats->{'notesU'},
+ $stats->{'notesF'},
$stats->{'etime'}),
"\n" );
} else {
diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c
index 06ca1ee79..52c64fa3c 100644
--- a/ldap/servers/slapd/add.c
+++ b/ldap/servers/slapd/add.c
@@ -441,6 +441,9 @@ op_shared_add(Slapi_PBlock *pb)
internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL);
pwpolicy = new_passwdPolicy(pb, slapi_entry_get_dn(e));
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
/* target spec is used to decide which plugins are applicable for the operation */
operation_set_target_spec(operation, slapi_entry_get_sdn(e));
diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c
index 310216e89..55f865077 100644
--- a/ldap/servers/slapd/bind.c
+++ b/ldap/servers/slapd/bind.c
@@ -87,6 +87,10 @@ do_bind(Slapi_PBlock *pb)
send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, NULL, 0, NULL);
goto free_and_return;
}
+
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(pb_op);
+
ber = pb_op->o_ber;
/*
diff --git a/ldap/servers/slapd/delete.c b/ldap/servers/slapd/delete.c
index c0e61adf1..1a7209317 100644
--- a/ldap/servers/slapd/delete.c
+++ b/ldap/servers/slapd/delete.c
@@ -236,6 +236,9 @@ op_shared_delete(Slapi_PBlock *pb)
slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL);
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
sdn = slapi_sdn_new_dn_byval(rawdn);
dn = slapi_sdn_get_dn(sdn);
slapi_pblock_set(pb, SLAPI_DELETE_TARGET_SDN, (void *)sdn);
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
index 259bedfff..a186dbde3 100644
--- a/ldap/servers/slapd/modify.c
+++ b/ldap/servers/slapd/modify.c
@@ -626,6 +626,9 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
slapi_pblock_get(pb, SLAPI_SKIP_MODIFIED_ATTRS, &skip_modified_attrs);
slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn);
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
if (sdn) {
passin_sdn = 1;
} else {
diff --git a/ldap/servers/slapd/modrdn.c b/ldap/servers/slapd/modrdn.c
index 3efe584a7..e04916b83 100644
--- a/ldap/servers/slapd/modrdn.c
+++ b/ldap/servers/slapd/modrdn.c
@@ -417,6 +417,9 @@ op_shared_rename(Slapi_PBlock *pb, int passin_args)
internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL);
slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn);
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
/*
* If ownership has not been passed to this function, we replace the
* string input fields within the pblock with strdup'd copies. Why?
diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c
index ff16cd906..4dd3481c7 100644
--- a/ldap/servers/slapd/operation.c
+++ b/ldap/servers/slapd/operation.c
@@ -651,3 +651,27 @@ slapi_operation_time_expiry(Slapi_Operation *o, time_t timeout, struct timespec
{
slapi_timespec_expire_rel(timeout, &(o->o_hr_time_rel), expiry);
}
+
+/* Set the time the operation actually started */
+void
+slapi_operation_set_time_started(Slapi_Operation *o)
+{
+ clock_gettime(CLOCK_MONOTONIC, &(o->o_hr_time_started_rel));
+}
+
+/* The time diff of how long the operation took once it actually started */
+void
+slapi_operation_op_time_elapsed(Slapi_Operation *o, struct timespec *elapsed)
+{
+ struct timespec o_hr_time_now;
+ clock_gettime(CLOCK_MONOTONIC, &o_hr_time_now);
+
+ slapi_timespec_diff(&o_hr_time_now, &(o->o_hr_time_started_rel), elapsed);
+}
+
+/* The time diff the operation waited in the work queue */
+void
+slapi_operation_workq_time_elapsed(Slapi_Operation *o, struct timespec *elapsed)
+{
+ slapi_timespec_diff(&(o->o_hr_time_started_rel), &(o->o_hr_time_rel), elapsed);
+}
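The three helpers above give the timers their meaning: o_hr_time_rel is recorded when the operation is initiated and o_hr_time_started_rel when a worker thread actually starts executing it. A small Python model of the arithmetic (variable names are illustrative, not the slapd struct fields):

    import time

    initiated = time.monotonic()   # operation is read and queued
    time.sleep(0.01)               # op waits in the work queue
    started = time.monotonic()     # worker thread picks it up
    time.sleep(0.02)               # the op itself runs
    done = time.monotonic()

    wtime = started - initiated    # slapi_operation_workq_time_elapsed()
    optime = done - started        # slapi_operation_op_time_elapsed()
    etime = done - initiated       # slapi_operation_time_elapsed()
    print(f"wtime={wtime:.6f} optime={optime:.6f} etime={etime:.6f}")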
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 9fe78655c..c0bc5dcd0 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -284,6 +284,9 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &sdn);
slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
if (NULL == sdn) {
sdn = slapi_sdn_new_dn_byval(base);
slapi_pblock_set(pb, SLAPI_SEARCH_TARGET_SDN, sdn);
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index 0b13c30e9..61efb6f8d 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -1975,6 +1975,8 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
CSN *operationcsn = NULL;
char csn_str[CSN_STRSIZE + 5];
char etime[ETIME_BUFSIZ] = {0};
+ char wtime[ETIME_BUFSIZ] = {0};
+ char optime[ETIME_BUFSIZ] = {0};
int pr_idx = -1;
int pr_cookie = -1;
uint32_t operation_notes;
@@ -1982,19 +1984,26 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
int32_t op_id;
int32_t op_internal_id;
int32_t op_nested_count;
+ struct timespec o_hr_time_end;
get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count);
-
slapi_pblock_get(pb, SLAPI_PAGED_RESULTS_INDEX, &pr_idx);
slapi_pblock_get(pb, SLAPI_PAGED_RESULTS_COOKIE, &pr_cookie);
-
internal_op = operation_is_flag_set(op, OP_FLAG_INTERNAL);
- struct timespec o_hr_time_end;
+ /* total elapsed time */
slapi_operation_time_elapsed(op, &o_hr_time_end);
+ snprintf(etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
+
+ /* wait time */
+ slapi_operation_workq_time_elapsed(op, &o_hr_time_end);
+ snprintf(wtime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
+
+ /* op time */
+ slapi_operation_op_time_elapsed(op, &o_hr_time_end);
+ snprintf(optime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
- snprintf(etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
operation_notes = slapi_pblock_get_operation_notes(pb);
@@ -2025,16 +2034,16 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
if (!internal_op) {
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s"
+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s"
", SASL bind in progress\n",
op->o_connid,
op->o_opid,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str);
} else {
-#define LOG_SASLMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s, SASL bind in progress\n"
+#define LOG_SASLMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s, SASL bind in progress\n"
slapi_log_access(LDAP_DEBUG_ARGS,
connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_SASLMSG_FMT :
LOG_CONN_OP_FMT_EXT_INT LOG_SASLMSG_FMT,
@@ -2043,7 +2052,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
op_internal_id,
op_nested_count,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str);
}
} else if (op->o_tag == LDAP_REQ_BIND && err == LDAP_SUCCESS) {
@@ -2057,15 +2066,15 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
if (!internal_op) {
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s"
+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s"
" dn=\"%s\"\n",
op->o_connid,
op->o_opid,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, dn ? dn : "");
} else {
-#define LOG_BINDMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s dn=\"%s\"\n"
+#define LOG_BINDMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s dn=\"%s\"\n"
slapi_log_access(LDAP_DEBUG_ARGS,
connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_BINDMSG_FMT :
LOG_CONN_OP_FMT_EXT_INT LOG_BINDMSG_FMT,
@@ -2074,7 +2083,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
op_internal_id,
op_nested_count,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, dn ? dn : "");
}
slapi_ch_free((void **)&dn);
@@ -2083,15 +2092,15 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
if (!internal_op) {
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s"
+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s"
" pr_idx=%d pr_cookie=%d\n",
op->o_connid,
op->o_opid,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, pr_idx, pr_cookie);
} else {
-#define LOG_PRMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s pr_idx=%d pr_cookie=%d \n"
+#define LOG_PRMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s pr_idx=%d pr_cookie=%d \n"
slapi_log_access(LDAP_DEBUG_ARGS,
connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_PRMSG_FMT :
LOG_CONN_OP_FMT_EXT_INT LOG_PRMSG_FMT,
@@ -2100,7 +2109,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
op_internal_id,
op_nested_count,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, pr_idx, pr_cookie);
}
} else if (!internal_op) {
@@ -2114,11 +2123,11 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
}
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s%s\n",
+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s\n",
op->o_connid,
op->o_opid,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, ext_str);
if (pbtxt) {
/* if !pbtxt ==> ext_str == "". Don't free ext_str. */
@@ -2126,7 +2135,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
}
} else {
int optype;
-#define LOG_MSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s\n"
+#define LOG_MSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s\n"
slapi_log_access(LDAP_DEBUG_ARGS,
connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_MSG_FMT :
LOG_CONN_OP_FMT_EXT_INT LOG_MSG_FMT,
@@ -2135,7 +2144,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
op_internal_id,
op_nested_count,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str);
/*
* If this is an unindexed search we should log it in the error log if
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index cef8c789c..8e76393c3 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1538,16 +1538,17 @@ typedef struct slapi_operation_results
*/
typedef struct op
{
- BerElement *o_ber; /* ber of the request */
- ber_int_t o_msgid; /* msgid of the request */
- ber_tag_t o_tag; /* tag of the request */
+ BerElement *o_ber; /* ber of the request */
+ ber_int_t o_msgid; /* msgid of the request */
+ ber_tag_t o_tag; /* tag of the request */
struct timespec o_hr_time_rel; /* internal system time op initiated */
struct timespec o_hr_time_utc; /* utc system time op initiated */
- int o_isroot; /* requestor is manager */
+ struct timespec o_hr_time_started_rel; /* internal system time op started */
+ int o_isroot; /* requestor is manager */
Slapi_DN o_sdn; /* dn bound when op was initiated */
- char *o_authtype; /* auth method used to bind dn */
+ char *o_authtype; /* auth method used to bind dn */
int o_ssf; /* ssf for this operation (highest between SASL and TLS/SSL) */
- int o_opid; /* id of this operation */
+ int o_opid; /* id of this operation */
PRUint64 o_connid; /* id of conn initiating this op; for logging only */
void *o_handler_data;
result_handler o_result_handler;
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 834a98742..8d9c3fa6a 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -8210,13 +8210,29 @@ void slapi_operation_time_elapsed(Slapi_Operation *o, struct timespec *elapsed);
*/
void slapi_operation_time_initiated(Slapi_Operation *o, struct timespec *initiated);
/**
- * Given an operation and a timeout, return a populate struct with the expiry
- * time of the operation suitable for checking with slapi_timespec_expire_check
+ * Given an operation, determine the time elapsed since the op
+ * was actually started.
*
- * \param Slapi_Operation o - the operation that is in progress
- * \param time_t timeout the seconds relative to operation initiation to expiry at.
- * \param struct timespec *expiry the timespec to popluate with the relative expiry.
+ * \param Slapi_Operation o - the operation which is inprogress
+ * \param struct timespec *elapsed - location where the time difference will be
+ * placed.
+ */
+void slapi_operation_op_time_elapsed(Slapi_Operation *o, struct timespec *elapsed);
+/**
+ * Given an operation, determine the time elapsed that the op spent
+ * in the work queue before actually being dispatched to a worker thread
+ *
+ * \param Slapi_Operation o - the operation which is inprogress
+ * \param struct timespec *elapsed - location where the time difference will be
+ * placed.
+ */
+void slapi_operation_workq_time_elapsed(Slapi_Operation *o, struct timespec *elapsed);
+/**
+ * Set the time the operation actually started
+ *
+ * \param Slapi_Operation o - the operation which is inprogress
*/
+void slapi_operation_set_time_started(Slapi_Operation *o);
#endif
/**
--
2.26.2

View File

@ -0,0 +1,31 @@
From ec1714c81290a03ae9aa5fd10acf3e9be71596d7 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 11 Jun 2020 15:47:43 -0400
Subject: [PATCH] Issue 50912 - pwdReset can be modified by a user
Description: The attribute "pwdReset" should only be allowed to be set by the
server. Update schema definition to include NO-USER-MODIFICATION
relates: https://pagure.io/389-ds-base/issue/50912
Reviewed by: mreynolds(one line commit rule)
---
ldap/schema/02common.ldif | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
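A quick way to verify the effect (not part of the patch; the server URL, bind DN, password and the exact result code returned are assumptions): with NO-USER-MODIFICATION in place, a modify of pwdReset from a regular client bind should now be rejected by the server.

    import ldap

    conn = ldap.initialize('ldap://localhost:389')
    conn.simple_bind_s('uid=test_user_1000,ou=People,dc=example,dc=com', 'password')
    try:
        conn.modify_s('uid=test_user_1000,ou=People,dc=example,dc=com',
                      [(ldap.MOD_REPLACE, 'pwdReset', [b'TRUE'])])
        print('unexpected: modify was accepted')
    except ldap.LDAPError as e:
        print('rejected as expected:', e)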
diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif
index 966636bef..c6dc074db 100644
--- a/ldap/schema/02common.ldif
+++ b/ldap/schema/02common.ldif
@@ -76,7 +76,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2349 NAME ( 'passwordDictCheck' 'pwdDict
attributeTypes: ( 2.16.840.1.113730.3.1.2350 NAME ( 'passwordDictPath' 'pwdDictPath' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2351 NAME ( 'passwordUserAttributes' 'pwdUserAttributes' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2352 NAME ( 'passwordBadWords' 'pwdBadWords' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
-attributeTypes: ( 2.16.840.1.113730.3.1.2366 NAME 'pwdReset' DESC '389 Directory Server password policy attribute type' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE USAGE directoryOperation X-ORIGIN '389 Directory Server' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2366 NAME 'pwdReset' DESC '389 Directory Server password policy attribute type' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.198 NAME 'memberURL' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.199 NAME 'memberCertificateDescription' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.207 NAME 'vlvBase' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'Netscape Directory Server' )
--
2.26.2

View File

@ -0,0 +1,202 @@
From a6a52365df26edd4f6b0028056395d943344d787 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 11 Jun 2020 15:30:28 -0400
Subject: [PATCH] Issue 50791 - Healthcheck should look for notes=A/F in access
log
Description: Add checks for notes=A (fully unindexed search) and
notes=F (Unknown attribute in search filter) in the
current access log.
relates: https://pagure.io/389-ds-base/issue/50791
Reviewed by: firstyear(Thanks!)
---
src/lib389/lib389/cli_ctl/health.py | 4 +-
src/lib389/lib389/dirsrv_log.py | 72 +++++++++++++++++++++++++++--
src/lib389/lib389/lint.py | 26 ++++++++++-
3 files changed, 96 insertions(+), 6 deletions(-)
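The core of the new check is a scan of access-log RESULT lines for the A/F flags. A stripped-down sketch of that matching logic (the sample line is fabricated; only the notes flag and the conn/op/etime fields matter here):

    import re

    line = ('[21/Jun/2020:12:00:00.123456789 -0400] conn=42 op=7 RESULT err=0 '
            'tag=101 nentries=5000 etime=3.000123456 notes=A')

    if ' RESULT err=' in line and re.search(r' notes=[A-Z,]*[AF]', line):
        conn = line.split(' conn=', 1)[1].split(' ', 1)[0]
        op = line.split(' op=', 1)[1].split(' ', 1)[0]
        etime = line.split(' etime=', 1)[1].split(' ', 1)[0]
        print(f'flagged search: conn={conn} op={op} etime={etime}')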
diff --git a/src/lib389/lib389/cli_ctl/health.py b/src/lib389/lib389/cli_ctl/health.py
index 6333a753a..89484a11b 100644
--- a/src/lib389/lib389/cli_ctl/health.py
+++ b/src/lib389/lib389/cli_ctl/health.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -18,6 +18,7 @@ from lib389.monitor import MonitorDiskSpace
from lib389.replica import Replica, Changelog5
from lib389.nss_ssl import NssSsl
from lib389.dseldif import FSChecks, DSEldif
+from lib389.dirsrv_log import DirsrvAccessLog
from lib389 import lint
from lib389 import plugins
from lib389._constants import DSRC_HOME
@@ -37,6 +38,7 @@ CHECK_OBJECTS = [
Changelog5,
DSEldif,
NssSsl,
+ DirsrvAccessLog,
]
diff --git a/src/lib389/lib389/dirsrv_log.py b/src/lib389/lib389/dirsrv_log.py
index baac2a3c9..7bed4bb17 100644
--- a/src/lib389/lib389/dirsrv_log.py
+++ b/src/lib389/lib389/dirsrv_log.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -9,12 +9,17 @@
"""Helpers for managing the directory server internal logs.
"""
+import copy
import re
import gzip
from dateutil.parser import parse as dt_parse
from glob import glob
from lib389.utils import ensure_bytes
-
+from lib389._mapped_object_lint import DSLint
+from lib389.lint import (
+ DSLOGNOTES0001, # Unindexed search
+ DSLOGNOTES0002, # Unknown attr in search filter
+)
# Because many of these settings can change live, we need to check for certain
# attributes all the time.
@@ -35,7 +40,7 @@ MONTH_LOOKUP = {
}
-class DirsrvLog(object):
+class DirsrvLog(DSLint):
"""Class of functions to working with the various DIrectory Server logs
"""
def __init__(self, dirsrv):
@@ -189,6 +194,67 @@ class DirsrvAccessLog(DirsrvLog):
self.full_regexs = [self.prog_m1, self.prog_con, self.prog_discon]
self.result_regexs = [self.prog_notes, self.prog_repl,
self.prog_result]
+ @classmethod
+ def lint_uid(cls):
+ return 'logs'
+
+ def _log_get_search_stats(self, conn, op):
+ lines = self.match(f".* conn={conn} op={op} SRCH base=.*")
+ if len(lines) != 1:
+ return None
+
+ quoted_vals = re.findall('"([^"]*)"', lines[0])
+ return {
+ 'base': quoted_vals[0],
+ 'filter': quoted_vals[1],
+ 'timestamp': re.findall('\[(.*)\]', lines[0])[0],
+ 'scope': lines[0].split(' scope=', 1)[1].split(' ',1)[0]
+ }
+
+ def _lint_notes(self):
+ """
+ Check for notes=A (fully unindexed searches), and
+ notes=F (unknown attribute in filter)
+ """
+ for pattern, lint_report in [(".* notes=A", DSLOGNOTES0001), (".* notes=F", DSLOGNOTES0002)]:
+ lines = self.match(pattern)
+ if len(lines) > 0:
+ count = 0
+ searches = []
+ for line in lines:
+ if ' RESULT err=' in line:
+ # Looks like a valid notes=A/F
+ conn = line.split(' conn=', 1)[1].split(' ',1)[0]
+ op = line.split(' op=', 1)[1].split(' ',1)[0]
+ etime = line.split(' etime=', 1)[1].split(' ',1)[0]
+ stats = self._log_get_search_stats(conn, op)
+ if stats is not None:
+ timestamp = stats['timestamp']
+ base = stats['base']
+ scope = stats['scope']
+ srch_filter = stats['filter']
+ count += 1
+ if lint_report == DSLOGNOTES0001:
+ searches.append(f'\n [{count}] Unindexed Search\n'
+ f' - date: {timestamp}\n'
+ f' - conn/op: {conn}/{op}\n'
+ f' - base: {base}\n'
+ f' - scope: {scope}\n'
+ f' - filter: {srch_filter}\n'
+ f' - etime: {etime}\n')
+ else:
+ searches.append(f'\n [{count}] Invalid Attribute in Filter\n'
+ f' - date: {timestamp}\n'
+ f' - conn/op: {conn}/{op}\n'
+ f' - filter: {srch_filter}\n')
+ if len(searches) > 0:
+ report = copy.deepcopy(lint_report)
+ report['items'].append(self._get_log_path())
+ report['detail'] = report['detail'].replace('NUMBER', str(count))
+ for srch in searches:
+ report['detail'] += srch
+ yield report
+
def _get_log_path(self):
"""Return the current log file location"""
diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
index a103feec7..4b1700b92 100644
--- a/src/lib389/lib389/lint.py
+++ b/src/lib389/lib389/lint.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -253,7 +253,7 @@ can use the CLI tool "dsconf" to resolve the conflict. Here is an example:
Remove conflict entry and keep only the original/counterpart entry:
- # dsconf slapd-YOUR_INSTANCE repl-conflict remove <DN of conflict entry>
+ # dsconf slapd-YOUR_INSTANCE repl-conflict delete <DN of conflict entry>
Replace the original/counterpart entry with the conflict entry:
@@ -418,3 +418,25 @@ until the time issues have been resolved:
Also look at https://access.redhat.com/documentation/en-us/red_hat_directory_server/11/html/administration_guide/managing_replication-troubleshooting_replication_related_problems
and find the paragraph "Too much time skew"."""
}
+
+DSLOGNOTES0001 = {
+ 'dsle': 'DSLOGNOTES0001',
+ 'severity': 'Medium',
+ 'description': 'Unindexed Search',
+ 'items': ['Performance'],
+ 'detail': """Found NUMBER fully unindexed searches in the current access log.
+Unindexed searches can cause high CPU and slow down the entire server's performance.\n""",
+ 'fix': """Examine the searches that are unindexed, and either properly index the attributes
+in the filter, increase the nsslapd-idlistscanlimit, or stop using that filter."""
+}
+
+DSLOGNOTES0002 = {
+ 'dsle': 'DSLOGNOTES0002',
+ 'severity': 'Medium',
+ 'description': 'Unknown Attribute In Filter',
+ 'items': ['Possible Performance Impact'],
+ 'detail': """Found NUMBER searches in the current access log that are using an
+unknown attribute in the search filter.\n""",
+ 'fix': """Stop using this these unknown attributes in the filter, or add the schema
+to the server and make sure it's properly indexed."""
+}
--
2.26.2

View File

@ -0,0 +1,51 @@
From 2844d4ad90cbbd23ae75309e50ae4d7145586bb7 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 10 Jun 2020 14:07:24 -0400
Subject: [PATCH] Issue 51144 - dsctl fails with instance names that contain
slapd-
Bug Description: If an instance name contains 'slapd-' the CLI breaks:
slapd-test-slapd
Fix Description: Only strip off "slapd-" from the front of the instance
name.
relates: https://pagure.io/389-ds-base/issue/51144
Reviewed by: firstyear(Thanks!)
---
src/lib389/lib389/__init__.py | 2 +-
src/lib389/lib389/dseldif.py | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
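The behavioural difference is easy to show in isolation; a minimal sketch of the prefix-only strip the patch applies (the instance names are examples):

    def strip_serverid(serverid: str) -> str:
        """Strip only a leading 'slapd-' prefix, as the fixed code does."""
        if serverid.startswith('slapd-'):
            return serverid.replace('slapd-', '', 1)
        return serverid

    print(strip_serverid('slapd-test-slapd'))  # -> test-slapd
    print(strip_serverid('test-slapd'))        # -> test-slapd (left untouched)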
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 0ff1ab173..63d44b60a 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -710,7 +710,7 @@ class DirSrv(SimpleLDAPObject, object):
# Don't need a default value now since it's set in init.
if serverid is None and hasattr(self, 'serverid'):
serverid = self.serverid
- elif serverid is not None:
+ elif serverid is not None and serverid.startswith('slapd-'):
serverid = serverid.replace('slapd-', '', 1)
if self.serverid is None:
diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py
index 96c9af9d1..f2725add9 100644
--- a/src/lib389/lib389/dseldif.py
+++ b/src/lib389/lib389/dseldif.py
@@ -40,7 +40,8 @@ class DSEldif(DSLint):
if serverid:
# Get the dse.ldif from the instance name
prefix = os.environ.get('PREFIX', ""),
- serverid = serverid.replace("slapd-", "")
+ if serverid.startswith("slapd-"):
+ serverid = serverid.replace("slapd-", "", 1)
self.path = "{}/etc/dirsrv/slapd-{}/dse.ldif".format(prefix[0], serverid)
else:
ds_paths = Paths(self._instance.serverid, self._instance)
--
2.26.2

View File

@ -0,0 +1,520 @@
From 6cd4b1c60dbd3d7b74adb19a2434585d50553f39 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Fri, 5 Jun 2020 12:14:51 +0200
Subject: [PATCH] Ticket 49859 - A distinguished value can be missing in an
entry
Bug description:
According to RFC 4511 (see ticket), the values of the RDN attributes
should be present in an entry.
With a set of replicated operations, it is possible for those values
to end up missing.
Fix description:
MOD and MODRDN updates check that the RDN values are present.
If they are missing, they are added to the resulting entry. In addition,
the set of modifications that adds those values is also indexed.
In the specific case of single-valued attributes, where the final and unique
value cannot be the RDN value, the attribute nsds5ReplConflict is added.
https://pagure.io/389-ds-base/issue/49859
Reviewed by: Mark Reynolds, William Brown
Platforms tested: F31
---
.../replication/conflict_resolve_test.py | 174 +++++++++++++++++-
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 136 ++++++++++++++
ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 37 +++-
.../servers/slapd/back-ldbm/proto-back-ldbm.h | 1 +
4 files changed, 343 insertions(+), 5 deletions(-)
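The server-side check added below boils down to: split the entry's (new) RDN into attribute/value pairs and make sure each value is present on the entry. A rough Python equivalent using python-ldap's DN parser (the entry data is illustrative; the real code also handles CSNs and the single-valued conflict case):

    import ldap.dn

    def missing_rdn_values(dn, entry_attrs):
        """Return the (attr, value) pairs of the first RDN that the entry lacks."""
        missing = []
        for attr, value, _flags in ldap.dn.str2dn(dn)[0]:
            if value not in entry_attrs.get(attr.lower(), []):
                missing.append((attr, value))
        return missing

    # e.g. after the conflicting rename/replace sequence in the test below:
    entry = {'uid': ['foo2'], 'cn': ['test_user_1000']}
    print(missing_rdn_values('uid=foo1,ou=people,dc=example,dc=com', entry))
    # -> [('uid', 'foo1')]  -- the value the fix re-adds and indexes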
diff --git a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
index 99a072935..48d0067db 100644
--- a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
+++ b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
@@ -10,10 +10,11 @@ import time
import logging
import ldap
import pytest
+import re
from itertools import permutations
from lib389._constants import *
from lib389.idm.nscontainer import nsContainers
-from lib389.idm.user import UserAccounts
+from lib389.idm.user import UserAccounts, UserAccount
from lib389.idm.group import Groups
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.replica import ReplicationManager
@@ -763,6 +764,177 @@ class TestTwoMasters:
user_dns_m2 = [user.dn for user in test_users_m2.list()]
assert set(user_dns_m1) == set(user_dns_m2)
+ def test_conflict_attribute_multi_valued(self, topology_m2, base_m2):
+ """A RDN attribute being multi-valued, checks that after several operations
+ MODRDN and MOD_REPL its RDN values are the same on both servers
+
+ :id: 225b3522-8ed7-4256-96f9-5fab9b7044a5
+ :setup: Two master replication,
+ audit log, error log for replica and access log for internal
+ :steps:
+ 1. Create a test entry uid=user_test_1000,...
+ 2. Pause all replication agreements
+ 3. On M1 rename it into uid=foo1,...
+ 4. On M2 rename it into uid=foo2,...
+ 5. On M1 MOD_REPL uid:foo1
+ 6. Resume all replication agreements
+ 7. Check that entry on M1 has uid=foo1, foo2
+ 8. Check that entry on M2 has uid=foo1, foo2
+ 9. Check that entry on M1 and M2 has the same uid values
+ :expectedresults:
+ 1. It should pass
+ 2. It should pass
+ 3. It should pass
+ 4. It should pass
+ 5. It should pass
+ 6. It should pass
+ 7. It should pass
+ 8. It should pass
+ 9. It should pass
+ """
+
+ M1 = topology_m2.ms["master1"]
+ M2 = topology_m2.ms["master2"]
+
+ # add a test user
+ test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
+ user_1 = test_users_m1.create_test_user(uid=1000)
+ test_users_m2 = UserAccount(M2, user_1.dn)
+ # Waiting fo the user to be replicated
+ for i in range(0,4):
+ time.sleep(1)
+ if test_users_m2.exists():
+ break
+ assert(test_users_m2.exists())
+
+ # Stop replication agreements
+ topology_m2.pause_all_replicas()
+
+ # On M1 rename test entry in uid=foo1
+ original_dn = user_1.dn
+ user_1.rename('uid=foo1')
+ time.sleep(1)
+
+ # On M2 rename test entry in uid=foo2
+ M2.rename_s(original_dn, 'uid=foo2')
+ time.sleep(2)
+
+ # on M1 MOD_REPL uid into foo1
+ user_1.replace('uid', 'foo1')
+
+ # resume replication agreements
+ topology_m2.resume_all_replicas()
+ time.sleep(5)
+
+ # check that on M1, the entry 'uid' has two values 'foo1' and 'foo2'
+ final_dn = re.sub('^.*1000,', 'uid=foo2,', original_dn)
+ final_user_m1 = UserAccount(M1, final_dn)
+ for val in final_user_m1.get_attr_vals_utf8('uid'):
+ log.info("Check %s is on M1" % val)
+ assert(val in ['foo1', 'foo2'])
+
+ # check that on M2, the entry 'uid' has two values 'foo1' and 'foo2'
+ final_user_m2 = UserAccount(M2, final_dn)
+ for val in final_user_m2.get_attr_vals_utf8('uid'):
+ log.info("Check %s is on M1" % val)
+ assert(val in ['foo1', 'foo2'])
+
+ # check that the entry have the same uid values
+ for val in final_user_m1.get_attr_vals_utf8('uid'):
+ log.info("Check M1.uid %s is also on M2" % val)
+ assert(val in final_user_m2.get_attr_vals_utf8('uid'))
+
+ for val in final_user_m2.get_attr_vals_utf8('uid'):
+ log.info("Check M2.uid %s is also on M1" % val)
+ assert(val in final_user_m1.get_attr_vals_utf8('uid'))
+
+ def test_conflict_attribute_single_valued(self, topology_m2, base_m2):
+ """A RDN attribute being signle-valued, checks that after several operations
+ MODRDN and MOD_REPL its RDN values are the same on both servers
+
+ :id: c38ae613-5d1e-47cf-b051-c7284e64b817
+ :setup: Two master replication, test container for entries, enable plugin logging,
+ audit log, error log for replica and access log for internal
+ :steps:
+ 1. Create a test entry uid=user_test_1000,...
+ 2. Pause all replication agreements
+ 3. On M1 rename it into employeenumber=foo1,...
+ 4. On M2 rename it into employeenumber=foo2,...
+ 5. On M1 MOD_REPL employeenumber:foo1
+ 6. Resume all replication agreements
+ 7. Check that entry on M1 has employeenumber=foo1
+ 8. Check that entry on M2 has employeenumber=foo1
+ 9. Check that entry on M1 and M2 has the same employeenumber values
+ :expectedresults:
+ 1. It should pass
+ 2. It should pass
+ 3. It should pass
+ 4. It should pass
+ 5. It should pass
+ 6. It should pass
+ 7. It should pass
+ 8. It should pass
+ 9. It should pass
+ """
+
+ M1 = topology_m2.ms["master1"]
+ M2 = topology_m2.ms["master2"]
+
+ # add a test user with a dummy 'uid' extra value because modrdn removes
+ # uid that conflict with 'account' objectclass
+ test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
+ user_1 = test_users_m1.create_test_user(uid=1000)
+ user_1.add('objectclass', 'extensibleobject')
+ user_1.add('uid', 'dummy')
+ test_users_m2 = UserAccount(M2, user_1.dn)
+
+ # Waiting fo the user to be replicated
+ for i in range(0,4):
+ time.sleep(1)
+ if test_users_m2.exists():
+ break
+ assert(test_users_m2.exists())
+
+ # Stop replication agreements
+ topology_m2.pause_all_replicas()
+
+ # On M1 rename test entry in employeenumber=foo1
+ original_dn = user_1.dn
+ user_1.rename('employeenumber=foo1')
+ time.sleep(1)
+
+ # On M2 rename test entry in employeenumber=foo2
+ M2.rename_s(original_dn, 'employeenumber=foo2')
+ time.sleep(2)
+
+ # on M1 MOD_REPL uid into foo1
+ user_1.replace('employeenumber', 'foo1')
+
+ # resume replication agreements
+ topology_m2.resume_all_replicas()
+ time.sleep(5)
+
+ # check that on M1, the entry 'employeenumber' has value 'foo1'
+ final_dn = re.sub('^.*1000,', 'employeenumber=foo2,', original_dn)
+ final_user_m1 = UserAccount(M1, final_dn)
+ for val in final_user_m1.get_attr_vals_utf8('employeenumber'):
+ log.info("Check %s is on M1" % val)
+ assert(val in ['foo1'])
+
+ # check that on M2, the entry 'employeenumber' has values 'foo1'
+ final_user_m2 = UserAccount(M2, final_dn)
+ for val in final_user_m2.get_attr_vals_utf8('employeenumber'):
+ log.info("Check %s is on M2" % val)
+ assert(val in ['foo1'])
+
+ # check that the entry have the same uid values
+ for val in final_user_m1.get_attr_vals_utf8('employeenumber'):
+ log.info("Check M1.uid %s is also on M2" % val)
+ assert(val in final_user_m2.get_attr_vals_utf8('employeenumber'))
+
+ for val in final_user_m2.get_attr_vals_utf8('employeenumber'):
+ log.info("Check M2.uid %s is also on M1" % val)
+ assert(val in final_user_m1.get_attr_vals_utf8('employeenumber'))
class TestThreeMasters:
def test_nested_entries(self, topology_m3, base_m3):
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index e9d7e87e3..a507f3c31 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -213,6 +213,112 @@ error:
return retval;
}
+int32_t
+entry_get_rdn_mods(Slapi_PBlock *pb, Slapi_Entry *entry, CSN *csn, int repl_op, Slapi_Mods **smods_ret)
+{
+ unsigned long op_type = SLAPI_OPERATION_NONE;
+ char *new_rdn = NULL;
+ char **dns = NULL;
+ char **rdns = NULL;
+ Slapi_Mods *smods = NULL;
+ char *type = NULL;
+ struct berval *bvp[2] = {0};
+ struct berval bv;
+ Slapi_Attr *attr = NULL;
+ const char *entry_dn = NULL;
+
+ *smods_ret = NULL;
+ entry_dn = slapi_entry_get_dn_const(entry);
+ /* Do not bother to check that RDN is present, no one rename RUV or change its nsuniqueid */
+ if (strcasestr(entry_dn, RUV_STORAGE_ENTRY_UNIQUEID)) {
+ return 0;
+ }
+
+ /* First get the RDNs of the operation */
+ slapi_pblock_get(pb, SLAPI_OPERATION_TYPE, &op_type);
+ switch (op_type) {
+ case SLAPI_OPERATION_MODIFY:
+ dns = slapi_ldap_explode_dn(entry_dn, 0);
+ if (dns == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods",
+ "Fails to split DN \"%s\" into components\n", entry_dn);
+ return -1;
+ }
+ rdns = slapi_ldap_explode_rdn(dns[0], 0);
+ slapi_ldap_value_free(dns);
+
+ break;
+ case SLAPI_OPERATION_MODRDN:
+ slapi_pblock_get(pb, SLAPI_MODRDN_NEWRDN, &new_rdn);
+ rdns = slapi_ldap_explode_rdn(new_rdn, 0);
+ break;
+ default:
+ break;
+ }
+ if (rdns == NULL || rdns[0] == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods",
+ "Fails to split RDN \"%s\" into components\n", slapi_entry_get_dn_const(entry));
+ return -1;
+ }
+
+ /* Update the entry to add RDNs values if they are missing */
+ smods = slapi_mods_new();
+
+ bvp[0] = &bv;
+ bvp[1] = NULL;
+ for (size_t rdns_count = 0; rdns[rdns_count]; rdns_count++) {
+ Slapi_Value *value;
+ attr = NULL;
+ slapi_rdn2typeval(rdns[rdns_count], &type, &bv);
+
+ /* Check if the RDN value exists */
+ if ((slapi_entry_attr_find(entry, type, &attr) != 0) ||
+ (slapi_attr_value_find(attr, &bv))) {
+ const CSN *csn_rdn_add;
+ const CSN *adcsn = attr_get_deletion_csn(attr);
+
+ /* It is missing => adds it */
+ if (slapi_attr_flag_is_set(attr, SLAPI_ATTR_FLAG_SINGLE)) {
+ if (csn_compare(adcsn, csn) >= 0) {
+ /* this is a single valued attribute and the current value
+ * (that is different from RDN value) is more recent than
+ * the RDN value we want to apply.
+ * Keep the current value and add a conflict flag
+ */
+
+ type = ATTR_NSDS5_REPLCONFLICT;
+ bv.bv_val = "RDN value may be missing because it is single-valued";
+ bv.bv_len = strlen(bv.bv_val);
+ slapi_entry_add_string(entry, type, bv.bv_val);
+ slapi_mods_add_modbvps(smods, LDAP_MOD_ADD, type, bvp);
+ continue;
+ }
+ }
+ /* if an RDN value needs to be forced, make sure its csn is ahead */
+ slapi_mods_add_modbvps(smods, LDAP_MOD_ADD, type, bvp);
+ csn_rdn_add = csn_max(adcsn, csn);
+
+ if (entry_apply_mods_wsi(entry, smods, csn_rdn_add, repl_op)) {
+ slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods",
+ "Fails to set \"%s\" in \"%s\"\n", type, slapi_entry_get_dn_const(entry));
+ slapi_ldap_value_free(rdns);
+ slapi_mods_free(&smods);
+ return -1;
+ }
+ /* Make the RDN value a distinguished value */
+ attr_value_find_wsi(attr, &bv, &value);
+ value_update_csn(value, CSN_TYPE_VALUE_DISTINGUISHED, csn_rdn_add);
+ }
+ }
+ slapi_ldap_value_free(rdns);
+ if (smods->num_mods == 0) {
+ /* smods_ret already NULL, just free the useless smods */
+ slapi_mods_free(&smods);
+ } else {
+ *smods_ret = smods;
+ }
+ return 0;
+}
/**
Apply the mods to the ec entry. Check for syntax, schema problems.
Check for abandon.
@@ -269,6 +375,8 @@ modify_apply_check_expand(
goto done;
}
+
+
/*
* If the objectClass attribute type was modified in any way, expand
* the objectClass values to reflect the inheritance hierarchy.
@@ -414,6 +522,7 @@ ldbm_back_modify(Slapi_PBlock *pb)
int result_sent = 0;
int32_t parent_op = 0;
struct timespec parent_time;
+ Slapi_Mods *smods_add_rdn = NULL;
slapi_pblock_get(pb, SLAPI_BACKEND, &be);
slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
@@ -731,6 +840,15 @@ ldbm_back_modify(Slapi_PBlock *pb)
}
} /* else if new_mod_count == mod_count then betxnpremod plugin did nothing */
+ /* time to check if applying a replicated operation removed
+ * the RDN value from the entry. Assuming that only replicated update
+ * can lead to that bad result
+ */
+ if (entry_get_rdn_mods(pb, ec->ep_entry, opcsn, repl_op, &smods_add_rdn)) {
+ goto error_return;
+ }
+
+
/*
* Update the ID to Entry index.
* Note that id2entry_add replaces the entry, so the Entry ID
@@ -764,6 +882,23 @@ ldbm_back_modify(Slapi_PBlock *pb)
MOD_SET_ERROR(ldap_result_code, LDAP_OPERATIONS_ERROR, retry_count);
goto error_return;
}
+
+ if (smods_add_rdn && slapi_mods_get_num_mods(smods_add_rdn) > 0) {
+ retval = index_add_mods(be, (LDAPMod **) slapi_mods_get_ldapmods_byref(smods_add_rdn), e, ec, &txn);
+ if (DB_LOCK_DEADLOCK == retval) {
+ /* Abort and re-try */
+ slapi_mods_free(&smods_add_rdn);
+ continue;
+ }
+ if (retval != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modify",
+ "index_add_mods (rdn) failed, err=%d %s\n",
+ retval, (msg = dblayer_strerror(retval)) ? msg : "");
+ MOD_SET_ERROR(ldap_result_code, LDAP_OPERATIONS_ERROR, retry_count);
+ slapi_mods_free(&smods_add_rdn);
+ goto error_return;
+ }
+ }
/*
* Remove the old entry from the Virtual List View indexes.
* Add the new entry to the Virtual List View indexes.
@@ -978,6 +1113,7 @@ error_return:
common_return:
slapi_mods_done(&smods);
+ slapi_mods_free(&smods_add_rdn);
if (inst) {
if (ec_locked || cache_is_in_cache(&inst->inst_cache, ec)) {
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
index fde83c99f..e97b7a5f6 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
@@ -21,7 +21,7 @@ static void moddn_unlock_and_return_entry(backend *be, struct backentry **target
static int moddn_newrdn_mods(Slapi_PBlock *pb, const char *olddn, struct backentry *ec, Slapi_Mods *smods_wsi, int is_repl_op);
static IDList *moddn_get_children(back_txn *ptxn, Slapi_PBlock *pb, backend *be, struct backentry *parententry, Slapi_DN *parentdn, struct backentry ***child_entries, struct backdn ***child_dns, int is_resurect_operation);
static int moddn_rename_children(back_txn *ptxn, Slapi_PBlock *pb, backend *be, IDList *children, Slapi_DN *dn_parentdn, Slapi_DN *dn_newsuperiordn, struct backentry *child_entries[]);
-static int modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li, struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3);
+static int modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li, struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3, Slapi_Mods *smods4);
static void mods_remove_nsuniqueid(Slapi_Mods *smods);
#define MOD_SET_ERROR(rc, error, count) \
@@ -100,6 +100,7 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
Connection *pb_conn = NULL;
int32_t parent_op = 0;
struct timespec parent_time;
+ Slapi_Mods *smods_add_rdn = NULL;
if (slapi_pblock_get(pb, SLAPI_CONN_ID, &conn_id) < 0) {
conn_id = 0; /* connection is NULL */
@@ -842,6 +843,15 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
goto error_return;
}
}
+
+ /* time to check if applying a replicated operation removed
+ * the RDN value from the entry. Assuming that only replicated update
+ * can lead to that bad result
+ */
+ if (entry_get_rdn_mods(pb, ec->ep_entry, opcsn, is_replicated_operation, &smods_add_rdn)) {
+ goto error_return;
+ }
+
/* check that the entry still obeys the schema */
if (slapi_entry_schema_check(pb, ec->ep_entry) != 0) {
ldap_result_code = LDAP_OBJECT_CLASS_VIOLATION;
@@ -1003,7 +1013,7 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
/*
* Update the indexes for the entry.
*/
- retval = modrdn_rename_entry_update_indexes(&txn, pb, li, e, &ec, &smods_generated, &smods_generated_wsi, &smods_operation_wsi);
+ retval = modrdn_rename_entry_update_indexes(&txn, pb, li, e, &ec, &smods_generated, &smods_generated_wsi, &smods_operation_wsi, smods_add_rdn);
if (DB_LOCK_DEADLOCK == retval) {
/* Retry txn */
continue;
@@ -1497,6 +1507,7 @@ common_return:
slapi_mods_done(&smods_operation_wsi);
slapi_mods_done(&smods_generated);
slapi_mods_done(&smods_generated_wsi);
+ slapi_mods_free(&smods_add_rdn);
slapi_ch_free((void **)&child_entries);
slapi_ch_free((void **)&child_dns);
if (ldap_result_matcheddn && 0 != strcmp(ldap_result_matcheddn, "NULL"))
@@ -1778,7 +1789,7 @@ mods_remove_nsuniqueid(Slapi_Mods *smods)
* mods contains the list of attribute change made.
*/
static int
-modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li __attribute__((unused)), struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3)
+modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li __attribute__((unused)), struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3, Slapi_Mods *smods4)
{
backend *be;
ldbm_instance *inst;
@@ -1874,6 +1885,24 @@ modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbm
goto error_return;
}
}
+ if (smods4 != NULL && slapi_mods_get_num_mods(smods4) > 0) {
+ /*
+ * update the indexes: lastmod, rdn, etc.
+ */
+ retval = index_add_mods(be, slapi_mods_get_ldapmods_byref(smods4), e, *ec, ptxn);
+ if (DB_LOCK_DEADLOCK == retval) {
+ /* Retry txn */
+ slapi_log_err(SLAPI_LOG_BACKLDBM, "modrdn_rename_entry_update_indexes",
+ "index_add_mods4 deadlock\n");
+ goto error_return;
+ }
+ if (retval != 0) {
+ slapi_log_err(SLAPI_LOG_TRACE, "modrdn_rename_entry_update_indexes",
+ "index_add_mods 4 failed, err=%d %s\n",
+ retval, (msg = dblayer_strerror(retval)) ? msg : "");
+ goto error_return;
+ }
+ }
/*
* Remove the old entry from the Virtual List View indexes.
* Add the new entry to the Virtual List View indexes.
@@ -1991,7 +2020,7 @@ moddn_rename_child_entry(
* Update all the indexes.
*/
retval = modrdn_rename_entry_update_indexes(ptxn, pb, li, e, ec,
- smodsp, NULL, NULL);
+ smodsp, NULL, NULL, NULL);
/* JCMREPL - Should the children get updated modifiersname and lastmodifiedtime? */
slapi_mods_done(&smods);
}
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index 4d2524fd9..e2f1100ed 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -324,6 +324,7 @@ int get_parent_rdn(DB *db, ID parentid, Slapi_RDN *srdn);
/*
* modify.c
*/
+int32_t entry_get_rdn_mods(Slapi_PBlock *pb, Slapi_Entry *entry, CSN *csn, int repl_op, Slapi_Mods **smods_ret);
int modify_update_all(backend *be, Slapi_PBlock *pb, modify_context *mc, back_txn *txn);
void modify_init(modify_context *mc, struct backentry *old_entry);
int modify_apply_mods(modify_context *mc, Slapi_Mods *smods);
--
2.26.2

View File

@ -0,0 +1,128 @@
From 2be9d1b4332d3b9b55a2d285e9610813100e235f Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 22 Jun 2020 17:49:10 -0400
Subject: [PATCH] Issue 49256 - log warning when thread number is very
different from autotuned value
Description: To help prevent customers from setting incorrect values for
the thread number it would be useful to warn them that the
configured value is either way too low or way too high.
relates: https://pagure.io/389-ds-base/issue/49256
Reviewed by: firstyear(Thanks!)
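A rough, self-contained sketch of the heuristic (Python, not the server's C code path; MIN_THREADS and the 4x factor are taken from the diff below, and os.cpu_count() stands in for util_get_hardware_threads()):
    import os

    MIN_THREADS = 16  # default minimum used by the patch below

    def thread_number_warning(configured, hw_threads=None):
        # Return a warning string when the configured thread number is far
        # from the hardware thread count; mirrors the checks in the diff.
        hw = hw_threads or os.cpu_count() or 1
        if configured >= hw:
            if configured > MIN_THREADS and configured // hw >= 4:
                return "value (%d) much higher than hardware threads (%d)" % (configured, hw)
        elif configured < MIN_THREADS:
            return "value (%d) lower than hardware threads (%d)" % (configured, hw)
        return None

    # On a 4-thread machine: thread_number_warning(64, 4) warns, thread_number_warning(8, 4) does not.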
---
.../tests/suites/config/autotuning_test.py | 28 +++++++++++++++
ldap/servers/slapd/libglobs.c | 34 ++++++++++++++++++-
ldap/servers/slapd/slap.h | 3 ++
3 files changed, 64 insertions(+), 1 deletion(-)
diff --git a/dirsrvtests/tests/suites/config/autotuning_test.py b/dirsrvtests/tests/suites/config/autotuning_test.py
index d1c751444..540761250 100644
--- a/dirsrvtests/tests/suites/config/autotuning_test.py
+++ b/dirsrvtests/tests/suites/config/autotuning_test.py
@@ -43,6 +43,34 @@ def test_threads_basic(topo):
assert topo.standalone.config.get_attr_val_int("nsslapd-threadnumber") > 0
+def test_threads_warning(topo):
+ """Check that we log a warning if the thread number is too high or low
+
+ :id: db92412b-2812-49de-84b0-00f452cd254f
+ :setup: Standalone Instance
+ :steps:
+ 1. Get autotuned thread number
+ 2. Set threads way higher than hw threads, and find a warning in the log
+ 3. Set threads way lower than hw threads, and find a warning in the log
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+ topo.standalone.config.set("nsslapd-threadnumber", "-1")
+ autotuned_value = topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber")
+
+ topo.standalone.config.set("nsslapd-threadnumber", str(int(autotuned_value) * 4))
+ time.sleep(.5)
+ assert topo.standalone.ds_error_log.match('.*higher.*hurt server performance.*')
+
+ if int(autotuned_value) > 1:
+ # If autotuned is 1, there isn't anything to test here
+ topo.standalone.config.set("nsslapd-threadnumber", "1")
+ time.sleep(.5)
+ assert topo.standalone.ds_error_log.match('.*lower.*hurt server performance.*')
+
+
@pytest.mark.parametrize("invalid_value", ('-2', '0', 'invalid'))
def test_threads_invalid_value(topo, invalid_value):
"""Check nsslapd-threadnumber for an invalid values
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index fbf90d92d..88676a303 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -4374,6 +4374,7 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a
{
int retVal = LDAP_SUCCESS;
int32_t threadnum = 0;
+ int32_t hw_threadnum = 0;
char *endp = NULL;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
@@ -4386,8 +4387,39 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a
threadnum = strtol(value, &endp, 10);
/* Means we want to re-run the hardware detection. */
+ hw_threadnum = util_get_hardware_threads();
if (threadnum == -1) {
- threadnum = util_get_hardware_threads();
+ threadnum = hw_threadnum;
+ } else {
+ /*
+ * Log a message if the user defined thread number is very different
+ * from the hardware threads as this is probably not the optimal
+ * value.
+ */
+ if (threadnum >= hw_threadnum) {
+ if (threadnum > MIN_THREADS && threadnum / hw_threadnum >= 4) {
+ /* We're over the default minimum and way higher than the hw
+ * threads. */
+ slapi_log_err(SLAPI_LOG_NOTICE, "config_set_threadnumber",
+ "The configured thread number (%d) is significantly "
+ "higher than the number of hardware threads (%d). "
+ "This can potentially hurt server performance. If "
+ "you are unsure how to tune \"nsslapd-threadnumber\" "
+ "then set it to \"-1\" and the server will tune it "
+ "according to the system hardware\n",
+ threadnum, hw_threadnum);
+ }
+ } else if (threadnum < MIN_THREADS) {
+ /* The thread number should never be less than the minimum and
+ * hardware threads. */
+ slapi_log_err(SLAPI_LOG_WARNING, "config_set_threadnumber",
+ "The configured thread number (%d) is lower than the number "
+ "of hardware threads (%d). This will hurt server performance. "
+ "If you are unsure how to tune \"nsslapd-threadnumber\" then "
+ "set it to \"-1\" and the server will tune it according to the "
+ "system hardware\n",
+ threadnum, hw_threadnum);
+ }
}
if (*endp != '\0' || errno == ERANGE || threadnum < 1 || threadnum > 65535) {
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 8e76393c3..894efd29c 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -403,6 +403,9 @@ typedef void (*VFPV)(); /* takes undefined arguments */
#define SLAPD_DEFAULT_PW_MAX_CLASS_CHARS_ATTRIBUTE 0
#define SLAPD_DEFAULT_PW_MAX_CLASS_CHARS_ATTRIBUTE_STR "0"
+#define MIN_THREADS 16
+#define MAX_THREADS 512
+
/* Default password values. */
--
2.26.2

View File

@ -0,0 +1,34 @@
From d24381488a997dda0006b603fb2b452b726757c0 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <simon.pichugin@gmail.com>
Date: Thu, 25 Jun 2020 10:45:16 +0200
Subject: [PATCH] Issue 51188 - db2ldif crashes when LDIF file can't be
accessed
Bug Description: db2ldif crashes when we set '-a LDIF_PATH' to a place that
can't be accessed by the user (dirsrv by default)
Fix Description: Don't attempt to close DB if we bail after a failed
attempt to open LDIF file.
https://pagure.io/389-ds-base/issue/51188
Reviewed by: mreynolds (Thanks!)
---
ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
index 542147c3d..9ffd877cb 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
@@ -871,6 +871,7 @@ bdb_db2ldif(Slapi_PBlock *pb)
slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif",
"db2ldif: %s: can't open %s: %d (%s) while running as user \"%s\"\n",
inst->inst_name, fname, errno, dblayer_strerror(errno), slapdFrontendConfig->localuserinfo->pw_name);
+ we_start_the_backends = 0;
return_value = -1;
goto bye;
}
--
2.26.2

View File

@ -0,0 +1,33 @@
From 5e0a2d34f1c03a7d6a1c8591896a21e122d90d6b Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Thu, 23 Jul 2020 23:45:18 +0200
Subject: [PATCH] Issue 51086 - Fix instance name length for interactive
install
Description: Instance name length is not properly validated
during interactive install. Add a check during user input.
https://pagure.io/389-ds-base/issue/51086
Reviewed by: mreynolds (Thanks!)
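A minimal standalone restatement of the interactive checks (both the 80-character limit and the existing ASCII rule appear in the diff below); validate_instance_name is a hypothetical helper, not part of lib389:
    def validate_instance_name(val):
        # Return an error message for an invalid server identifier, else None.
        if len(val) > 80:
            return "Server identifier should not be longer than 80 symbols"
        if not all(ord(c) < 128 for c in val):
            return "Server identifier can not contain non ascii characters"
        return None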
---
src/lib389/lib389/instance/setup.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index f5fc5495d..45c7dfdd4 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -308,6 +308,9 @@ class SetupDs(object):
val = input('\nEnter the instance name [{}]: '.format(slapd['instance_name'])).rstrip()
if val != "":
+ if len(val) > 80:
+ print("Server identifier should not be longer than 80 symbols")
+ continue
if not all(ord(c) < 128 for c in val):
print("Server identifier can not contain non ascii characters")
continue
--
2.26.2

View File

@ -0,0 +1,360 @@
From 3e11020fa7a79d335a02c001435aabcf59aaa622 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 24 Jul 2020 12:14:44 -0400
Subject: [PATCH] Issue 51129 - SSL alert: The value of sslVersionMax "TLS1.3"
is higher than the supported version
Bug Description: If you try to set the sslVersionMax higher than the
default range, but within the supported range, you
would still get an error and the server would reset
the max to the default max value.
Fix Description: Keep track of both the supported and default SSL ranges,
and correctly use each range for value validation. If
the value is outside the supported range, then use the default
value, etc., but do not check the requested range against
the default range. We only use the default range if
there is no specified min or max in the config, or if
an invalid min or max value is set in the config.
Also, refactored the range variable names to be more
accurate:
enabledNSSVersions --> defaultNSSVersions
emin, emax --> dmin, dmax
relates: https://pagure.io/389-ds-base/issue/51129
Reviewed by: firstyear(Thanks!)
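Independent of the NSS API, the rule described above can be modelled as: honour a requested bound whenever it falls inside the supported range, and fall back to the default bound only when it does not (a simplified Python sketch, not the ssl.c code; the version numbers in the example are illustrative):
    def pick_version(requested, supported, default, is_min):
        # supported and default are (min, max) tuples of TLS version numbers.
        lo, hi = supported
        if lo <= requested <= hi:
            return requested
        return default[0] if is_min else default[1]

    # With supported (1.0, 1.3) and default (1.2, 1.2), a requested max of 1.3
    # is now honoured instead of being reset to the default max:
    assert pick_version(1.3, (1.0, 1.3), (1.2, 1.2), is_min=False) == 1.3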
---
ldap/servers/slapd/ssl.c | 155 ++++++++++++++++----------------
src/lib389/lib389/dirsrv_log.py | 2 +-
2 files changed, 81 insertions(+), 76 deletions(-)
diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c
index 846106b42..7206cafd2 100644
--- a/ldap/servers/slapd/ssl.c
+++ b/ldap/servers/slapd/ssl.c
@@ -50,11 +50,11 @@
******************************************************************************/
#define DEFVERSION "TLS1.2"
-#define CURRENT_DEFAULT_SSL_VERSION SSL_LIBRARY_VERSION_TLS_1_2
extern char *slapd_SSL3ciphers;
extern symbol_t supported_ciphers[];
-static SSLVersionRange enabledNSSVersions;
+static SSLVersionRange defaultNSSVersions;
+static SSLVersionRange supportedNSSVersions;
static SSLVersionRange slapdNSSVersions;
@@ -1014,15 +1014,24 @@ slapd_nss_init(int init_ssl __attribute__((unused)), int config_available __attr
int create_certdb = 0;
PRUint32 nssFlags = 0;
char *certdir;
- char emin[VERSION_STR_LENGTH], emax[VERSION_STR_LENGTH];
- /* Get the range of the supported SSL version */
- SSL_VersionRangeGetDefault(ssl_variant_stream, &enabledNSSVersions);
+ char dmin[VERSION_STR_LENGTH], dmax[VERSION_STR_LENGTH];
+ char smin[VERSION_STR_LENGTH], smax[VERSION_STR_LENGTH];
- (void)slapi_getSSLVersion_str(enabledNSSVersions.min, emin, sizeof(emin));
- (void)slapi_getSSLVersion_str(enabledNSSVersions.max, emax, sizeof(emax));
+ /* Get the range of the supported SSL version */
+ SSL_VersionRangeGetSupported(ssl_variant_stream, &supportedNSSVersions);
+ (void)slapi_getSSLVersion_str(supportedNSSVersions.min, smin, sizeof(smin));
+ (void)slapi_getSSLVersion_str(supportedNSSVersions.max, smax, sizeof(smax));
+
+ /* Get the enabled default range */
+ SSL_VersionRangeGetDefault(ssl_variant_stream, &defaultNSSVersions);
+ (void)slapi_getSSLVersion_str(defaultNSSVersions.min, dmin, sizeof(dmin));
+ (void)slapi_getSSLVersion_str(defaultNSSVersions.max, dmax, sizeof(dmax));
slapi_log_err(SLAPI_LOG_CONFIG, "Security Initialization",
"slapd_nss_init - Supported range by NSS: min: %s, max: %s\n",
- emin, emax);
+ smin, smax);
+ slapi_log_err(SLAPI_LOG_CONFIG, "Security Initialization",
+ "slapd_nss_init - Enabled default range by NSS: min: %s, max: %s\n",
+ dmin, dmax);
/* set in slapd_bootstrap_config,
thus certdir is available even if config_available is false
@@ -1344,21 +1353,21 @@ static int
set_NSS_version(char *val, PRUint16 *rval, int ismin)
{
char *vp;
- char emin[VERSION_STR_LENGTH], emax[VERSION_STR_LENGTH];
+ char dmin[VERSION_STR_LENGTH], dmax[VERSION_STR_LENGTH];
if (NULL == rval) {
return 1;
}
- (void)slapi_getSSLVersion_str(enabledNSSVersions.min, emin, sizeof(emin));
- (void)slapi_getSSLVersion_str(enabledNSSVersions.max, emax, sizeof(emax));
+ (void)slapi_getSSLVersion_str(defaultNSSVersions.min, dmin, sizeof(dmin));
+ (void)slapi_getSSLVersion_str(defaultNSSVersions.max, dmax, sizeof(dmax));
if (!strncasecmp(val, SSLSTR, SSLLEN)) { /* ssl# NOT SUPPORTED */
if (ismin) {
- slapd_SSL_warn("SSL3 is no longer supported. Using NSS default min value: %s\n", emin);
- (*rval) = enabledNSSVersions.min;
+ slapd_SSL_warn("SSL3 is no longer supported. Using NSS default min value: %s", dmin);
+ (*rval) = defaultNSSVersions.min;
} else {
- slapd_SSL_warn("SSL3 is no longer supported. Using NSS default max value: %s\n", emax);
- (*rval) = enabledNSSVersions.max;
+ slapd_SSL_warn("SSL3 is no longer supported. Using NSS default max value: %s", dmax);
+ (*rval) = defaultNSSVersions.max;
}
} else if (!strncasecmp(val, TLSSTR, TLSLEN)) { /* tls# */
float tlsv;
@@ -1366,122 +1375,122 @@ set_NSS_version(char *val, PRUint16 *rval, int ismin)
sscanf(vp, "%4f", &tlsv);
if (tlsv < 1.1f) { /* TLS1.0 */
if (ismin) {
- if (enabledNSSVersions.min > CURRENT_DEFAULT_SSL_VERSION) {
+ if (supportedNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_0) {
slapd_SSL_warn("The value of sslVersionMin "
"\"%s\" is lower than the supported version; "
"the default value \"%s\" is used.",
- val, emin);
- (*rval) = enabledNSSVersions.min;
+ val, dmin);
+ (*rval) = defaultNSSVersions.min;
} else {
(*rval) = SSL_LIBRARY_VERSION_TLS_1_0;
}
} else {
- if (enabledNSSVersions.max < CURRENT_DEFAULT_SSL_VERSION) {
+ if (supportedNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_0) {
/* never happens */
slapd_SSL_warn("The value of sslVersionMax "
"\"%s\" is higher than the supported version; "
"the default value \"%s\" is used.",
- val, emax);
- (*rval) = enabledNSSVersions.max;
+ val, dmax);
+ (*rval) = defaultNSSVersions.max;
} else {
(*rval) = SSL_LIBRARY_VERSION_TLS_1_0;
}
}
} else if (tlsv < 1.2f) { /* TLS1.1 */
if (ismin) {
- if (enabledNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_1) {
+ if (supportedNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_1) {
slapd_SSL_warn("The value of sslVersionMin "
"\"%s\" is lower than the supported version; "
"the default value \"%s\" is used.",
- val, emin);
- (*rval) = enabledNSSVersions.min;
+ val, dmin);
+ (*rval) = defaultNSSVersions.min;
} else {
(*rval) = SSL_LIBRARY_VERSION_TLS_1_1;
}
} else {
- if (enabledNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_1) {
+ if (supportedNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_1) {
/* never happens */
slapd_SSL_warn("The value of sslVersionMax "
"\"%s\" is higher than the supported version; "
"the default value \"%s\" is used.",
- val, emax);
- (*rval) = enabledNSSVersions.max;
+ val, dmax);
+ (*rval) = defaultNSSVersions.max;
} else {
(*rval) = SSL_LIBRARY_VERSION_TLS_1_1;
}
}
} else if (tlsv < 1.3f) { /* TLS1.2 */
if (ismin) {
- if (enabledNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_2) {
+ if (supportedNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_2) {
slapd_SSL_warn("The value of sslVersionMin "
"\"%s\" is lower than the supported version; "
"the default value \"%s\" is used.",
- val, emin);
- (*rval) = enabledNSSVersions.min;
+ val, dmin);
+ (*rval) = defaultNSSVersions.min;
} else {
(*rval) = SSL_LIBRARY_VERSION_TLS_1_2;
}
} else {
- if (enabledNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_2) {
+ if (supportedNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_2) {
/* never happens */
slapd_SSL_warn("The value of sslVersionMax "
"\"%s\" is higher than the supported version; "
"the default value \"%s\" is used.",
- val, emax);
- (*rval) = enabledNSSVersions.max;
+ val, dmax);
+ (*rval) = defaultNSSVersions.max;
} else {
(*rval) = SSL_LIBRARY_VERSION_TLS_1_2;
}
}
} else if (tlsv < 1.4f) { /* TLS1.3 */
- if (ismin) {
- if (enabledNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_3) {
- slapd_SSL_warn("The value of sslVersionMin "
- "\"%s\" is lower than the supported version; "
- "the default value \"%s\" is used.",
- val, emin);
- (*rval) = enabledNSSVersions.min;
- } else {
- (*rval) = SSL_LIBRARY_VERSION_TLS_1_3;
- }
- } else {
- if (enabledNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_3) {
- /* never happens */
- slapd_SSL_warn("The value of sslVersionMax "
- "\"%s\" is higher than the supported version; "
- "the default value \"%s\" is used.",
- val, emax);
- (*rval) = enabledNSSVersions.max;
- } else {
- (*rval) = SSL_LIBRARY_VERSION_TLS_1_3;
- }
- }
+ if (ismin) {
+ if (supportedNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_3) {
+ slapd_SSL_warn("The value of sslVersionMin "
+ "\"%s\" is lower than the supported version; "
+ "the default value \"%s\" is used.",
+ val, dmin);
+ (*rval) = defaultNSSVersions.min;
+ } else {
+ (*rval) = SSL_LIBRARY_VERSION_TLS_1_3;
+ }
+ } else {
+ if (supportedNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_3) {
+ /* never happens */
+ slapd_SSL_warn("The value of sslVersionMax "
+ "\"%s\" is higher than the supported version; "
+ "the default value \"%s\" is used.",
+ val, dmax);
+ (*rval) = defaultNSSVersions.max;
+ } else {
+ (*rval) = SSL_LIBRARY_VERSION_TLS_1_3;
+ }
+ }
} else { /* Specified TLS is newer than supported */
if (ismin) {
slapd_SSL_warn("The value of sslVersionMin "
"\"%s\" is out of the range of the supported version; "
"the default value \"%s\" is used.",
- val, emin);
- (*rval) = enabledNSSVersions.min;
+ val, dmin);
+ (*rval) = defaultNSSVersions.min;
} else {
slapd_SSL_warn("The value of sslVersionMax "
"\"%s\" is out of the range of the supported version; "
"the default value \"%s\" is used.",
- val, emax);
- (*rval) = enabledNSSVersions.max;
+ val, dmax);
+ (*rval) = defaultNSSVersions.max;
}
}
} else {
if (ismin) {
slapd_SSL_warn("The value of sslVersionMin "
"\"%s\" is invalid; the default value \"%s\" is used.",
- val, emin);
- (*rval) = enabledNSSVersions.min;
+ val, dmin);
+ (*rval) = defaultNSSVersions.min;
} else {
slapd_SSL_warn("The value of sslVersionMax "
"\"%s\" is invalid; the default value \"%s\" is used.",
- val, emax);
- (*rval) = enabledNSSVersions.max;
+ val, dmax);
+ (*rval) = defaultNSSVersions.max;
}
}
return 0;
@@ -1511,10 +1520,9 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
char *tmpDir;
Slapi_Entry *e = NULL;
PRBool fipsMode = PR_FALSE;
- PRUint16 NSSVersionMin = enabledNSSVersions.min;
- PRUint16 NSSVersionMax = enabledNSSVersions.max;
+ PRUint16 NSSVersionMin = defaultNSSVersions.min;
+ PRUint16 NSSVersionMax = defaultNSSVersions.max;
char mymin[VERSION_STR_LENGTH], mymax[VERSION_STR_LENGTH];
- char newmax[VERSION_STR_LENGTH];
int allowweakcipher = CIPHER_SET_DEFAULTWEAKCIPHER;
int_fast16_t renegotiation = (int_fast16_t)SSL_RENEGOTIATE_REQUIRES_XTN;
@@ -1875,12 +1883,9 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
if (NSSVersionMin > NSSVersionMax) {
(void)slapi_getSSLVersion_str(NSSVersionMin, mymin, sizeof(mymin));
(void)slapi_getSSLVersion_str(NSSVersionMax, mymax, sizeof(mymax));
- slapd_SSL_warn("The min value of NSS version range \"%s\" is greater than the max value \"%s\".",
+ slapd_SSL_warn("The min value of NSS version range \"%s\" is greater than the max value \"%s\". Adjusting the max to match the miniumum.",
mymin, mymax);
- (void)slapi_getSSLVersion_str(enabledNSSVersions.max, newmax, sizeof(newmax));
- slapd_SSL_warn("Reset the max \"%s\" to supported max \"%s\".",
- mymax, newmax);
- NSSVersionMax = enabledNSSVersions.max;
+ NSSVersionMax = NSSVersionMin;
}
}
@@ -1896,7 +1901,7 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
if (sslStatus != SECSuccess) {
errorCode = PR_GetError();
slapd_SSL_error("Security Initialization - "
- "slapd_ssl_init2 - Failed to set SSL range: min: %s, max: %s - error %d (%s)\n",
+ "slapd_ssl_init2 - Failed to set SSL range: min: %s, max: %s - error %d (%s)",
mymin, mymax, errorCode, slapd_pr_strerror(errorCode));
}
/*
@@ -1926,13 +1931,13 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
(void)slapi_getSSLVersion_str(slapdNSSVersions.min, mymin, sizeof(mymin));
(void)slapi_getSSLVersion_str(slapdNSSVersions.max, mymax, sizeof(mymax));
slapd_SSL_error("Security Initialization - "
- "slapd_ssl_init2 - Failed to set SSL range: min: %s, max: %s - error %d (%s)\n",
+ "slapd_ssl_init2 - Failed to set SSL range: min: %s, max: %s - error %d (%s)",
mymin, mymax, errorCode, slapd_pr_strerror(errorCode));
}
} else {
errorCode = PR_GetError();
slapd_SSL_error("Security Initialization - ",
- "slapd_ssl_init2 - Failed to get SSL range from socket - error %d (%s)\n",
+ "slapd_ssl_init2 - Failed to get SSL range from socket - error %d (%s)",
errorCode, slapd_pr_strerror(errorCode));
}
@@ -2265,7 +2270,7 @@ slapd_SSL_client_auth(LDAP *ld)
}
} else {
if (token == NULL) {
- slapd_SSL_warn("slapd_SSL_client_auth - certificate token was not found\n");
+ slapd_SSL_warn("slapd_SSL_client_auth - certificate token was not found");
}
rc = -1;
}
diff --git a/src/lib389/lib389/dirsrv_log.py b/src/lib389/lib389/dirsrv_log.py
index 7bed4bb17..ab8872051 100644
--- a/src/lib389/lib389/dirsrv_log.py
+++ b/src/lib389/lib389/dirsrv_log.py
@@ -207,7 +207,7 @@ class DirsrvAccessLog(DirsrvLog):
return {
'base': quoted_vals[0],
'filter': quoted_vals[1],
- 'timestamp': re.findall('\[(.*)\]', lines[0])[0],
+ 'timestamp': re.findall('[(.*)]', lines[0])[0],
'scope': lines[0].split(' scope=', 1)[1].split(' ',1)[0]
}
--
2.26.2

View File

@ -0,0 +1,202 @@
From 68ca1de0f39c11056a57b03a544520bd6708d855 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <simon.pichugin@gmail.com>
Date: Thu, 11 Jun 2020 15:39:59 +0200
Subject: [PATCH] Issue 50984 - Memory leaks in disk monitoring
Description: Fix the rest of the leaks in disk monitoring
which are present when we shut down while being below half
of the threshold (at start-up in main.c).
Free directories, sockets, and ports before going to cleanup.
https://pagure.io/389-ds-base/issue/50984
Reviewed by: mhonek, tbordaz (Thanks!)
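For context, the start-up check that the patch moves earlier in main.c (see the diff below) boils down to: refuse to start when a monitored directory already has less free space than half the configured threshold. A hedged sketch of that condition:
    def must_refuse_startup(free_bytes_by_dir, threshold_bytes):
        # free_bytes_by_dir maps each monitored directory to its free space.
        halfway = threshold_bytes // 2
        return any(free < halfway for free in free_bytes_by_dir.values())

    # With a 2 GiB threshold, a directory with only 900 MiB free blocks start-up:
    assert must_refuse_startup({"/var/log/dirsrv": 900 * 2**20}, 2 * 2**30)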
---
ldap/servers/slapd/daemon.c | 75 ++++++++++++++++++++-----------------
ldap/servers/slapd/fe.h | 1 +
ldap/servers/slapd/main.c | 49 +++++++++++++-----------
3 files changed, 70 insertions(+), 55 deletions(-)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index a70f40316..7091b570d 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -884,6 +884,46 @@ convert_pbe_des_to_aes(void)
charray_free(attrs);
}
+void
+slapd_sockets_ports_free(daemon_ports_t *ports_info)
+{
+ /* freeing PRFileDescs */
+ PRFileDesc **fdesp = NULL;
+ for (fdesp = ports_info->n_socket; fdesp && *fdesp; fdesp++) {
+ PR_Close(*fdesp);
+ }
+ slapi_ch_free((void **)&ports_info->n_socket);
+
+ for (fdesp = ports_info->s_socket; fdesp && *fdesp; fdesp++) {
+ PR_Close(*fdesp);
+ }
+ slapi_ch_free((void **)&ports_info->s_socket);
+#if defined(ENABLE_LDAPI)
+ for (fdesp = ports_info->i_socket; fdesp && *fdesp; fdesp++) {
+ PR_Close(*fdesp);
+ }
+ slapi_ch_free((void **)&ports_info->i_socket);
+#endif /* ENABLE_LDAPI */
+
+ /* freeing NetAddrs */
+ PRNetAddr **nap;
+ for (nap = ports_info->n_listenaddr; nap && *nap; nap++) {
+ slapi_ch_free((void **)nap);
+ }
+ slapi_ch_free((void **)&ports_info->n_listenaddr);
+
+ for (nap = ports_info->s_listenaddr; nap && *nap; nap++) {
+ slapi_ch_free((void **)nap);
+ }
+ slapi_ch_free((void **)&ports_info->s_listenaddr);
+#if defined(ENABLE_LDAPI)
+ for (nap = ports_info->i_listenaddr; nap && *nap; nap++) {
+ slapi_ch_free((void **)nap);
+ }
+ slapi_ch_free((void **)&ports_info->i_listenaddr);
+#endif
+}
+
void
slapd_daemon(daemon_ports_t *ports)
{
@@ -1099,40 +1139,7 @@ slapd_daemon(daemon_ports_t *ports)
/* free the listener indexes */
slapi_ch_free((void **)&listener_idxs);
- for (fdesp = n_tcps; fdesp && *fdesp; fdesp++) {
- PR_Close(*fdesp);
- }
- slapi_ch_free((void **)&n_tcps);
-
- for (fdesp = i_unix; fdesp && *fdesp; fdesp++) {
- PR_Close(*fdesp);
- }
- slapi_ch_free((void **)&i_unix);
-
- for (fdesp = s_tcps; fdesp && *fdesp; fdesp++) {
- PR_Close(*fdesp);
- }
- slapi_ch_free((void **)&s_tcps);
-
- /* freeing NetAddrs */
- {
- PRNetAddr **nap;
- for (nap = ports->n_listenaddr; nap && *nap; nap++) {
- slapi_ch_free((void **)nap);
- }
- slapi_ch_free((void **)&ports->n_listenaddr);
-
- for (nap = ports->s_listenaddr; nap && *nap; nap++) {
- slapi_ch_free((void **)nap);
- }
- slapi_ch_free((void **)&ports->s_listenaddr);
-#if defined(ENABLE_LDAPI)
- for (nap = ports->i_listenaddr; nap && *nap; nap++) {
- slapi_ch_free((void **)nap);
- }
- slapi_ch_free((void **)&ports->i_listenaddr);
-#endif
- }
+ slapd_sockets_ports_free(ports);
op_thread_cleanup();
housekeeping_stop(); /* Run this after op_thread_cleanup() logged sth */
diff --git a/ldap/servers/slapd/fe.h b/ldap/servers/slapd/fe.h
index 2d9a0931b..9cd122881 100644
--- a/ldap/servers/slapd/fe.h
+++ b/ldap/servers/slapd/fe.h
@@ -120,6 +120,7 @@ int connection_table_iterate_active_connections(Connection_Table *ct, void *arg,
*/
int signal_listner(void);
int daemon_pre_setuid_init(daemon_ports_t *ports);
+void slapd_sockets_ports_free(daemon_ports_t *ports_info);
void slapd_daemon(daemon_ports_t *ports);
void daemon_register_connection(void);
int slapd_listenhost2addr(const char *listenhost, PRNetAddr ***addr);
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index e54b8e1c5..9e5219c4a 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -734,7 +734,6 @@ main(int argc, char **argv)
* etc the backends need to start
*/
-
/* Important: up 'till here we could be running as root (on unix).
* we believe that we've not created any files before here, otherwise
* they'd be owned by root, which is bad. We're about to change identity
@@ -891,6 +890,34 @@ main(int argc, char **argv)
}
}
+ if (config_get_disk_monitoring()) {
+ char **dirs = NULL;
+ char *dirstr = NULL;
+ uint64_t disk_space = 0;
+ int64_t threshold = 0;
+ uint64_t halfway = 0;
+ threshold = config_get_disk_threshold();
+ halfway = threshold / 2;
+ disk_mon_get_dirs(&dirs);
+ dirstr = disk_mon_check_diskspace(dirs, threshold, &disk_space);
+ if (dirstr != NULL && disk_space < halfway) {
+ slapi_log_err(SLAPI_LOG_EMERG, "main",
+ "Disk Monitoring is enabled and disk space on (%s) is too far below the threshold(%" PRIu64 " bytes). Exiting now.\n",
+ dirstr, threshold);
+ slapi_ch_array_free(dirs);
+ /*
+ * We should free the structs we allocated for sockets and addresses
+ * as they would be freed at the slapd_daemon but it was not initiated
+ * at that point of start-up.
+ */
+ slapd_sockets_ports_free(&ports_info);
+ return_value = 1;
+ goto cleanup;
+ }
+ slapi_ch_array_free(dirs);
+ dirs = NULL;
+ }
+
/* initialize the normalized DN cache */
if (ndn_cache_init() != 0) {
slapi_log_err(SLAPI_LOG_EMERG, "main", "Unable to create ndn cache\n");
@@ -940,26 +967,6 @@ main(int argc, char **argv)
slapi_ch_free((void **)&versionstring);
}
- if (config_get_disk_monitoring()) {
- char **dirs = NULL;
- char *dirstr = NULL;
- uint64_t disk_space = 0;
- int64_t threshold = 0;
- uint64_t halfway = 0;
- threshold = config_get_disk_threshold();
- halfway = threshold / 2;
- disk_mon_get_dirs(&dirs);
- dirstr = disk_mon_check_diskspace(dirs, threshold, &disk_space);
- if (dirstr != NULL && disk_space < halfway) {
- slapi_log_err(SLAPI_LOG_EMERG, "main",
- "Disk Monitoring is enabled and disk space on (%s) is too far below the threshold(%" PRIu64 " bytes). Exiting now.\n",
- dirstr, threshold);
- return_value = 1;
- goto cleanup;
- }
- slapi_ch_array_free(dirs);
- dirs = NULL;
- }
/* log the max fd limit as it is typically set in env/systemd */
slapi_log_err(SLAPI_LOG_INFO, "main",
"Setting the maximum file descriptor limit to: %ld\n",
--
2.26.2

View File

@ -0,0 +1,194 @@
From e78d3bd879b880d679b49f3fa5ebe8009d309063 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Fri, 2 Oct 2020 12:03:12 +0200
Subject: [PATCH 1/8] Issue 4297- On ADD replication URP issue internal
searches with filter containing unescaped chars (#4355)
Bug description:
In MMR a consumer receiving an ADD has to do some checking based on the basedn.
It checks if the entry was a tombstone or if the conflicting parent entry was a tombstone.
To do this checking, URP does internal searches using the basedn.
A '*' (ASTERISK) is valid in an RDN and in a DN. But when a DN is used as the assertion value of a filter, the ASTERISK needs to be escaped, otherwise the server interprets the filter type as a substring filter. (see
https://tools.ietf.org/html/rfc4515#section-3)
The problem is that if an added entry contains an ASTERISK in its DN, it is not escaped in the internal search, which triggers a substring search (likely unindexed).
Fix description:
escape the DN before doing the internal search in URP
Fixes: #4297
Reviewed by: Mark Reynolds, William Brown, Simon Pichugi (thanks !)
Platforms tested: F31
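The RFC 4515 escaping that the fix relies on can be illustrated with a small self-contained helper (a Python sketch of the idea, not the server's slapi_filter_escape_filter_value / slapi_filter_sprintf code):
    def escape_assertion_value(value):
        # Escape the characters that are special inside a filter assertion
        # value: '*', '(', ')', '\' and NUL (RFC 4515 section 3).
        specials = {'*', '(', ')', '\\', '\x00'}
        return ''.join('\\%02x' % ord(c) if c in specials else c for c in value)

    dn = 'uid=asterisk_*_in_value,dc=example,dc=com'
    print('(&(objectclass=nstombstone)(nscpentrydn=%s))' % escape_assertion_value(dn))
    # (&(objectclass=nstombstone)(nscpentrydn=uid=asterisk_\2a_in_value,dc=example,dc=com))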
---
.../suites/replication/acceptance_test.py | 63 +++++++++++++++++++
ldap/servers/plugins/replication/urp.c | 10 ++-
ldap/servers/slapd/filter.c | 21 +++++++
ldap/servers/slapd/slapi-plugin.h | 1 +
4 files changed, 93 insertions(+), 2 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
index 5009f4e7c..661dddb11 100644
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
@@ -7,6 +7,7 @@
# --- END COPYRIGHT BLOCK ---
#
import pytest
+import logging
from lib389.replica import Replicas
from lib389.tasks import *
from lib389.utils import *
@@ -556,6 +557,68 @@ def test_csnpurge_large_valueset(topo_m2):
for i in range(21,25):
test_user.add('description', 'value {}'.format(str(i)))
+@pytest.mark.ds51244
+def test_urp_trigger_substring_search(topo_m2):
+ """Test that a ADD of a entry with a '*' in its DN, triggers
+ an internal search with a escaped DN
+
+ :id: 9869bb39-419f-42c3-a44b-c93eb0b77667
+ :setup: MMR with 2 masters
+ :steps:
+ 1. Enable internal operation logging for plugins
+ 2. Create on M1 a test_user with a '*' in its DN
+ 3. Check the test_user is replicated
+ 4. Check in access logs that the internal search does not contain '*'
+ :expectedresults:
+ 1. Should succeed
+ 2. Should succeed
+ 3. Should succeed
+ 4. Should succeed
+ """
+ m1 = topo_m2.ms["master1"]
+ m2 = topo_m2.ms["master2"]
+
+ # Enable internal operation logging to capture URP internal operations
+ log.info('Set nsslapd-plugin-logging to on')
+ for inst in (m1, m2):
+ inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access')
+ inst.config.set('nsslapd-plugin-logging', 'on')
+ inst.restart()
+
+ # add a user with a DN containing '*'
+ test_asterisk_uid = 'asterisk_*_in_value'
+ test_asterisk_dn = 'uid={},{}'.format(test_asterisk_uid, DEFAULT_SUFFIX)
+
+ test_user = UserAccount(m1, test_asterisk_dn)
+ if test_user.exists():
+ log.info('Deleting entry {}'.format(test_asterisk_dn))
+ test_user.delete()
+ test_user.create(properties={
+ 'uid': test_asterisk_uid,
+ 'cn': test_asterisk_uid,
+ 'sn': test_asterisk_uid,
+ 'userPassword': test_asterisk_uid,
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/asterisk',
+ })
+
+ # check that the ADD was replicated on M2
+ test_user_m2 = UserAccount(m2, test_asterisk_dn)
+ for i in range(1,5):
+ if test_user_m2.exists():
+ break
+ else:
+ log.info('Entry not yet replicated on M2, wait a bit')
+ time.sleep(2)
+
+ # check that M2 access logs do not contain "(&(objectclass=nstombstone)(nscpentrydn=uid=asterisk_*_in_value,dc=example,dc=com))"
+ log.info('Check that on M2, URP has not triggered such an internal search')
+ pattern = ".*\(Internal\).*SRCH.*\(&\(objectclass=nstombstone\)\(nscpentrydn=uid=asterisk_\*_in_value,dc=example,dc=com.*"
+ found = m2.ds_access_log.match(pattern)
+ log.info("found line: %s" % found)
+ assert not found
+
if __name__ == '__main__':
# Run isolated
diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
index 79a817c90..301e9fa00 100644
--- a/ldap/servers/plugins/replication/urp.c
+++ b/ldap/servers/plugins/replication/urp.c
@@ -1411,9 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry,
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
char *basedn = slapi_entry_get_ndn(entry);
+ char *escaped_basedn;
const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry));
+ escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
+ slapi_ch_free((void **)&escaped_basedn);
newpb = slapi_pblock_new();
slapi_search_internal_set_pb(newpb,
slapi_sdn_get_dn(suffix), /* Base DN */
@@ -1602,12 +1605,15 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
const char *basedn = slapi_sdn_get_dn(parentdn);
+ char *escaped_basedn;
+ escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn");
CSN *conflict_csn = csn_new_by_string(conflict_csnstr);
CSN *tombstone_csn = NULL;
- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
+ slapi_ch_free((void **)&escaped_basedn);
newpb = slapi_pblock_new();
char *parent_dn = slapi_dn_parent (basedn);
slapi_search_internal_set_pb(newpb,
diff --git a/ldap/servers/slapd/filter.c b/ldap/servers/slapd/filter.c
index c818baec3..d671c87ff 100644
--- a/ldap/servers/slapd/filter.c
+++ b/ldap/servers/slapd/filter.c
@@ -130,6 +130,27 @@ filter_escape_filter_value(struct slapi_filter *f, const char *fmt, size_t len _
return ptr;
}
+/* Escaped an equality filter value (assertionValue) of a given attribute
+ * Caller must free allocated escaped filter value
+ */
+char *
+slapi_filter_escape_filter_value(char* filter_attr, char *filter_value)
+{
+ char *result;
+ struct slapi_filter *f;
+
+ if ((filter_attr == NULL) || (filter_value == NULL)) {
+ return NULL;
+ }
+ f = (struct slapi_filter *)slapi_ch_calloc(1, sizeof(struct slapi_filter));
+ f->f_choice = LDAP_FILTER_EQUALITY;
+ f->f_un.f_un_ava.ava_type = filter_attr;
+ f->f_un.f_un_ava.ava_value.bv_len = strlen(filter_value);
+ f->f_un.f_un_ava.ava_value.bv_val = filter_value;
+ result = filter_escape_filter_value(f, FILTER_EQ_FMT, FILTER_EQ_LEN);
+ slapi_ch_free((void**) &f);
+ return result;
+}
/*
* get_filter_internal(): extract an LDAP filter from a BerElement and create
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 8d9c3fa6a..04c02cf7c 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -5262,6 +5262,7 @@ int slapi_vattr_filter_test_ext(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Filter *
int slapi_filter_compare(struct slapi_filter *f1, struct slapi_filter *f2);
Slapi_Filter *slapi_filter_dup(Slapi_Filter *f);
int slapi_filter_changetype(Slapi_Filter *f, const char *newtype);
+char *slapi_filter_escape_filter_value(char* filter_attr, char *filter_value);
int slapi_attr_is_last_mod(char *attr);
--
2.26.2

View File

@ -0,0 +1,66 @@
From 3cf7734177c70c36062d4e667b91e15f22a2ea81 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 25 Nov 2020 18:07:34 +0100
Subject: [PATCH 2/8] Issue 4297 - 2nd fix for on ADD replication URP issue
internal searches with filter containing unescaped chars (#4439)
Bug description:
The previous fix is buggy because slapi_filter_escape_filter_value returns
an escaped filter component, not an escaped assertion value.
Fix description:
use the escaped filter component
relates: https://github.com/389ds/389-ds-base/issues/4297
Reviewed by: William Brown
Platforms tested: F31
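The distinction the second fix relies on, illustrated with plain strings (hedged: the return format of slapi_filter_escape_filter_value is taken from the commit message above, which says it returns a whole escaped filter component rather than a bare assertion value):
    # The helper returns a complete component such as:
    component = r'(nscpentrydn=uid=asterisk_\2a_in_value,dc=example,dc=com)'
    # so it must be spliced in as a component, not wrapped again as a value:
    wrong = '(&(objectclass=nstombstone)(nscpentrydn=%s))' % component  # doubled parentheses
    right = '(&(objectclass=nstombstone)%s)' % component
    print(right)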
---
ldap/servers/plugins/replication/urp.c | 17 ++++++++---------
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
index 301e9fa00..96ad2759a 100644
--- a/ldap/servers/plugins/replication/urp.c
+++ b/ldap/servers/plugins/replication/urp.c
@@ -1411,12 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry,
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
char *basedn = slapi_entry_get_ndn(entry);
- char *escaped_basedn;
+ char *escaped_filter;
const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry));
- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", basedn);
- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
- slapi_ch_free((void **)&escaped_basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
+ slapi_ch_free((void **)&escaped_filter);
newpb = slapi_pblock_new();
slapi_search_internal_set_pb(newpb,
slapi_sdn_get_dn(suffix), /* Base DN */
@@ -1605,15 +1605,14 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
const char *basedn = slapi_sdn_get_dn(parentdn);
- char *escaped_basedn;
- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
-
+ char *escaped_filter;
+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn);
char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn");
CSN *conflict_csn = csn_new_by_string(conflict_csnstr);
CSN *tombstone_csn = NULL;
- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
- slapi_ch_free((void **)&escaped_basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
+ slapi_ch_free((void **)&escaped_filter);
newpb = slapi_pblock_new();
char *parent_dn = slapi_dn_parent (basedn);
slapi_search_internal_set_pb(newpb,
--
2.26.2

View File

@ -0,0 +1,37 @@
From 16a004faf7eda3f8c4d59171bceab8cf78a9d002 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 6 Aug 2020 14:50:19 -0400
Subject: [PATCH 3/8] Issue 51233 - ds-replcheck crashes in offline mode
Bug Description: When processing all the DNs found in the Master LDIF
it is possible that the LDIF is not in the expected
order and ldifsearch fails (crashing the tool).
Fix Description: If ldifsearch does not find an entry, start from the
beginning of the LDIF and try again.
relates: https://pagure.io/389-ds-base/issue/51233
Reviewed by: spichugi(Thanks!)
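A minimal sketch of the rewind-and-retry idea (not the real ldif_search() from ds-replcheck): a forward-only scan can miss an entry that sits before the current file offset, so a second pass from the start of the file is attempted before declaring the entry missing:
    def scan_for_dn(fh, dn):
        # Forward scan for a matching "dn:" line from the current offset.
        target = ('dn: %s' % dn).lower()
        return any(line.strip().lower() == target for line in fh)

    def find_dn_with_retry(fh, dn):
        if scan_for_dn(fh, dn):
            return True
        fh.seek(0)  # the entry may appear earlier in the file than expected
        return scan_for_dn(fh, dn)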
---
ldap/admin/src/scripts/ds-replcheck | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
index 5bb7dfce3..1c133f4dd 100755
--- a/ldap/admin/src/scripts/ds-replcheck
+++ b/ldap/admin/src/scripts/ds-replcheck
@@ -725,6 +725,10 @@ def do_offline_report(opts, output_file=None):
missing = False
for dn in master_dns:
mresult = ldif_search(MLDIF, dn)
+ if mresult['entry'] is None and mresult['conflict'] is None and not mresult['tombstone']:
+ # Try from the beginning
+ MLDIF.seek(0)
+ mresult = ldif_search(MLDIF, dn)
rresult = ldif_search(RLDIF, dn)
if dn in replica_dns:
--
2.26.2

View File

@ -0,0 +1,103 @@
From bc8bdaa57ba9b57671e2921705b99eaa70729ce7 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 11 Nov 2020 11:45:11 -0500
Subject: [PATCH 4/8] Issue 4429 - NULL dereference in revert_cache()
Bug Description: During a delete, if the DN (with an escaped leading space)
of an existing entry fails to parse, the server will revert
the entry update. In this case it will lead to a crash
because the ldbm instance struct is not set before it attempts
the cache revert.
Fix Description: Check that the ldbm instance struct is not NULL before
dereferencing it.
Relates: https://github.com/389ds/389-ds-base/issues/4429
Reviewed by: firstyear & spichugi(Thanks!!)
---
.../tests/suites/syntax/acceptance_test.py | 40 +++++++++++++++++++
ldap/servers/slapd/back-ldbm/cache.c | 3 ++
2 files changed, 43 insertions(+)
diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py
index db8f63c7e..543718689 100644
--- a/dirsrvtests/tests/suites/syntax/acceptance_test.py
+++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py
@@ -6,12 +6,14 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
+import ldap
import logging
import pytest
import os
from lib389.schema import Schema
from lib389.config import Config
from lib389.idm.user import UserAccounts
+from lib389.idm.group import Groups
from lib389._constants import DEFAULT_SUFFIX
from lib389.topologies import log, topology_st as topo
@@ -105,6 +107,44 @@ def test_invalid_uidnumber(topo, validate_syntax_off):
log.info('Found an invalid entry with wrong uidNumber - Success')
+def test_invalid_dn_syntax_crash(topo):
+ """Add an entry with an escaped space, restart the server, and try to delete
+ it. In this case the DN is not correctly parsed and causes cache revert to
+ to dereference a NULL pointer. So the delete can fail as long as the server
+ does not crash.
+
+ :id: 62d87272-dfb8-4627-9ca1-dbe33082caf8
+ :setup: Standalone Instance
+ :steps:
+ 1. Add entry with leading escaped space in the RDN
+ 2. Restart the server so the entry is rebuilt from the database
+ 3. Delete the entry
+ 4. The server should still be running
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ # Create group
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties={'cn': ' test'})
+
+ # Restart the server
+ topo.standalone.restart()
+
+ # Delete group
+ try:
+ group.delete()
+ except ldap.NO_SUCH_OBJECT:
+ # This is okay in this case as we are only concerned about a crash
+ pass
+
+ # Make sure server is still running
+ groups.list()
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c
index 89f958a35..5ad9ca829 100644
--- a/ldap/servers/slapd/back-ldbm/cache.c
+++ b/ldap/servers/slapd/back-ldbm/cache.c
@@ -614,6 +614,9 @@ flush_hash(struct cache *cache, struct timespec *start_time, int32_t type)
void
revert_cache(ldbm_instance *inst, struct timespec *start_time)
{
+ if (inst == NULL) {
+ return;
+ }
flush_hash(&inst->inst_cache, start_time, ENTRY_CACHE);
flush_hash(&inst->inst_dncache, start_time, DN_CACHE);
}
--
2.26.2

View File

@ -0,0 +1,232 @@
From 132f126c18214345ef4204bf8a061a0eca58fa59 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Tue, 3 Nov 2020 12:18:50 +0100
Subject: [PATCH 5/8] ticket 2058: Add keep alive entry after on-line
initialization - second version (#4399)
Bug description:
The keep alive entry is not created on the target master after online initialization,
and its RUV element stays empty until a direct update is issued on that master.
Fix description:
The patch allows a consumer (configured as a master) to create (if it did not
exist before) the consumer's keep alive entry. It creates it at the end of a
replication session at a time we are sure the changelog exists and will not
be reset. It allows a consumer to have a RUV element with a csn in the RUV at the
first incoming replication session.
That is basically lkrispen's proposal with an associated pytest testcase
Second version changes:
- moved the testcase to suites/replication/regression_test.py
- set up the topology from a 2 master topology then
reinitialized the replicas from an ldif without replication metadata
rather than using the cli.
- search for keepalive entries using search_s instead of getEntry
- add a comment about keep alive entries purpose
last commit:
- wait that ruv are in sync before checking keep alive entries
Reviewed by: droideck, Firstyear
Platforms tested: F32
relates: #2058
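For reference, the keep alive entries discussed here are subentries named after the master's replica ID; the naming below follows the KEEP_ALIVE_ENTRY / KEEP_ALIVE_DN_FORMAT macros further down in repl5_replica.c (the Python helper itself is only an illustration):
    KEEP_ALIVE_ENTRY = "repl keep alive"

    def keep_alive_dn(rid, suffix):
        # Build the DN of the keep alive subentry for a given replica ID.
        return "cn=%s %d,%s" % (KEEP_ALIVE_ENTRY, rid, suffix)

    print(keep_alive_dn(1, "dc=example,dc=com"))
    # cn=repl keep alive 1,dc=example,dc=com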
---
.../suites/replication/regression_test.py | 130 ++++++++++++++++++
.../plugins/replication/repl5_replica.c | 14 ++
ldap/servers/plugins/replication/repl_extop.c | 4 +
3 files changed, 148 insertions(+)
diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index 844d762b9..14b9d6a44 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -98,6 +98,30 @@ def _move_ruv(ldif_file):
for dn, entry in ldif_list:
ldif_writer.unparse(dn, entry)
+def _remove_replication_data(ldif_file):
+ """ Remove the replication data from ldif file:
+ db2ldif without -r includes some of the replica data like
+ - nsUniqueId
+ - keepalive entries
+ This function filters the ldif file to remove this data
+ """
+
+ with open(ldif_file) as f:
+ parser = ldif.LDIFRecordList(f)
+ parser.parse()
+
+ ldif_list = parser.all_records
+ # Iterate on a copy of the ldif entry list
+ for dn, entry in ldif_list[:]:
+ if dn.startswith('cn=repl keep alive'):
+ ldif_list.remove((dn,entry))
+ else:
+ entry.pop('nsUniqueId')
+ with open(ldif_file, 'w') as f:
+ ldif_writer = ldif.LDIFWriter(f)
+ for dn, entry in ldif_list:
+ ldif_writer.unparse(dn, entry)
+
@pytest.fixture(scope="module")
def topo_with_sigkill(request):
@@ -897,6 +921,112 @@ def test_moving_entry_make_online_init_fail(topology_m2):
assert len(m1entries) == len(m2entries)
+def get_keepalive_entries(instance,replica):
+ # Returns the keep alive entries that exist under the suffix of the server instance
+ try:
+ entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL,
+ "(&(objectclass=ldapsubentry)(cn=repl keep alive*))",
+ ['cn', 'nsUniqueId', 'modifierTimestamp'])
+ except ldap.LDAPError as e:
+ log.fatal('Failed to retrieve keepalive entry (%s) on instance %s: error %s' % (dn, instance, str(e)))
+ assert False
+ # No error, so lets log the keepalive entries
+ if log.isEnabledFor(logging.DEBUG):
+ for ret in entries:
+ log.debug("Found keepalive entry:\n"+str(ret));
+ return entries
+
+def verify_keepalive_entries(topo, expected):
+ #Check that keep alive entries exist (or do not exist) for every master on every master
+ #Note: The testing method is quite basic: counting that there is one keepalive entry per master.
+ # that is ok for simple test cases like test_online_init_should_create_keepalive_entries but
+ # not for the general case as keep alive associated with no more existing master may exists
+ # (for example after: db2ldif / demote a master / ldif2db / init other masters)
+ # ==> if the function is somehow pushed in lib389, a check better than simply counting the entries
+ # should be done.
+ for masterId in topo.ms:
+ master=topo.ms[masterId]
+ for replica in Replicas(master).list():
+ if (replica.get_role() != ReplicaRole.MASTER):
+ continue
+ replica_info = f'master: {masterId} RID: {replica.get_rid()} suffix: {replica.get_suffix()}'
+ log.debug(f'Checking keepAliveEntries on {replica_info}')
+ keepaliveEntries = get_keepalive_entries(master, replica);
+ expectedCount = len(topo.ms) if expected else 0
+ foundCount = len(keepaliveEntries)
+ if (foundCount == expectedCount):
+ log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.')
+ else:
+ log.error(f'{foundCount} Keepalive entries are found '
+ f'while {expectedCount} were expected on {replica_info}.')
+ assert False
+
+
+def test_online_init_should_create_keepalive_entries(topo_m2):
+ """Check that keep alive entries are created when initializinf a master from another one
+
+ :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
+ :setup: Two masters replication setup
+ :steps:
+ 1. Generate ldif without replication data
+ 2. Init both masters from that ldif
+ 3. Check that keep alive entries do not exist
+ 4. Perform online init of master2 from master1
+ 5. Check that keep alive entries exist
+ :expectedresults:
+ 1. No error while generating ldif
+ 2. No error while importing the ldif file
+ 3. No keepalive entries should exist on any master
+ 4. No error while initializing master2
+ 5. All keepalive entries should exist on every master
+
+ """
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+ m1 = topo_m2.ms["master1"]
+ m2 = topo_m2.ms["master2"]
+ # Step 1: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ _remove_replication_data(ldif_file)
+
+ # Step 2: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+ """ Replica state is now as if CLI setup has been done using:
+ dsconf master1 replication enable --suffix "${SUFFIX}" --role master
+ dsconf master2 replication enable --suffix "${SUFFIX}" --role master
+ dsconf master1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+ dsconf master2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+ dsconf master1 repl-agmt create --suffix "${SUFFIX}"
+ dsconf master2 repl-agmt create --suffix "${SUFFIX}"
+ """
+
+ # Step 3: No keepalive entry should exist on any master
+ verify_keepalive_entries(topo_m2, False)
+
+ # Step 4: Perform online init of master2 from master1
+ agmt = Agreements(m1).list()[0]
+ agmt.begin_reinit()
+ (done, error) = agmt.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 5: All keepalive entries should exist on every master
+ # Verify the keep alive entries once replication is in sync
+ # (that is the step that fails when the bug is not fixed)
+ repl.wait_for_ruv(m2, m1)
+ verify_keepalive_entries(topo_m2, True);
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index f01782330..f0ea0f8ef 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -373,6 +373,20 @@ replica_destroy(void **arg)
slapi_ch_free((void **)arg);
}
+/******************************************************************************
+ ******************** REPLICATION KEEP ALIVE ENTRIES **************************
+ ******************************************************************************
+ * They are subentries of the replicated suffix and there is one per master. *
+ * These entries exist only to trigger a change that gets replicated over *
+ * the topology. *
+ * Their main purpose is to generate records in the changelog; they are *
+ * updated from time to time by fractional replication to ensure that at *
+ * least one change is replicated by FR after a large number of skipped *
+ * (not replicated) changes accumulate in the changelog. The benefit is *
+ * that the fractional RUV then gets updated, so fewer changes need to be *
+ * walked in the changelog when searching for the first change to send. *
+ ******************************************************************************/
+
#define KEEP_ALIVE_ATTR "keepalivetimestamp"
#define KEEP_ALIVE_ENTRY "repl keep alive"
#define KEEP_ALIVE_DN_FORMAT "cn=%s %d,%s"
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
index 14c8e0bcc..af486f730 100644
--- a/ldap/servers/plugins/replication/repl_extop.c
+++ b/ldap/servers/plugins/replication/repl_extop.c
@@ -1173,6 +1173,10 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
*/
if (cl5GetState() == CL5_STATE_OPEN) {
replica_log_ruv_elements(r);
+ /* now that the changelog is open and started, we can also create the
+ * keep alive entry without risk that db and cl will not match
+ */
+ replica_subentry_check(replica_get_root(r), replica_get_rid(r));
}
/* ONREPL code that dealt with new RUV, etc was moved into the code
--
2.26.2
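For reference, the keep alive entries this patch checks and creates follow the KEEP_ALIVE_* definitions above: one ldapsubentry named "cn=repl keep alive <rid>" directly under the replicated suffix, carrying a keepalivetimestamp attribute. A minimal lib389-style lookup sketch, assuming `inst` is a connected DirSrv instance and `rid` the replica id (the helper names below are illustrative, not part of the patch):

    import ldap
    from lib389._constants import DEFAULT_SUFFIX

    def keepalive_dn(rid, suffix=DEFAULT_SUFFIX):
        # Mirrors KEEP_ALIVE_DN_FORMAT ("cn=%s %d,%s") with KEEP_ALIVE_ENTRY "repl keep alive"
        return "cn=repl keep alive %d,%s" % (rid, suffix)

    def keepalive_exists(inst, rid, suffix=DEFAULT_SUFFIX):
        # Base search for the single keep alive subentry of one master
        try:
            inst.search_s(keepalive_dn(rid, suffix), ldap.SCOPE_BASE,
                          "(objectclass=ldapsubentry)", ['keepalivetimestamp'])
            return True
        except ldap.NO_SUCH_OBJECT:
            return False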

View File

@ -0,0 +1,513 @@
From 9d25d8bc3262bfaeeda2992538f649bf1a1b33de Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Thu, 12 Nov 2020 18:50:04 +0100
Subject: [PATCH 6/8] do not add referrals for masters with different data
generation #2054 (#4427)
Bug description:
The problem is that some operations that are mandatory in the usual case are
also performed when replication cannot take place because the
database sets are different (i.e. the RUV generation ids are different).
One of the issues is that the csn generator state is updated when
starting a replication session (this is a problem when trying to
reset the time skew, as freshly reinstalled replicas get infected
by the old ones).
A second issue is that the RUV gets updated when ending a replication session
(which may add a replica that does not share the same data set;
update operations on the consumer then return referrals towards the wrong masters).
Fix description:
The fix checks the RUV generation id before updating the csn generator
and before updating the RUV (see the sketch after this patch).
Reviewed by: mreynolds
firstyear
vashirov
Platforms tested: F32
---
.../suites/replication/regression_test.py | 290 ++++++++++++++++++
ldap/servers/plugins/replication/repl5.h | 1 +
.../plugins/replication/repl5_inc_protocol.c | 20 +-
.../plugins/replication/repl5_replica.c | 39 ++-
src/lib389/lib389/dseldif.py | 37 +++
5 files changed, 368 insertions(+), 19 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index 14b9d6a44..a72af6b30 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -13,6 +13,7 @@ from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts
from lib389.pwpolicy import PwPolicyManager
from lib389.utils import *
from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db, topology_i2 as topo_i2
+from lib389.topologies import topology_m2c2 as topo_m2c2
from lib389._constants import *
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.idm.user import UserAccount
@@ -22,6 +23,7 @@ from lib389.idm.directorymanager import DirectoryManager
from lib389.replica import Replicas, ReplicationManager, Changelog5, BootstrapReplicationManager
from lib389.agreement import Agreements
from lib389 import pid_from_file
+from lib389.dseldif import *
pytestmark = pytest.mark.tier1
@@ -1027,6 +1029,294 @@ def test_online_init_should_create_keepalive_entries(topo_m2):
verify_keepalive_entries(topo_m2, True);
+def get_agreement(agmts, consumer):
+ # Get the agreement towards consumer from the agreement list
+ for agmt in agmts.list():
+ if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and
+ agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host):
+ return agmt
+ return None
+
+
+def test_ruv_url_not_added_if_different_uuid(topo_m2c2):
+ """Check that RUV url is not updated if RUV generation uuid are different
+
+ :id: 7cc30a4e-0ffd-4758-8f00-e500279af344
+ :setup: Two masters + two consumers replication setup
+ :steps:
+ 1. Generate ldif without replication data
+ 2. Init both masters from that ldif
+ (to clear the ruvs and generate different generation uuids)
+ 3. Perform online init from master1 to consumer1
+ and from master2 to consumer2
+ 4. Perform update on both masters
+ 5. Check that c1 RUV does not contain a URL towards m2
+ 6. Check that c2 RUV does contain a URL towards m2
+ 7. Perform online init from master1 to master2
+ 8. Perform update on master2
+ 9. Check that c1 RUV does contain a URL towards m2
+ :expectedresults:
+ 1. No error while generating ldif
+ 2. No error while importing the ldif file
+ 3. No error and Initialization done.
+ 4. No error
+ 5. master2 replicaid should not be in the consumer1 RUV
+ 6. master2 replicaid should be in the consumer2 RUV
+ 7. No error and Initialization done.
+ 8. No error
+ 9. master2 replicaid should be in the consumer1 RUV
+
+ """
+
+ # Variables initialization
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+
+ m1 = topo_m2c2.ms["master1"]
+ m2 = topo_m2c2.ms["master2"]
+ c1 = topo_m2c2.cs["consumer1"]
+ c2 = topo_m2c2.cs["consumer2"]
+
+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
+ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
+ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)
+
+ replicid_m2 = replica_m2.get_rid()
+
+ agmts_m1 = Agreements(m1, replica_m1.dn)
+ agmts_m2 = Agreements(m2, replica_m2.dn)
+
+ m1_m2 = get_agreement(agmts_m1, m2)
+ m1_c1 = get_agreement(agmts_m1, c1)
+ m1_c2 = get_agreement(agmts_m1, c2)
+ m2_m1 = get_agreement(agmts_m2, m1)
+ m2_c1 = get_agreement(agmts_m2, c1)
+ m2_c2 = get_agreement(agmts_m2, c2)
+
+ # Step 1: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ # _remove_replication_data(ldif_file)
+
+ # Step 2: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+ # Step 3: Perform online init from master1 to consumer1
+ # and from master2 to consumer2
+ m1_c1.begin_reinit()
+ m2_c2.begin_reinit()
+ (done, error) = m1_c1.wait_reinit()
+ assert done is True
+ assert error is False
+ (done, error) = m2_c2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 4: Perform update on both masters
+ repl.test_replication(m1, c1)
+ repl.test_replication(m2, c2)
+
+ # Step 5: Check that c1 RUV does not contain a URL towards m2
+ ruv = replica_c1.get_ruv()
+ log.debug(f"c1 RUV: {ruv}")
+ url = ruv._rid_url.get(replica_m2.get_rid())
+ if url is None:
+ log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV")
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")
+ log.error(f"URL for RID {replica_m2.get_rid()} found in RUV")
+ # Note: this assertion fails if issue 2054 is not fixed.
+ assert False
+
+ # Step 6: Check that c2 RUV does contain a URL towards m2
+ ruv = replica_c2.get_ruv()
+ log.debug(f"c2 RUV: {ruv} {ruv._rids} ")
+ url = ruv._rid_url.get(replica_m2.get_rid())
+ if url is None:
+ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV")
+ assert False
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")
+
+
+ # Step 7: Perform online init from master1 to master2
+ m1_m2.begin_reinit()
+ (done, error) = m1_m2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 8: Perform update on master2
+ repl.test_replication(m2, c1)
+
+ # Step 9: Check that c1 RUV does contain a URL towards m2
+ ruv = replica_c1.get_ruv()
+ log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
+ url = ruv._rid_url.get(replica_m2.get_rid())
+ if url is None:
+ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV")
+ assert False
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")
+
+
+def test_csngen_state_not_updated_if_different_uuid(topo_m2c2):
+ """Check that csngen remote offset is not updated if RUV generation uuid are different
+
+ :id: 77694b8e-22ae-11eb-89b2-482ae39447e5
+ :setup: Two masters + two consumers replication setup
+ :steps:
+ 1. Disable m1<->m2 agreement to avoid propagate timeSkew
+ 2. Generate ldif without replication data
+ 3. Increase time skew on master2
+ 4. Init both masters from that ldif
+ (to clear the ruvs and generates different generation uuid)
+ 5. Perform on line init from master1 to consumer1 and master2 to consumer2
+ 6. Perform update on both masters
+ 7: Check that c1 has no time skew
+ 8: Check that c2 has time skew
+ 9. Init master2 from master1
+ 10. Perform update on master2
+ 11. Check that c1 has time skew
+ :expectedresults:
+ 1. No error
+ 2. No error while generating ldif
+ 3. No error
+ 4. No error while importing the ldif file
+ 5. No error and Initialization done.
+ 6. No error
+ 7. c1 time skew should be less than the threshold
+ 8. c2 time skew should be higher than the threshold
+ 9. No error and Initialization done.
+ 10. No error
+ 11. c1 time skew should be higher than the threshold
+
+ """
+
+ # Variables initialization
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+
+ m1 = topo_m2c2.ms["master1"]
+ m2 = topo_m2c2.ms["master2"]
+ c1 = topo_m2c2.cs["consumer1"]
+ c2 = topo_m2c2.cs["consumer2"]
+
+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
+ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
+ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)
+
+ replicid_m2 = replica_m2.get_rid()
+
+ agmts_m1 = Agreements(m1, replica_m1.dn)
+ agmts_m2 = Agreements(m2, replica_m2.dn)
+
+ m1_m2 = get_agreement(agmts_m1, m2)
+ m1_c1 = get_agreement(agmts_m1, c1)
+ m1_c2 = get_agreement(agmts_m1, c2)
+ m2_m1 = get_agreement(agmts_m2, m1)
+ m2_c1 = get_agreement(agmts_m2, c1)
+ m2_c2 = get_agreement(agmts_m2, c2)
+
+ # Step 1: Disable m1<->m2 agreement to avoid propagate timeSkew
+ m1_m2.pause()
+ m2_m1.pause()
+
+ # Step 2: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ # _remove_replication_data(ldif_file)
+
+ # Step 3: Increase time skew on master2
+ timeSkew = 6 * 3600
+ # We can modify master2's time skew,
+ # but the time skew on the consumer may be smaller
+ # depending on when the csngen generation time is updated
+ # and when the first csn gets replicated.
+ # Since we use timeSkew as the threshold value to detect
+ # whether there is time skew or not,
+ # let's add a significant margin (longer than the test duration)
+ # to avoid any risk of erroneous failure.
+ timeSkewMargin = 300
+ DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew+timeSkewMargin)
+
+ # Step 4: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+ # Step 5: Perform online init from master1 to consumer1
+ # and from master2 to consumer2
+ m1_c1.begin_reinit()
+ m2_c2.begin_reinit()
+ (done, error) = m1_c1.wait_reinit()
+ assert done is True
+ assert error is False
+ (done, error) = m2_c2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 6: Perform update on both masters
+ repl.test_replication(m1, c1)
+ repl.test_replication(m2, c2)
+
+ # Step 7: Check that c1 has no time skew
+ # Stop the server to ensure that dse.ldif is up to date
+ c1.stop()
+ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
+ c1_timeSkew = int(c1_nsState['time_skew'])
+ log.debug(f"c1 time skew: {c1_timeSkew}")
+ if (c1_timeSkew >= timeSkew):
+ log.error(f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}")
+ assert False
+ c1.start()
+
+ # Step 8: Check that c2 has time skew
+ # Stop the server to ensure that dse.ldif is up to date
+ c2.stop()
+ c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0]
+ c2_timeSkew = int(c2_nsState['time_skew'])
+ log.debug(f"c2 time skew: {c2_timeSkew}")
+ if (c2_timeSkew < timeSkew):
+ log.error(f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}")
+ assert False
+ c2.start()
+
+ # Step 9: Perform online init from master1 to master2
+ m1_c1.pause()
+ m1_m2.resume()
+ m1_m2.begin_reinit()
+ (done, error) = m1_m2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 10: Perform update on master2
+ repl.test_replication(m2, c1)
+
+ # Step 11: Check that c1 has time skew
+ # Stop the server to ensure that dse.ldif is up to date
+ c1.stop()
+ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
+ c1_timeSkew = int(c1_nsState['time_skew'])
+ log.debug(f"c1 time skew: {c1_timeSkew}")
+ if (c1_timeSkew < timeSkew):
+ log.error(f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}")
+ assert False
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 638471744..b2605011a 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -698,6 +698,7 @@ void replica_dump(Replica *r);
void replica_set_enabled(Replica *r, PRBool enable);
Replica *replica_get_replica_from_dn(const Slapi_DN *dn);
Replica *replica_get_replica_from_root(const char *repl_root);
+int replica_check_generation(Replica *r, const RUV *remote_ruv);
int replica_update_ruv(Replica *replica, const CSN *csn, const char *replica_purl);
Replica *replica_get_replica_for_op(Slapi_PBlock *pb);
/* the functions below manipulate replica hash */
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index 29b1fb073..af5e5897c 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -2161,26 +2161,12 @@ examine_update_vector(Private_Repl_Protocol *prp, RUV *remote_ruv)
} else if (NULL == remote_ruv) {
return_value = EXAMINE_RUV_PRISTINE_REPLICA;
} else {
- char *local_gen = NULL;
- char *remote_gen = ruv_get_replica_generation(remote_ruv);
- Object *local_ruv_obj;
- RUV *local_ruv;
-
PR_ASSERT(NULL != prp->replica);
- local_ruv_obj = replica_get_ruv(prp->replica);
- if (NULL != local_ruv_obj) {
- local_ruv = (RUV *)object_get_data(local_ruv_obj);
- PR_ASSERT(local_ruv);
- local_gen = ruv_get_replica_generation(local_ruv);
- object_release(local_ruv_obj);
- }
- if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
- return_value = EXAMINE_RUV_GENERATION_MISMATCH;
- } else {
+ if (replica_check_generation(prp->replica, remote_ruv)) {
return_value = EXAMINE_RUV_OK;
+ } else {
+ return_value = EXAMINE_RUV_GENERATION_MISMATCH;
}
- slapi_ch_free((void **)&remote_gen);
- slapi_ch_free((void **)&local_gen);
}
return return_value;
}
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index f0ea0f8ef..7e56d6557 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -812,6 +812,36 @@ replica_set_ruv(Replica *r, RUV *ruv)
replica_unlock(r->repl_lock);
}
+/*
+ * Check if the replica generation is the same as the remote ruv one
+ */
+int
+replica_check_generation(Replica *r, const RUV *remote_ruv)
+{
+ int return_value;
+ char *local_gen = NULL;
+ char *remote_gen = ruv_get_replica_generation(remote_ruv);
+ Object *local_ruv_obj;
+ RUV *local_ruv;
+
+ PR_ASSERT(NULL != r);
+ local_ruv_obj = replica_get_ruv(r);
+ if (NULL != local_ruv_obj) {
+ local_ruv = (RUV *)object_get_data(local_ruv_obj);
+ PR_ASSERT(local_ruv);
+ local_gen = ruv_get_replica_generation(local_ruv);
+ object_release(local_ruv_obj);
+ }
+ if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
+ return_value = PR_FALSE;
+ } else {
+ return_value = PR_TRUE;
+ }
+ slapi_ch_free_string(&remote_gen);
+ slapi_ch_free_string(&local_gen);
+ return return_value;
+}
+
/*
* Update one particular CSN in an RUV. This is meant to be called
* whenever (a) the server has processed a client operation and
@@ -1298,6 +1328,11 @@ replica_update_csngen_state_ext(Replica *r, const RUV *ruv, const CSN *extracsn)
PR_ASSERT(r && ruv);
+ if (!replica_check_generation(r, ruv)) /* ruv has wrong generation - we are done */
+ {
+ return 0;
+ }
+
rc = ruv_get_max_csn(ruv, &csn);
if (rc != RUV_SUCCESS) {
return -1;
@@ -3713,8 +3748,8 @@ replica_update_ruv_consumer(Replica *r, RUV *supplier_ruv)
replica_lock(r->repl_lock);
local_ruv = (RUV *)object_get_data(r->repl_ruv);
-
- if (is_cleaned_rid(supplier_id) || local_ruv == NULL) {
+ if (is_cleaned_rid(supplier_id) || local_ruv == NULL ||
+ !replica_check_generation(r, supplier_ruv)) {
replica_unlock(r->repl_lock);
return;
}
diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py
index f2725add9..6e6be7cd2 100644
--- a/src/lib389/lib389/dseldif.py
+++ b/src/lib389/lib389/dseldif.py
@@ -316,6 +316,43 @@ class DSEldif(DSLint):
return states
+ def _increaseTimeSkew(self, suffix, timeSkew):
+ # Increase csngen state local_offset by timeSkew
+ # Warning: instance must be stopped before calling this function
+ assert (timeSkew >= 0)
+ nsState = self.readNsState(suffix)[0]
+ self._instance.log.debug(f'_increaseTimeSkew nsState is {nsState}')
+ oldNsState = self.get(nsState['dn'], 'nsState', True)
+ self._instance.log.debug(f'oldNsState is {oldNsState}')
+
+ # Let's re-encode the new nsState
+ from lib389.utils import print_nice_time
+ if pack('<h', 1) == pack('=h',1):
+ end = '<'
+ elif pack('>h', 1) == pack('=h',1):
+ end = '>'
+ else:
+ raise ValueError("Unknown endian, unable to proceed")
+
+ thelen = len(oldNsState)
+ if thelen <= 20:
+ pad = 2 # padding for short H values
+ timefmt = 'I' # timevals are unsigned 32-bit int
+ else:
+ pad = 6 # padding for short H values
+ timefmt = 'Q' # timevals are unsigned 64-bit int
+ fmtstr = "%sH%dx3%sH%dx" % (end, pad, timefmt, pad)
+ newNsState = base64.b64encode(pack(fmtstr, int(nsState['rid']),
+ int(nsState['gen_time']), int(nsState['local_offset'])+timeSkew,
+ int(nsState['remote_offset']), int(nsState['seq_num'])))
+ newNsState = newNsState.decode('utf-8')
+ self._instance.log.debug(f'newNsState is {newNsState}')
+ # Let's replace the value.
+ (entry_dn_i, attr_data) = self._find_attr(nsState['dn'], 'nsState')
+ attr_i = next(iter(attr_data))
+ self._contents[entry_dn_i + attr_i] = f"nsState:: {newNsState}"
+ self._update()
+
class FSChecks(DSLint):
"""This is for the healthcheck feature, check commonly used system config files the
--
2.26.2
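The core of the fix is the new replica_check_generation() helper that both call sites (the csn generator state update and the consumer RUV update) now consult before touching local state. A minimal Python sketch of that logic, using purely illustrative generation-id values (the real ids come from the replicas' RUVs):

    def replica_check_generation(local_gen, remote_gen):
        # Mirror of the new C helper: two RUVs are only compatible when both
        # carry a data generation id and the ids are equal.
        return (local_gen is not None
                and remote_gen is not None
                and local_gen == remote_gen)

    # Both patched call sites follow the same pattern: on a mismatch they return
    # early and leave the csn generator / consumer RUV untouched, so no referral
    # toward the foreign master is ever added.
    local_gen = "5f2e9a1c000000010000"   # illustrative values only
    remote_gen = "60ad3b7e000000010000"
    if not replica_check_generation(local_gen, remote_gen):
        print("generation mismatch: skip csngen state and RUV updates")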

View File

@ -0,0 +1,159 @@
From 0b0147cdaad0f1fc54451c23b6e5d70da178736f Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 11 Nov 2020 08:59:18 -0500
Subject: [PATCH 7/8] Issue 4383 - Do not normalize escaped spaces in a DN
Bug Description: Adding an entry with an escaped leading space leads to many
problems. Mainly, id2entry can get corrupted during an
import of such an entry, and the entryrdn index is not
updated correctly.
Fix Description: In slapi_dn_normalize_ext(), leave an escaped space intact.
Relates: https://github.com/389ds/389-ds-base/issues/4383
Reviewed by: firstyear, progier, and tbordaz (Thanks!!!)
---
.../tests/suites/syntax/acceptance_test.py | 75 ++++++++++++++++++-
ldap/servers/slapd/dn.c | 8 +-
2 files changed, 77 insertions(+), 6 deletions(-)
diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py
index 543718689..7939a99a7 100644
--- a/dirsrvtests/tests/suites/syntax/acceptance_test.py
+++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -7,13 +7,12 @@
# --- END COPYRIGHT BLOCK ---
import ldap
-import logging
import pytest
import os
from lib389.schema import Schema
from lib389.config import Config
from lib389.idm.user import UserAccounts
-from lib389.idm.group import Groups
+from lib389.idm.group import Group, Groups
from lib389._constants import DEFAULT_SUFFIX
from lib389.topologies import log, topology_st as topo
@@ -127,7 +126,7 @@ def test_invalid_dn_syntax_crash(topo):
4. Success
"""
- # Create group
+ # Create group
groups = Groups(topo.standalone, DEFAULT_SUFFIX)
group = groups.create(properties={'cn': ' test'})
@@ -145,6 +144,74 @@ def test_invalid_dn_syntax_crash(topo):
groups.list()
+@pytest.mark.parametrize("props, rawdn", [
+ ({'cn': ' leadingSpace'}, "cn=\\20leadingSpace,ou=Groups,dc=example,dc=com"),
+ ({'cn': 'trailingSpace '}, "cn=trailingSpace\\20,ou=Groups,dc=example,dc=com")])
+def test_dn_syntax_spaces_delete(topo, props, rawdn):
+ """Test that an entry with a space as the first character in the DN can be
+ deleted without error. We also want to make sure the indexes are properly
+ updated by repeatedly adding and deleting the entry, and that the entry cache
+ is properly maintained.
+
+ :id: b993f37c-c2b0-4312-992c-a9048ff98965
+ :parametrized: yes
+ :setup: Standalone Instance
+ :steps:
+ 1. Create a group with a DN that has a space as the first/last
+ character.
+ 2. Delete group
+ 3. Add group
+ 4. Modify group
+ 5. Restart server and modify entry
+ 6. Delete group
+ 7. Add group back
+ 8. Delete group using specific DN
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ """
+
+ # Create group
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+
+ # Delete group (verifies DN/RDN parsing works and cache is correct)
+ group.delete()
+
+ # Add group again (verifies entryrdn index was properly updated)
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+
+ # Modify the group (verifies dn/rdn parsing is correct)
+ group.replace('description', 'escaped space group')
+
+ # Restart the server. This will pull the entry from the database and
+ # convert it into a cache entry, which is different from how a client
+ # first adds an entry, where it is put into the cache before being written to
+ # disk.
+ topo.standalone.restart()
+
+ # Make sure we can modify the entry (verifies cache entry was created
+ # correctly)
+ group.replace('description', 'escaped space group after restart')
+
+ # Make sure it can still be deleted (verifies cache again).
+ group.delete()
+
+ # Add it back so we can delete it using a specific DN (sanity test to verify
+ # another DN/RDN parsing variation).
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+ group = Group(topo.standalone, dn=rawdn)
+ group.delete()
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c
index 2af3f38fc..3980b897f 100644
--- a/ldap/servers/slapd/dn.c
+++ b/ldap/servers/slapd/dn.c
@@ -894,8 +894,7 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
s++;
}
}
- } else if (s + 2 < ends &&
- isxdigit(*(s + 1)) && isxdigit(*(s + 2))) {
+ } else if (s + 2 < ends && isxdigit(*(s + 1)) && isxdigit(*(s + 2))) {
/* esc hexpair ==> real character */
int n = slapi_hexchar2int(*(s + 1));
int n2 = slapi_hexchar2int(*(s + 2));
@@ -903,6 +902,11 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
if (n == 0) { /* don't change \00 */
*d++ = *++s;
*d++ = *++s;
+ } else if (n == 32) { /* leave \20 (space) intact */
+ *d++ = *s;
+ *d++ = *++s;
+ *d++ = *++s;
+ s++;
} else {
*d++ = n;
s += 3;
--
2.26.2
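The dn.c change means "\20" (an escaped space) is now preserved by DN normalization instead of being collapsed, so an entry whose RDN begins or ends with a space can be found again through its raw escaped DN. A minimal lib389-style sketch of that round trip, mirroring the new acceptance test and assuming `inst` is a standalone DirSrv instance:

    from lib389.idm.group import Group, Groups
    from lib389._constants import DEFAULT_SUFFIX

    def roundtrip_escaped_space_group(inst):
        # Create a group whose cn starts with a space...
        groups = Groups(inst, DEFAULT_SUFFIX)
        groups.create(properties={'cn': ' leadingSpace'})
        # ...then delete it again through the raw escaped DN; with the fix
        # both forms resolve to the same entry.
        raw_dn = "cn=\\20leadingSpace,ou=Groups,%s" % DEFAULT_SUFFIX
        Group(inst, dn=raw_dn).delete()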

View File

@ -0,0 +1,560 @@
From 220dbafa048269105b3f7958a5d5bfd1d988da26 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 30 Jun 2020 15:39:30 +0200
Subject: [PATCH 8/8] Issue 49300 - entryUSN is duplicated after memberOf
operation
Bug Description: When we assign a member to a group we have two
operations - group modification and user modification.
As a result, they both have the same entryUSN because the USN Plugin
assigns the entryUSN value in the bepreop but increments the counter
in the postop, and a lot of things can happen in between.
Fix Description: Increment the counter in the bepreop together with
the entryUSN assignment. Also, decrement the counter in the bepostop if
a failure has happened (a short verification sketch follows this patch).
Add a test suite to cover the change.
https://pagure.io/389-ds-base/issue/49300
Reviewed by: tbordaz (Thanks!)
---
.../tests/suites/plugins/entryusn_test.py | 240 ++++++++++++++++++
ldap/servers/plugins/usn/usn.c | 109 ++++----
ldap/servers/slapd/pblock.c | 14 +-
ldap/servers/slapd/pblock_v3.h | 1 +
ldap/servers/slapd/slapi-plugin.h | 3 +
5 files changed, 322 insertions(+), 45 deletions(-)
create mode 100644 dirsrvtests/tests/suites/plugins/entryusn_test.py
diff --git a/dirsrvtests/tests/suites/plugins/entryusn_test.py b/dirsrvtests/tests/suites/plugins/entryusn_test.py
new file mode 100644
index 000000000..721315419
--- /dev/null
+++ b/dirsrvtests/tests/suites/plugins/entryusn_test.py
@@ -0,0 +1,240 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import ldap
+import logging
+import pytest
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.config import Config
+from lib389.plugins import USNPlugin, MemberOfPlugin
+from lib389.idm.group import Groups
+from lib389.idm.user import UserAccounts
+from lib389.idm.organizationalunit import OrganizationalUnit
+from lib389.tombstone import Tombstones
+from lib389.rootdse import RootDSE
+from lib389.topologies import topology_st, topology_m2
+
+log = logging.getLogger(__name__)
+
+USER_NUM = 10
+GROUP_NUM = 3
+
+
+def check_entryusn_no_duplicates(entryusn_list):
+ """Check that all values in the list are unique"""
+
+ if len(entryusn_list) > len(set(entryusn_list)):
+ raise AssertionError(f"EntryUSN values have duplicates, please, check logs")
+
+
+def check_lastusn_after_restart(inst):
+ """Check that last usn is the same after restart"""
+
+ root_dse = RootDSE(inst)
+ last_usn_before = root_dse.get_attr_val_int("lastusn;userroot")
+ inst.restart()
+ last_usn_after = root_dse.get_attr_val_int("lastusn;userroot")
+ assert last_usn_after == last_usn_before
+
+
+@pytest.fixture(scope="module")
+def setup(topology_st, request):
+ """
+ Enable USN plug-in
+ Enable MEMBEROF plugin
+ Add test entries
+ """
+
+ inst = topology_st.standalone
+
+ log.info("Enable the USN plugin...")
+ plugin = USNPlugin(inst)
+ plugin.enable()
+
+ log.info("Enable the MEMBEROF plugin...")
+ plugin = MemberOfPlugin(inst)
+ plugin.enable()
+
+ inst.restart()
+
+ users_list = []
+ log.info("Adding test entries...")
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ for id in range(USER_NUM):
+ user = users.create_test_user(uid=id)
+ users_list.append(user)
+
+ groups_list = []
+ log.info("Adding test groups...")
+ groups = Groups(inst, DEFAULT_SUFFIX)
+ for id in range(GROUP_NUM):
+ group = groups.create(properties={'cn': f'test_group{id}'})
+ groups_list.append(group)
+
+ def fin():
+ for user in users_list:
+ try:
+ user.delete()
+ except ldap.NO_SUCH_OBJECT:
+ pass
+ for group in groups_list:
+ try:
+ group.delete()
+ except ldap.NO_SUCH_OBJECT:
+ pass
+ request.addfinalizer(fin)
+
+ return {"users": users_list,
+ "groups": groups_list}
+
+
+def test_entryusn_no_duplicates(topology_st, setup):
+ """Verify that entryUSN is not duplicated after memberOf operation
+
+ :id: 1a7d382d-1214-4d56-b9c2-9c4ed57d1683
+ :setup: Standalone instance, Groups and Users, USN and memberOf are enabled
+ :steps:
+ 1. Add a member to group 1
+ 2. Add a member to group 1 and 2
+ 3. Check that entryUSNs are different
+ 4. Check that lastusn before and after a restart are the same
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ inst = topology_st.standalone
+ config = Config(inst)
+ config.replace('nsslapd-accesslog-level', '260') # Internal op
+ config.replace('nsslapd-errorlog-level', '65536')
+ config.replace('nsslapd-plugin-logging', 'on')
+ entryusn_list = []
+
+ users = setup["users"]
+ groups = setup["groups"]
+
+ groups[0].replace('member', users[0].dn)
+ entryusn_list.append(users[0].get_attr_val_int('entryusn'))
+ log.info(f"{users[0].dn}_1: {entryusn_list[-1:]}")
+ entryusn_list.append(groups[0].get_attr_val_int('entryusn'))
+ log.info(f"{groups[0].dn}_1: {entryusn_list[-1:]}")
+ check_entryusn_no_duplicates(entryusn_list)
+
+ groups[1].replace('member', [users[0].dn, users[1].dn])
+ entryusn_list.append(users[0].get_attr_val_int('entryusn'))
+ log.info(f"{users[0].dn}_2: {entryusn_list[-1:]}")
+ entryusn_list.append(users[1].get_attr_val_int('entryusn'))
+ log.info(f"{users[1].dn}_2: {entryusn_list[-1:]}")
+ entryusn_list.append(groups[1].get_attr_val_int('entryusn'))
+ log.info(f"{groups[1].dn}_2: {entryusn_list[-1:]}")
+ check_entryusn_no_duplicates(entryusn_list)
+
+ check_lastusn_after_restart(inst)
+
+
+def test_entryusn_is_same_after_failure(topology_st, setup):
+ """Verify that entryUSN is the same after failed operation
+
+ :id: 1f227533-370a-48c1-b920-9b3b0bcfc32e
+ :setup: Standalone instance, Groups and Users, USN and memberOf are enabled
+ :steps:
+ 1. Get current group's entryUSN value
+ 2. Try to modify the group with an invalid syntax
+ 3. Get new group's entryUSN value and compare with old
+ 4. Check that lastusn before and after a restart are the same
+ :expectedresults:
+ 1. Success
+ 2. Invalid Syntax error
+ 3. Should be the same
+ 4. Success
+ """
+
+ inst = topology_st.standalone
+ users = setup["users"]
+
+ # We need this update so we get the latest USN pointing to our entry
+ users[0].replace('description', 'update')
+
+ entryusn_before = users[0].get_attr_val_int('entryusn')
+ users[0].replace('description', 'update')
+ try:
+ users[0].replace('uid', 'invalid update')
+ except ldap.NOT_ALLOWED_ON_RDN:
+ pass
+ users[0].replace('description', 'second update')
+ entryusn_after = users[0].get_attr_val_int('entryusn')
+
+ # entryUSN should be OLD + 2 (only two user updates)
+ assert entryusn_after == (entryusn_before + 2)
+
+ check_lastusn_after_restart(inst)
+
+
+def test_entryusn_after_repl_delete(topology_m2):
+ """Verify that entryUSN is incremented on 1 after delete operation which creates a tombstone
+
+ :id: 1704cf65-41bc-4347-bdaf-20fc2431b218
+ :setup: An instance with replication, Users, USN enabled
+ :steps:
+ 1. Try to delete a user
+ 2. Check the tombstone has the incremented USN
+ 3. Try to delete ou=People with users
+ 4. Check the entry has a non-incremented entryUSN
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Should fail with Not Allowed On Non-leaf error
+ 4. Success
+ """
+
+ inst = topology_m2.ms["master1"]
+ plugin = USNPlugin(inst)
+ plugin.enable()
+ inst.restart()
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+
+ try:
+ user_1 = users.create_test_user()
+ user_rdn = user_1.rdn
+ tombstones = Tombstones(inst, DEFAULT_SUFFIX)
+
+ user_1.replace('description', 'update_ts')
+ user_usn = user_1.get_attr_val_int('entryusn')
+
+ user_1.delete()
+
+ ts = tombstones.get(user_rdn)
+ ts_usn = ts.get_attr_val_int('entryusn')
+
+ assert (user_usn + 1) == ts_usn
+
+ user_1 = users.create_test_user()
+ org = OrganizationalUnit(inst, f"ou=People,{DEFAULT_SUFFIX}")
+ org.replace('description', 'update_ts')
+ ou_usn_before = org.get_attr_val_int('entryusn')
+ try:
+ org.delete()
+ except ldap.NOT_ALLOWED_ON_NONLEAF:
+ pass
+ ou_usn_after = org.get_attr_val_int('entryusn')
+ assert ou_usn_before == ou_usn_after
+
+ finally:
+ try:
+ user_1.delete()
+ except ldap.NO_SUCH_OBJECT:
+ pass
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/ldap/servers/plugins/usn/usn.c b/ldap/servers/plugins/usn/usn.c
index 12ba040c6..f2cc8a62c 100644
--- a/ldap/servers/plugins/usn/usn.c
+++ b/ldap/servers/plugins/usn/usn.c
@@ -333,6 +333,12 @@ _usn_add_next_usn(Slapi_Entry *e, Slapi_Backend *be)
}
slapi_ch_free_string(&usn_berval.bv_val);
+ /*
+ * increment the counter now and decrement it in the bepostop
+ * if the operation fails
+ */
+ slapi_counter_increment(be->be_usn_counter);
+
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"<-- _usn_add_next_usn\n");
@@ -370,6 +376,12 @@ _usn_mod_next_usn(LDAPMod ***mods, Slapi_Backend *be)
*mods = slapi_mods_get_ldapmods_passout(&smods);
+ /*
+ * increment the counter now and decrement it in the bepostop
+ * if the operation fails
+ */
+ slapi_counter_increment(be->be_usn_counter);
+
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"<-- _usn_mod_next_usn\n");
return LDAP_SUCCESS;
@@ -420,6 +432,7 @@ usn_betxnpreop_delete(Slapi_PBlock *pb)
{
Slapi_Entry *e = NULL;
Slapi_Backend *be = NULL;
+ int32_t tombstone_incremented = 0;
int rc = SLAPI_PLUGIN_SUCCESS;
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
@@ -441,7 +454,9 @@ usn_betxnpreop_delete(Slapi_PBlock *pb)
goto bail;
}
_usn_add_next_usn(e, be);
+ tombstone_incremented = 1;
bail:
+ slapi_pblock_set(pb, SLAPI_USN_INCREMENT_FOR_TOMBSTONE, &tombstone_incremented);
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"<-- usn_betxnpreop_delete\n");
@@ -483,7 +498,7 @@ bail:
return rc;
}
-/* count up the counter */
+/* count down the counter */
static int
usn_bepostop(Slapi_PBlock *pb)
{
@@ -493,25 +508,24 @@ usn_bepostop(Slapi_PBlock *pb)
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"--> usn_bepostop\n");
- /* if op is not successful, don't increment the counter */
+ /* if op is not successful, decrement the counter; otherwise do nothing */
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc);
if (LDAP_SUCCESS != rc) {
- /* no plugin failure */
- rc = SLAPI_PLUGIN_SUCCESS;
- goto bail;
- }
+ slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+ if (NULL == be) {
+ rc = LDAP_PARAM_ERROR;
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto bail;
+ }
- slapi_pblock_get(pb, SLAPI_BACKEND, &be);
- if (NULL == be) {
- rc = LDAP_PARAM_ERROR;
- slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
- rc = SLAPI_PLUGIN_FAILURE;
- goto bail;
+ if (be->be_usn_counter) {
+ slapi_counter_decrement(be->be_usn_counter);
+ }
}
- if (be->be_usn_counter) {
- slapi_counter_increment(be->be_usn_counter);
- }
+ /* no plugin failure */
+ rc = SLAPI_PLUGIN_SUCCESS;
bail:
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"<-- usn_bepostop\n");
@@ -519,13 +533,14 @@ bail:
return rc;
}
-/* count up the counter */
+/* count down the counter on a failure or a mod ignore */
static int
usn_bepostop_modify(Slapi_PBlock *pb)
{
int rc = SLAPI_PLUGIN_FAILURE;
Slapi_Backend *be = NULL;
LDAPMod **mods = NULL;
+ int32_t do_decrement = 0;
int i;
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
@@ -534,9 +549,7 @@ usn_bepostop_modify(Slapi_PBlock *pb)
/* if op is not successful, don't increment the counter */
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc);
if (LDAP_SUCCESS != rc) {
- /* no plugin failure */
- rc = SLAPI_PLUGIN_SUCCESS;
- goto bail;
+ do_decrement = 1;
}
slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
@@ -545,25 +558,29 @@ usn_bepostop_modify(Slapi_PBlock *pb)
if (mods[i]->mod_op & LDAP_MOD_IGNORE) {
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"usn_bepostop_modify - MOD_IGNORE detected\n");
- goto bail; /* conflict occurred.
- skip incrementing the counter. */
+ do_decrement = 1; /* conflict occurred.
+ decrement the counter. */
} else {
break;
}
}
}
- slapi_pblock_get(pb, SLAPI_BACKEND, &be);
- if (NULL == be) {
- rc = LDAP_PARAM_ERROR;
- slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
- rc = SLAPI_PLUGIN_FAILURE;
- goto bail;
+ if (do_decrement) {
+ slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+ if (NULL == be) {
+ rc = LDAP_PARAM_ERROR;
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto bail;
+ }
+ if (be->be_usn_counter) {
+ slapi_counter_decrement(be->be_usn_counter);
+ }
}
- if (be->be_usn_counter) {
- slapi_counter_increment(be->be_usn_counter);
- }
+ /* no plugin failure */
+ rc = SLAPI_PLUGIN_SUCCESS;
bail:
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"<-- usn_bepostop_modify\n");
@@ -573,34 +590,38 @@ bail:
/* count up the counter */
/* if the op is delete and the op was not successful, remove preventryusn */
+/* the function is executed on TXN level */
static int
usn_bepostop_delete(Slapi_PBlock *pb)
{
int rc = SLAPI_PLUGIN_FAILURE;
Slapi_Backend *be = NULL;
+ int32_t tombstone_incremented = 0;
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"--> usn_bepostop_delete\n");
- /* if op is not successful, don't increment the counter */
+ /* if op is not successful and it is a tombstone entry, decrement the counter */
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc);
if (LDAP_SUCCESS != rc) {
- /* no plugin failure */
- rc = SLAPI_PLUGIN_SUCCESS;
- goto bail;
- }
+ slapi_pblock_get(pb, SLAPI_USN_INCREMENT_FOR_TOMBSTONE, &tombstone_incremented);
+ if (tombstone_incremented) {
+ slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+ if (NULL == be) {
+ rc = LDAP_PARAM_ERROR;
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto bail;
+ }
- slapi_pblock_get(pb, SLAPI_BACKEND, &be);
- if (NULL == be) {
- rc = LDAP_PARAM_ERROR;
- slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
- rc = SLAPI_PLUGIN_FAILURE;
- goto bail;
+ if (be->be_usn_counter) {
+ slapi_counter_decrement(be->be_usn_counter);
+ }
+ }
}
- if (be->be_usn_counter) {
- slapi_counter_increment(be->be_usn_counter);
- }
+ /* no plugin failure */
+ rc = SLAPI_PLUGIN_SUCCESS;
bail:
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"<-- usn_bepostop_delete\n");
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index cb562e938..454ea9cc3 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -2436,7 +2436,7 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value)
(*(char **)value) = NULL;
}
break;
-
+
case SLAPI_SEARCH_CTRLS:
if (pblock->pb_intop != NULL) {
(*(LDAPControl ***)value) = pblock->pb_intop->pb_search_ctrls;
@@ -2479,6 +2479,14 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value)
}
break;
+ case SLAPI_USN_INCREMENT_FOR_TOMBSTONE:
+ if (pblock->pb_intop != NULL) {
+ (*(int32_t *)value) = pblock->pb_intop->pb_usn_tombstone_incremented;
+ } else {
+ (*(int32_t *)value) = 0;
+ }
+ break;
+
/* ACI Target Check */
case SLAPI_ACI_TARGET_CHECK:
if (pblock->pb_misc != NULL) {
@@ -4156,6 +4164,10 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
pblock->pb_intop->pb_paged_results_cookie = *(int *)value;
break;
+ case SLAPI_USN_INCREMENT_FOR_TOMBSTONE:
+ pblock->pb_intop->pb_usn_tombstone_incremented = *((int32_t *)value);
+ break;
+
/* ACI Target Check */
case SLAPI_ACI_TARGET_CHECK:
_pblock_assert_pb_misc(pblock);
diff --git a/ldap/servers/slapd/pblock_v3.h b/ldap/servers/slapd/pblock_v3.h
index 7ec2f37d6..90498c0b0 100644
--- a/ldap/servers/slapd/pblock_v3.h
+++ b/ldap/servers/slapd/pblock_v3.h
@@ -161,6 +161,7 @@ typedef struct _slapi_pblock_intop
int pb_paged_results_index; /* stash SLAPI_PAGED_RESULTS_INDEX */
int pb_paged_results_cookie; /* stash SLAPI_PAGED_RESULTS_COOKIE */
+ int32_t pb_usn_tombstone_incremented; /* stash SLAPI_USN_INCREMENT_FOR_TOMBSTONE */
} slapi_pblock_intop;
/* Stuff that is rarely used, but still present */
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 04c02cf7c..589830bb4 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -7483,6 +7483,9 @@ typedef enum _slapi_op_note_t {
#define SLAPI_PAGED_RESULTS_INDEX 1945
#define SLAPI_PAGED_RESULTS_COOKIE 1949
+/* USN Plugin flag for tombstone entries */
+#define SLAPI_USN_INCREMENT_FOR_TOMBSTONE 1950
+
/* ACI Target Check */
#define SLAPI_ACI_TARGET_CHECK 1946
--
2.26.2
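To see the effect of the fix from a client's point of view, the new test asserts that a single memberOf-triggering modify leaves distinct entryUSN values on the group and on the touched user, and that lastusn survives a restart (since the counter is now bumped in the be-preop together with the assignment). A minimal lib389-style sketch of that check, assuming `inst`, `group` and `user` are an existing DirSrv instance and pre-created Group/UserAccount objects:

    from lib389.rootdse import RootDSE

    def assert_usn_behaviour(inst, group, user):
        # One modify on the group triggers a memberOf update on the user;
        # with the fix each write gets its own entryUSN.
        group.replace('member', user.dn)
        usns = [user.get_attr_val_int('entryusn'),
                group.get_attr_val_int('entryusn')]
        assert len(usns) == len(set(usns))

        # lastusn must be stable across a restart.
        root_dse = RootDSE(inst)
        before = root_dse.get_attr_val_int("lastusn;userroot")
        inst.restart()
        assert root_dse.get_attr_val_int("lastusn;userroot") == before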

View File

@ -0,0 +1,4 @@
For detailed information on developing plugins for
389 Directory Server visit:
http://port389/wiki/Plugins

View File

@ -0,0 +1,16 @@
#!/bin/bash
DATE=`date +%Y%m%d`
# use a real tag name here
VERSION=1.3.5.14
PKGNAME=389-ds-base
TAG=${TAG:-$PKGNAME-$VERSION}
URL="https://git.fedorahosted.org/git/?p=389/ds.git;a=snapshot;h=$TAG;sf=tgz"
SRCNAME=$PKGNAME-$VERSION
wget -O $SRCNAME.tar.gz "$URL"
echo convert tgz format to tar.bz2 format
gunzip $PKGNAME-$VERSION.tar.gz
bzip2 $PKGNAME-$VERSION.tar

1088
SPECS/389-ds-base.spec Normal file

File diff suppressed because it is too large