import 389-ds-base-1.4.3.16-11.module+el8.4.0+9969+312e177c

This commit is contained in:
CentOS Sources 2021-03-30 10:14:00 -04:00 committed by Stepan Oksanichenko
parent d759e61ce0
commit 595f28b9d9
61 changed files with 16631 additions and 6562 deletions

View File

@ -1,2 +1,2 @@
7e651c99e43265c678c98ac2d8e31b8c48522be6 SOURCES/389-ds-base-1.4.3.8.tar.bz2
90cda7aea8d8644eea5a2af28c72350dd915db34 SOURCES/389-ds-base-1.4.3.16.tar.bz2
9e06b5cc57fd185379d007696da153893cf73e30 SOURCES/jemalloc-5.2.1.tar.bz2

2
.gitignore vendored
View File

@ -1,2 +1,2 @@
SOURCES/389-ds-base-1.4.3.8.tar.bz2
SOURCES/389-ds-base-1.4.3.16.tar.bz2
SOURCES/jemalloc-5.2.1.tar.bz2

View File

@ -0,0 +1,159 @@
From 81dcaf1c37c2de24c46672df8d4f968c2fb40a6e Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 11 Nov 2020 08:59:18 -0500
Subject: [PATCH 1/3] Issue 4383 - Do not normalize escaped spaces in a DN
Bug Description: Adding an entry with an escaped leading space leads to many
problems. Mainly id2entry can get corrupted during an
import of such an entry, and the entryrdn index is not
updated correctly
Fix Description: In slapi_dn_normalize_ext() leave an escaped space intact.
Relates: https://github.com/389ds/389-ds-base/issues/4383
Reviewed by: firstyear, progier, and tbordaz (Thanks!!!)
---
.../tests/suites/syntax/acceptance_test.py | 75 ++++++++++++++++++-
ldap/servers/slapd/dn.c | 8 +-
2 files changed, 77 insertions(+), 6 deletions(-)
diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py
index 543718689..7939a99a7 100644
--- a/dirsrvtests/tests/suites/syntax/acceptance_test.py
+++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -7,13 +7,12 @@
# --- END COPYRIGHT BLOCK ---
import ldap
-import logging
import pytest
import os
from lib389.schema import Schema
from lib389.config import Config
from lib389.idm.user import UserAccounts
-from lib389.idm.group import Groups
+from lib389.idm.group import Group, Groups
from lib389._constants import DEFAULT_SUFFIX
from lib389.topologies import log, topology_st as topo
@@ -127,7 +126,7 @@ def test_invalid_dn_syntax_crash(topo):
4. Success
"""
- # Create group
+ # Create group
groups = Groups(topo.standalone, DEFAULT_SUFFIX)
group = groups.create(properties={'cn': ' test'})
@@ -145,6 +144,74 @@ def test_invalid_dn_syntax_crash(topo):
groups.list()
+@pytest.mark.parametrize("props, rawdn", [
+ ({'cn': ' leadingSpace'}, "cn=\\20leadingSpace,ou=Groups,dc=example,dc=com"),
+ ({'cn': 'trailingSpace '}, "cn=trailingSpace\\20,ou=Groups,dc=example,dc=com")])
+def test_dn_syntax_spaces_delete(topo, props, rawdn):
+ """Test that an entry with a space as the first character in the DN can be
+ deleted without error. We also want to make sure the indexes are properly
+ updated by repeatedly adding and deleting the entry, and that the entry cache
+ is properly maintained.
+
+ :id: b993f37c-c2b0-4312-992c-a9048ff98965
+ :parametrized: yes
+ :setup: Standalone Instance
+ :steps:
+ 1. Create a group with a DN that has a space as the first/last
+ character.
+ 2. Delete group
+ 3. Add group
+ 4. Modify group
+ 5. Restart server and modify entry
+ 6. Delete group
+ 7. Add group back
+ 8. Delete group using specific DN
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ """
+
+ # Create group
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+
+ # Delete group (verifies DN/RDN parsing works and cache is correct)
+ group.delete()
+
+ # Add group again (verifies entryrdn index was properly updated)
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+
+ # Modify the group (verifies dn/rdn parsing is correct)
+ group.replace('description', 'escaped space group')
+
+ # Restart the server. This will pull the entry from the database and
+ # convert it into a cache entry, which is different than how a client
+ # first adds an entry and is put into the cache before being written to
+ # disk.
+ topo.standalone.restart()
+
+ # Make sure we can modify the entry (verifies cache entry was created
+ # correctly)
+ group.replace('description', 'escaped space group after restart')
+
+ # Make sure it can still be deleted (verifies cache again).
+ group.delete()
+
+ # Add it back so we can delete it using a specific DN (sanity test to verify
+ # another DN/RDN parsing variation).
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+ group = Group(topo.standalone, dn=rawdn)
+ group.delete()
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c
index 2af3f38fc..3980b897f 100644
--- a/ldap/servers/slapd/dn.c
+++ b/ldap/servers/slapd/dn.c
@@ -894,8 +894,7 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
s++;
}
}
- } else if (s + 2 < ends &&
- isxdigit(*(s + 1)) && isxdigit(*(s + 2))) {
+ } else if (s + 2 < ends && isxdigit(*(s + 1)) && isxdigit(*(s + 2))) {
/* esc hexpair ==> real character */
int n = slapi_hexchar2int(*(s + 1));
int n2 = slapi_hexchar2int(*(s + 2));
@@ -903,6 +902,11 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
if (n == 0) { /* don't change \00 */
*d++ = *++s;
*d++ = *++s;
+ } else if (n == 32) { /* leave \20 (space) intact */
+ *d++ = *s;
+ *d++ = *++s;
+ *d++ = *++s;
+ s++;
} else {
*d++ = n;
s += 3;
--
2.26.2

View File

@ -1,43 +0,0 @@
From 97ecf0190f264a2d87750bc2d26ebf011542e3e1 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 8 May 2020 10:52:43 -0400
Subject: [PATCH 01/12] Issue 51076 - prevent unnecessarily duplication of the
target entry
Bug Description: For any update operation the MEP plugin was calling
slapi_search_internal_get_entry() which duplicates
the entry it returns. In this case the entry is just
read from and discarded, but this entry is already
in the pblock (the PRE OP ENTRY).
Fix Description: Just grab the PRE OP ENTRY from the pblock and use
that to read the attribute values from. This saves
two entry duplications for every update operation
from MEP.
fixes: https://pagure.io/389-ds-base/issue/51076
Reviewed by: tbordaz & firstyear(Thanks!!)
---
ldap/servers/plugins/mep/mep.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/ldap/servers/plugins/mep/mep.c b/ldap/servers/plugins/mep/mep.c
index ca9a64b3b..401d95e3a 100644
--- a/ldap/servers/plugins/mep/mep.c
+++ b/ldap/servers/plugins/mep/mep.c
@@ -2165,9 +2165,8 @@ mep_pre_op(Slapi_PBlock *pb, int modop)
if (e && free_entry) {
slapi_entry_free(e);
}
-
- slapi_search_internal_get_entry(sdn, 0, &e, mep_get_plugin_id());
- free_entry = 1;
+ slapi_pblock_get(pb, SLAPI_ENTRY_PRE_OP, &e);
+ free_entry = 0;
}
if (e && mep_is_managed_entry(e)) {
--
2.26.2

View File

@ -1,116 +0,0 @@
From 1426f086623404ab2eacb04de7e6414177c0993a Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Mon, 11 May 2020 17:11:49 +0200
Subject: [PATCH 02/12] Ticket 51082 - abort when a empty valueset is freed
Bug Description:
A large valueset (more than 10 values) manages a sorted array of values.
replication purges old values from a valueset (valueset_array_purge). If it purges all the values
the valueset is freed (slapi_valueset_done).
A problem is that the counter of values, in the valueset, is still reflecting the initial number
of values (before the purge). When the valueset is freed (because empty) a safety checking
detects incoherent values based on the wrong counter.
Fix Description:
When all the values have been purge reset the counter before freeing the valueset
https://pagure.io/389-ds-base/issue/51082
Reviewed by: Mark Reynolds
Platforms tested: F30
Flag Day: no
Doc impact: no
---
.../suites/replication/acceptance_test.py | 57 +++++++++++++++++++
ldap/servers/slapd/valueset.c | 4 ++
2 files changed, 61 insertions(+)
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
index c8e0a4c93..5009f4e7c 100644
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
@@ -500,6 +500,63 @@ def test_warining_for_invalid_replica(topo_m4):
assert topo_m4.ms["master1"].ds_error_log.match('.*nsds5ReplicaBackoffMax.*10.*invalid.*')
+@pytest.mark.ds51082
+def test_csnpurge_large_valueset(topo_m2):
+ """Test csn generator test
+
+ :id: 63e2bdb2-0a8f-4660-9465-7b80a9f72a74
+ :setup: MMR with 2 masters
+ :steps:
+ 1. Create a test_user
+ 2. add a large set of values (more than 10)
+ 3. delete all the values (more than 10)
+ 4. configure the replica to purge those values (purgedelay=5s)
+ 5. Waiting for 6 second
+ 6. do a series of update
+ :expectedresults:
+ 1. Should succeeds
+ 2. Should succeeds
+ 3. Should succeeds
+ 4. Should succeeds
+ 5. Should succeeds
+ 6. Should not crash
+ """
+ m1 = topo_m2.ms["master2"]
+
+ test_user = UserAccount(m1, TEST_ENTRY_DN)
+ if test_user.exists():
+ log.info('Deleting entry {}'.format(TEST_ENTRY_DN))
+ test_user.delete()
+ test_user.create(properties={
+ 'uid': TEST_ENTRY_NAME,
+ 'cn': TEST_ENTRY_NAME,
+ 'sn': TEST_ENTRY_NAME,
+ 'userPassword': TEST_ENTRY_NAME,
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/mmrepl_test',
+ })
+
+ # create a large value set so that it is sorted
+ for i in range(1,20):
+ test_user.add('description', 'value {}'.format(str(i)))
+
+ # delete all values of the valueset
+ for i in range(1,20):
+ test_user.remove('description', 'value {}'.format(str(i)))
+
+ # set purging delay to 5 second and wait more that 5second
+ replicas = Replicas(m1)
+ replica = replicas.list()[0]
+ log.info('nsds5ReplicaPurgeDelay to 5')
+ replica.set('nsds5ReplicaPurgeDelay', '5')
+ time.sleep(6)
+
+ # add some new values to the valueset containing entries that should be purged
+ for i in range(21,25):
+ test_user.add('description', 'value {}'.format(str(i)))
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/valueset.c b/ldap/servers/slapd/valueset.c
index 2af3ee18d..12027ecb8 100644
--- a/ldap/servers/slapd/valueset.c
+++ b/ldap/servers/slapd/valueset.c
@@ -801,6 +801,10 @@ valueset_array_purge(const Slapi_Attr *a, Slapi_ValueSet *vs, const CSN *csn)
}
}
} else {
+ /* empty valueset - reset the vs->num so that further
+ * checking will not abort
+ */
+ vs->num = 0;
slapi_valueset_done(vs);
}
--
2.26.2

View File

@ -0,0 +1,232 @@
From 29c9e1c3c760f0941b022d45d14c248e9ceb9738 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Tue, 3 Nov 2020 12:18:50 +0100
Subject: [PATCH 2/3] ticket 2058: Add keep alive entry after on-line
initialization - second version (#4399)
Bug description:
Keep alive entry is not created on target master after on line initialization,
and its RUVelement stays empty until a direct update is issued on that master
Fix description:
The patch allows a consumer (configured as a master) to create (if it did not
exist before) the consumer's keep alive entry. It creates it at the end of a
replication session at a time we are sure the changelog exists and will not
be reset. It allows a consumer to have RUVelement with csn in the RUV at the
first incoming replication session.
That is basically lkrispen's proposal with an associated pytest testcase
Second version changes:
- moved the testcase to suites/replication/regression_test.py
- set up the topology from a 2 master topology then
reinitialized the replicas from an ldif without replication metadata
rather than using the cli.
- search for keepalive entries using search_s instead of getEntry
- add a comment about keep alive entries purpose
last commit:
- wait that ruv are in sync before checking keep alive entries
Reviewed by: droideck, Firstyear
Platforms tested: F32
relates: #2058
---
.../suites/replication/regression_test.py | 130 ++++++++++++++++++
.../plugins/replication/repl5_replica.c | 14 ++
ldap/servers/plugins/replication/repl_extop.c | 4 +
3 files changed, 148 insertions(+)
diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index 844d762b9..14b9d6a44 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -98,6 +98,30 @@ def _move_ruv(ldif_file):
for dn, entry in ldif_list:
ldif_writer.unparse(dn, entry)
+def _remove_replication_data(ldif_file):
+ """ Remove the replication data from ldif file:
+ db2lif without -r includes some of the replica data like
+ - nsUniqueId
+ - keepalive entries
+ This function filters the ldif fil to remove these data
+ """
+
+ with open(ldif_file) as f:
+ parser = ldif.LDIFRecordList(f)
+ parser.parse()
+
+ ldif_list = parser.all_records
+ # Iterate on a copy of the ldif entry list
+ for dn, entry in ldif_list[:]:
+ if dn.startswith('cn=repl keep alive'):
+ ldif_list.remove((dn,entry))
+ else:
+ entry.pop('nsUniqueId')
+ with open(ldif_file, 'w') as f:
+ ldif_writer = ldif.LDIFWriter(f)
+ for dn, entry in ldif_list:
+ ldif_writer.unparse(dn, entry)
+
@pytest.fixture(scope="module")
def topo_with_sigkill(request):
@@ -897,6 +921,112 @@ def test_moving_entry_make_online_init_fail(topology_m2):
assert len(m1entries) == len(m2entries)
+def get_keepalive_entries(instance,replica):
+ # Returns the keep alive entries that exists with the suffix of the server instance
+ try:
+ entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL,
+ "(&(objectclass=ldapsubentry)(cn=repl keep alive*))",
+ ['cn', 'nsUniqueId', 'modifierTimestamp'])
+ except ldap.LDAPError as e:
+ log.fatal('Failed to retrieve keepalive entry (%s) on instance %s: error %s' % (dn, instance, str(e)))
+ assert False
+ # No error, so lets log the keepalive entries
+ if log.isEnabledFor(logging.DEBUG):
+ for ret in entries:
+ log.debug("Found keepalive entry:\n"+str(ret));
+ return entries
+
+def verify_keepalive_entries(topo, expected):
+ #Check that keep alive entries exists (or not exists) for every masters on every masters
+ #Note: The testing method is quite basic: counting that there is one keepalive entry per master.
+ # that is ok for simple test cases like test_online_init_should_create_keepalive_entries but
+ # not for the general case as keep alive associated with no more existing master may exists
+ # (for example after: db2ldif / demote a master / ldif2db / init other masters)
+ # ==> if the function is somehow pushed in lib389, a check better than simply counting the entries
+ # should be done.
+ for masterId in topo.ms:
+ master=topo.ms[masterId]
+ for replica in Replicas(master).list():
+ if (replica.get_role() != ReplicaRole.MASTER):
+ continue
+ replica_info = f'master: {masterId} RID: {replica.get_rid()} suffix: {replica.get_suffix()}'
+ log.debug(f'Checking keepAliveEntries on {replica_info}')
+ keepaliveEntries = get_keepalive_entries(master, replica);
+ expectedCount = len(topo.ms) if expected else 0
+ foundCount = len(keepaliveEntries)
+ if (foundCount == expectedCount):
+ log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.')
+ else:
+ log.error(f'{foundCount} Keepalive entries are found '
+ f'while {expectedCount} were expected on {replica_info}.')
+ assert False
+
+
+def test_online_init_should_create_keepalive_entries(topo_m2):
+ """Check that keep alive entries are created when initializinf a master from another one
+
+ :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
+ :setup: Two masters replication setup
+ :steps:
+ 1. Generate ldif without replication data
+ 2 Init both masters from that ldif
+ 3 Check that keep alive entries does not exists
+ 4 Perform on line init of master2 from master1
+ 5 Check that keep alive entries exists
+ :expectedresults:
+ 1. No error while generating ldif
+ 2. No error while importing the ldif file
+ 3. No keepalive entrie should exists on any masters
+ 4. No error while initializing master2
+ 5. All keepalive entries should exist on every masters
+
+ """
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+ m1 = topo_m2.ms["master1"]
+ m2 = topo_m2.ms["master2"]
+ # Step 1: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ _remove_replication_data(ldif_file)
+
+ # Step 2: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+ """ Replica state is now as if CLI setup has been done using:
+ dsconf master1 replication enable --suffix "${SUFFIX}" --role master
+ dsconf master2 replication enable --suffix "${SUFFIX}" --role master
+ dsconf master1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+ dsconf master2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+ dsconf master1 repl-agmt create --suffix "${SUFFIX}"
+ dsconf master2 repl-agmt create --suffix "${SUFFIX}"
+ """
+
+ # Step 3: No keepalive entrie should exists on any masters
+ verify_keepalive_entries(topo_m2, False)
+
+ # Step 4: Perform on line init of master2 from master1
+ agmt = Agreements(m1).list()[0]
+ agmt.begin_reinit()
+ (done, error) = agmt.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 5: All keepalive entries should exists on every masters
+ # Verify the keep alive entry once replication is in sync
+ # (that is the step that fails when bug is not fixed)
+ repl.wait_for_ruv(m2,m1)
+ verify_keepalive_entries(topo_m2, True);
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index f01782330..f0ea0f8ef 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -373,6 +373,20 @@ replica_destroy(void **arg)
slapi_ch_free((void **)arg);
}
+/******************************************************************************
+ ******************** REPLICATION KEEP ALIVE ENTRIES **************************
+ ******************************************************************************
+ * They are subentries of the replicated suffix and there is one per master. *
+ * These entries exist only to trigger a change that get replicated over the *
+ * topology. *
+ * Their main purpose is to generate records in the changelog and they are *
+ * updated from time to time by fractional replication to insure that at *
+ * least a change must be replicated by FR after a great number of not *
+ * replicated changes are found in the changelog. The interest is that the *
+ * fractional RUV get then updated so less changes need to be walked in the *
+ * changelog when searching for the first change to send *
+ ******************************************************************************/
+
#define KEEP_ALIVE_ATTR "keepalivetimestamp"
#define KEEP_ALIVE_ENTRY "repl keep alive"
#define KEEP_ALIVE_DN_FORMAT "cn=%s %d,%s"
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
index 14c8e0bcc..af486f730 100644
--- a/ldap/servers/plugins/replication/repl_extop.c
+++ b/ldap/servers/plugins/replication/repl_extop.c
@@ -1173,6 +1173,10 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
*/
if (cl5GetState() == CL5_STATE_OPEN) {
replica_log_ruv_elements(r);
+ /* now that the changelog is open and started, we can alos cretae the
+ * keep alive entry without risk that db and cl will not match
+ */
+ replica_subentry_check(replica_get_root(r), replica_get_rid(r));
}
/* ONREPL code that dealt with new RUV, etc was moved into the code
--
2.26.2

View File

@ -1,45 +0,0 @@
From 7a62e72b81d75ebb844835619ecc97dbf5e21058 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 14 May 2020 09:38:20 -0400
Subject: [PATCH 03/12] Issue 51091 - healthcheck json report fails when
mapping tree is deleted
Description: We were passing the bename in bytes and not as a utf8 string.
This caused the json dumping to fail.
relates: https://pagure.io/389-ds-base/issue/51091
Reviewed by: firstyear(Thanks!)
---
src/lib389/lib389/backend.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index e472d3de5..4f752f414 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -11,7 +11,7 @@ import copy
import ldap
from lib389._constants import *
from lib389.properties import *
-from lib389.utils import normalizeDN, ensure_str, ensure_bytes, assert_c
+from lib389.utils import normalizeDN, ensure_str, assert_c
from lib389 import Entry
# Need to fix this ....
@@ -488,10 +488,10 @@ class Backend(DSLdapObject):
# Check for the missing mapping tree.
suffix = self.get_attr_val_utf8('nsslapd-suffix')
- bename = self.get_attr_val_bytes('cn')
+ bename = self.get_attr_val_utf8('cn')
try:
mt = self._mts.get(suffix)
- if mt.get_attr_val_bytes('nsslapd-backend') != bename and mt.get_attr_val('nsslapd-state') != ensure_bytes('backend'):
+ if mt.get_attr_val_utf8('nsslapd-backend') != bename and mt.get_attr_val_utf8('nsslapd-state') != 'backend':
raise ldap.NO_SUCH_OBJECT("We have a matching suffix, but not a backend or correct database name.")
except ldap.NO_SUCH_OBJECT:
result = DSBLE0001
--
2.26.2

View File

@ -0,0 +1,513 @@
From e202c62c3b4c92163d2de9f3da9a9f3efc81e4b8 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Thu, 12 Nov 2020 18:50:04 +0100
Subject: [PATCH 3/3] do not add referrals for masters with different data
generation #2054 (#4427)
Bug description:
The problem is that some operation mandatory in the usual cases are
also performed when replication cannot take place because the
database set are differents (i.e: RUV generation ids are different)
One of the issue is that the csn generator state is updated when
starting a replication session (it is a problem when trying to
reset the time skew, as freshly reinstalled replicas get infected
by the old ones)
A second issue is that the RUV got updated when ending a replication session
(which may add replica that does not share the same data set,
then update operations on consumer retun referrals towards wrong masters
Fix description:
The fix checks the RUVs generation id before updating the csn generator
and before updating the RUV.
Reviewed by: mreynolds
firstyear
vashirov
Platforms tested: F32
---
.../suites/replication/regression_test.py | 290 ++++++++++++++++++
ldap/servers/plugins/replication/repl5.h | 1 +
.../plugins/replication/repl5_inc_protocol.c | 20 +-
.../plugins/replication/repl5_replica.c | 39 ++-
src/lib389/lib389/dseldif.py | 37 +++
5 files changed, 368 insertions(+), 19 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index 14b9d6a44..a72af6b30 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -13,6 +13,7 @@ from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts
from lib389.pwpolicy import PwPolicyManager
from lib389.utils import *
from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db, topology_i2 as topo_i2
+from lib389.topologies import topology_m2c2 as topo_m2c2
from lib389._constants import *
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.idm.user import UserAccount
@@ -22,6 +23,7 @@ from lib389.idm.directorymanager import DirectoryManager
from lib389.replica import Replicas, ReplicationManager, Changelog5, BootstrapReplicationManager
from lib389.agreement import Agreements
from lib389 import pid_from_file
+from lib389.dseldif import *
pytestmark = pytest.mark.tier1
@@ -1027,6 +1029,294 @@ def test_online_init_should_create_keepalive_entries(topo_m2):
verify_keepalive_entries(topo_m2, True);
+def get_agreement(agmts, consumer):
+ # Get agreement towards consumer among the agremment list
+ for agmt in agmts.list():
+ if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and
+ agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host):
+ return agmt
+ return None;
+
+
+def test_ruv_url_not_added_if_different_uuid(topo_m2c2):
+ """Check that RUV url is not updated if RUV generation uuid are different
+
+ :id: 7cc30a4e-0ffd-4758-8f00-e500279af344
+ :setup: Two masters + two consumers replication setup
+ :steps:
+ 1. Generate ldif without replication data
+ 2. Init both masters from that ldif
+ (to clear the ruvs and generates different generation uuid)
+ 3. Perform on line init from master1 to consumer1
+ and from master2 to consumer2
+ 4. Perform update on both masters
+ 5. Check that c1 RUV does not contains URL towards m2
+ 6. Check that c2 RUV does contains URL towards m2
+ 7. Perform on line init from master1 to master2
+ 8. Perform update on master2
+ 9. Check that c1 RUV does contains URL towards m2
+ :expectedresults:
+ 1. No error while generating ldif
+ 2. No error while importing the ldif file
+ 3. No error and Initialization done.
+ 4. No error
+ 5. master2 replicaid should not be in the consumer1 RUV
+ 6. master2 replicaid should be in the consumer2 RUV
+ 7. No error and Initialization done.
+ 8. No error
+ 9. master2 replicaid should be in the consumer1 RUV
+
+ """
+
+ # Variables initialization
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+
+ m1 = topo_m2c2.ms["master1"]
+ m2 = topo_m2c2.ms["master2"]
+ c1 = topo_m2c2.cs["consumer1"]
+ c2 = topo_m2c2.cs["consumer2"]
+
+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
+ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
+ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)
+
+ replicid_m2 = replica_m2.get_rid()
+
+ agmts_m1 = Agreements(m1, replica_m1.dn)
+ agmts_m2 = Agreements(m2, replica_m2.dn)
+
+ m1_m2 = get_agreement(agmts_m1, m2)
+ m1_c1 = get_agreement(agmts_m1, c1)
+ m1_c2 = get_agreement(agmts_m1, c2)
+ m2_m1 = get_agreement(agmts_m2, m1)
+ m2_c1 = get_agreement(agmts_m2, c1)
+ m2_c2 = get_agreement(agmts_m2, c2)
+
+ # Step 1: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ # _remove_replication_data(ldif_file)
+
+ # Step 2: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+ # Step 3: Perform on line init from master1 to consumer1
+ # and from master2 to consumer2
+ m1_c1.begin_reinit()
+ m2_c2.begin_reinit()
+ (done, error) = m1_c1.wait_reinit()
+ assert done is True
+ assert error is False
+ (done, error) = m2_c2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 4: Perform update on both masters
+ repl.test_replication(m1, c1)
+ repl.test_replication(m2, c2)
+
+ # Step 5: Check that c1 RUV does not contains URL towards m2
+ ruv = replica_c1.get_ruv()
+ log.debug(f"c1 RUV: {ruv}")
+ url=ruv._rid_url.get(replica_m2.get_rid())
+ if (url == None):
+ log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV");
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+ log.error(f"URL for RID {replica_m2.get_rid()} found in RUV")
+ #Note: this assertion fails if issue 2054 is not fixed.
+ assert False
+
+ # Step 6: Check that c2 RUV does contains URL towards m2
+ ruv = replica_c2.get_ruv()
+ log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
+ url=ruv._rid_url.get(replica_m2.get_rid())
+ if (url == None):
+ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV");
+ assert False
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+
+
+ # Step 7: Perform on line init from master1 to master2
+ m1_m2.begin_reinit()
+ (done, error) = m1_m2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 8: Perform update on master2
+ repl.test_replication(m2, c1)
+
+ # Step 9: Check that c1 RUV does contains URL towards m2
+ ruv = replica_c1.get_ruv()
+ log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
+ url=ruv._rid_url.get(replica_m2.get_rid())
+ if (url == None):
+ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV");
+ assert False
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+
+
+def test_csngen_state_not_updated_if_different_uuid(topo_m2c2):
+ """Check that csngen remote offset is not updated if RUV generation uuid are different
+
+ :id: 77694b8e-22ae-11eb-89b2-482ae39447e5
+ :setup: Two masters + two consumers replication setup
+ :steps:
+ 1. Disable m1<->m2 agreement to avoid propagate timeSkew
+ 2. Generate ldif without replication data
+ 3. Increase time skew on master2
+ 4. Init both masters from that ldif
+ (to clear the ruvs and generates different generation uuid)
+ 5. Perform on line init from master1 to consumer1 and master2 to consumer2
+ 6. Perform update on both masters
+ 7: Check that c1 has no time skew
+ 8: Check that c2 has time skew
+ 9. Init master2 from master1
+ 10. Perform update on master2
+ 11. Check that c1 has time skew
+ :expectedresults:
+ 1. No error
+ 2. No error while generating ldif
+ 3. No error
+ 4. No error while importing the ldif file
+ 5. No error and Initialization done.
+ 6. No error
+ 7. c1 time skew should be lesser than threshold
+ 8. c2 time skew should be higher than threshold
+ 9. No error and Initialization done.
+ 10. No error
+ 11. c1 time skew should be higher than threshold
+
+ """
+
+ # Variables initialization
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+
+ m1 = topo_m2c2.ms["master1"]
+ m2 = topo_m2c2.ms["master2"]
+ c1 = topo_m2c2.cs["consumer1"]
+ c2 = topo_m2c2.cs["consumer2"]
+
+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
+ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
+ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)
+
+ replicid_m2 = replica_m2.get_rid()
+
+ agmts_m1 = Agreements(m1, replica_m1.dn)
+ agmts_m2 = Agreements(m2, replica_m2.dn)
+
+ m1_m2 = get_agreement(agmts_m1, m2)
+ m1_c1 = get_agreement(agmts_m1, c1)
+ m1_c2 = get_agreement(agmts_m1, c2)
+ m2_m1 = get_agreement(agmts_m2, m1)
+ m2_c1 = get_agreement(agmts_m2, c1)
+ m2_c2 = get_agreement(agmts_m2, c2)
+
+ # Step 1: Disable m1<->m2 agreement to avoid propagate timeSkew
+ m1_m2.pause()
+ m2_m1.pause()
+
+ # Step 2: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ # _remove_replication_data(ldif_file)
+
+ # Step 3: Increase time skew on master2
+ timeSkew=6*3600
+ # We can modify master2 time skew
+ # But the time skew on the consumer may be smaller
+ # depending on when the cnsgen generation time is updated
+ # and when first csn get replicated.
+ # Since we use timeSkew has threshold value to detect
+ # whether there are time skew or not,
+ # lets add a significative margin (longer than the test duration)
+ # to avoid any risk of erroneous failure
+ timeSkewMargin = 300
+ DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew+timeSkewMargin)
+
+ # Step 4: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+ # Step 5: Perform on line init from master1 to consumer1
+ # and from master2 to consumer2
+ m1_c1.begin_reinit()
+ m2_c2.begin_reinit()
+ (done, error) = m1_c1.wait_reinit()
+ assert done is True
+ assert error is False
+ (done, error) = m2_c2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 6: Perform update on both masters
+ repl.test_replication(m1, c1)
+ repl.test_replication(m2, c2)
+
+ # Step 7: Check that c1 has no time skew
+ # Stop server to insure that dse.ldif is uptodate
+ c1.stop()
+ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
+ c1_timeSkew = int(c1_nsState['time_skew'])
+ log.debug(f"c1 time skew: {c1_timeSkew}")
+ if (c1_timeSkew >= timeSkew):
+ log.error(f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}")
+ assert False
+ c1.start()
+
+ # Step 8: Check that c2 has time skew
+ # Stop server to insure that dse.ldif is uptodate
+ c2.stop()
+ c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0]
+ c2_timeSkew = int(c2_nsState['time_skew'])
+ log.debug(f"c2 time skew: {c2_timeSkew}")
+ if (c2_timeSkew < timeSkew):
+ log.error(f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}")
+ assert False
+ c2.start()
+
+ # Step 9: Perform on line init from master1 to master2
+ m1_c1.pause()
+ m1_m2.resume()
+ m1_m2.begin_reinit()
+ (done, error) = m1_m2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 10: Perform update on master2
+ repl.test_replication(m2, c1)
+
+ # Step 11: Check that c1 has time skew
+ # Stop server to insure that dse.ldif is uptodate
+ c1.stop()
+ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
+ c1_timeSkew = int(c1_nsState['time_skew'])
+ log.debug(f"c1 time skew: {c1_timeSkew}")
+ if (c1_timeSkew < timeSkew):
+ log.error(f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}")
+ assert False
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index b35f724c2..f1c596a3f 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -708,6 +708,7 @@ void replica_dump(Replica *r);
void replica_set_enabled(Replica *r, PRBool enable);
Replica *replica_get_replica_from_dn(const Slapi_DN *dn);
Replica *replica_get_replica_from_root(const char *repl_root);
+int replica_check_generation(Replica *r, const RUV *remote_ruv);
int replica_update_ruv(Replica *replica, const CSN *csn, const char *replica_purl);
Replica *replica_get_replica_for_op(Slapi_PBlock *pb);
/* the functions below manipulate replica hash */
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index 29b1fb073..af5e5897c 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -2161,26 +2161,12 @@ examine_update_vector(Private_Repl_Protocol *prp, RUV *remote_ruv)
} else if (NULL == remote_ruv) {
return_value = EXAMINE_RUV_PRISTINE_REPLICA;
} else {
- char *local_gen = NULL;
- char *remote_gen = ruv_get_replica_generation(remote_ruv);
- Object *local_ruv_obj;
- RUV *local_ruv;
-
PR_ASSERT(NULL != prp->replica);
- local_ruv_obj = replica_get_ruv(prp->replica);
- if (NULL != local_ruv_obj) {
- local_ruv = (RUV *)object_get_data(local_ruv_obj);
- PR_ASSERT(local_ruv);
- local_gen = ruv_get_replica_generation(local_ruv);
- object_release(local_ruv_obj);
- }
- if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
- return_value = EXAMINE_RUV_GENERATION_MISMATCH;
- } else {
+ if (replica_check_generation(prp->replica, remote_ruv)) {
return_value = EXAMINE_RUV_OK;
+ } else {
+ return_value = EXAMINE_RUV_GENERATION_MISMATCH;
}
- slapi_ch_free((void **)&remote_gen);
- slapi_ch_free((void **)&local_gen);
}
return return_value;
}
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index f0ea0f8ef..7e56d6557 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -812,6 +812,36 @@ replica_set_ruv(Replica *r, RUV *ruv)
replica_unlock(r->repl_lock);
}
+/*
+ * Check if replica generation is the same than the remote ruv one
+ */
+int
+replica_check_generation(Replica *r, const RUV *remote_ruv)
+{
+ int return_value;
+ char *local_gen = NULL;
+ char *remote_gen = ruv_get_replica_generation(remote_ruv);
+ Object *local_ruv_obj;
+ RUV *local_ruv;
+
+ PR_ASSERT(NULL != r);
+ local_ruv_obj = replica_get_ruv(r);
+ if (NULL != local_ruv_obj) {
+ local_ruv = (RUV *)object_get_data(local_ruv_obj);
+ PR_ASSERT(local_ruv);
+ local_gen = ruv_get_replica_generation(local_ruv);
+ object_release(local_ruv_obj);
+ }
+ if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
+ return_value = PR_FALSE;
+ } else {
+ return_value = PR_TRUE;
+ }
+ slapi_ch_free_string(&remote_gen);
+ slapi_ch_free_string(&local_gen);
+ return return_value;
+}
+
/*
* Update one particular CSN in an RUV. This is meant to be called
* whenever (a) the server has processed a client operation and
@@ -1298,6 +1328,11 @@ replica_update_csngen_state_ext(Replica *r, const RUV *ruv, const CSN *extracsn)
PR_ASSERT(r && ruv);
+ if (!replica_check_generation(r, ruv)) /* ruv has wrong generation - we are done */
+ {
+ return 0;
+ }
+
rc = ruv_get_max_csn(ruv, &csn);
if (rc != RUV_SUCCESS) {
return -1;
@@ -3713,8 +3748,8 @@ replica_update_ruv_consumer(Replica *r, RUV *supplier_ruv)
replica_lock(r->repl_lock);
local_ruv = (RUV *)object_get_data(r->repl_ruv);
-
- if (is_cleaned_rid(supplier_id) || local_ruv == NULL) {
+ if (is_cleaned_rid(supplier_id) || local_ruv == NULL ||
+ !replica_check_generation(r, supplier_ruv)) {
replica_unlock(r->repl_lock);
return;
}
diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py
index 10baba4d7..6850c9a8a 100644
--- a/src/lib389/lib389/dseldif.py
+++ b/src/lib389/lib389/dseldif.py
@@ -317,6 +317,43 @@ class DSEldif(DSLint):
return states
+ def _increaseTimeSkew(self, suffix, timeSkew):
+ # Increase csngen state local_offset by timeSkew
+ # Warning: instance must be stopped before calling this function
+ assert (timeSkew >= 0)
+ nsState = self.readNsState(suffix)[0]
+ self._instance.log.debug(f'_increaseTimeSkew nsState is {nsState}')
+ oldNsState = self.get(nsState['dn'], 'nsState', True)
+ self._instance.log.debug(f'oldNsState is {oldNsState}')
+
+ # Lets reencode the new nsState
+ from lib389.utils import print_nice_time
+ if pack('<h', 1) == pack('=h',1):
+ end = '<'
+ elif pack('>h', 1) == pack('=h',1):
+ end = '>'
+ else:
+ raise ValueError("Unknown endian, unable to proceed")
+
+ thelen = len(oldNsState)
+ if thelen <= 20:
+ pad = 2 # padding for short H values
+ timefmt = 'I' # timevals are unsigned 32-bit int
+ else:
+ pad = 6 # padding for short H values
+ timefmt = 'Q' # timevals are unsigned 64-bit int
+ fmtstr = "%sH%dx3%sH%dx" % (end, pad, timefmt, pad)
+ newNsState = base64.b64encode(pack(fmtstr, int(nsState['rid']),
+ int(nsState['gen_time']), int(nsState['local_offset'])+timeSkew,
+ int(nsState['remote_offset']), int(nsState['seq_num'])))
+ newNsState = newNsState.decode('utf-8')
+ self._instance.log.debug(f'newNsState is {newNsState}')
+ # Lets replace the value.
+ (entry_dn_i, attr_data) = self._find_attr(nsState['dn'], 'nsState')
+ attr_i = next(iter(attr_data))
+ self._contents[entry_dn_i + attr_i] = f"nsState:: {newNsState}"
+ self._update()
+
class FSChecks(DSLint):
"""This is for the healthcheck feature, check commonly used system config files the
--
2.26.2

View File

@ -1,943 +0,0 @@
From f13d630ff98eb5b5505f1db3e7f207175b51b237 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 12 May 2020 13:48:30 -0400
Subject: [PATCH 04/12] Issue 51076 - remove unnecessary slapi entry dups
Description: So the problem is that slapi_search_internal_get_entry()
duplicates the entry twice. It does that as a convenience
where it will allocate a pblock, do the search, copy
the entry, free search results from the pblock, and then
free the pblock itself. I basically split this function
into two functions. One function allocates the pblock,
does the search and returns the entry. The other function
frees the entries and pblock.
99% of time when we call slapi_search_internal_get_entry()
we are just reading it and freeing it. It's not being
consumed. In these cases we can use the two function
approach eliminates an extra slapi_entry_dup(). Over the
time of an operation/connection we can save quite a bit
of mallocing/freeing. This could also help with memory
fragmentation.
ASAN: passed
relates: https://pagure.io/389-ds-base/issue/51076
Reviewed by: firstyear & tbordaz(Thanks!)
---
ldap/servers/plugins/acctpolicy/acct_config.c | 6 +--
ldap/servers/plugins/acctpolicy/acct_plugin.c | 36 +++++++-------
ldap/servers/plugins/acctpolicy/acct_util.c | 6 +--
ldap/servers/plugins/automember/automember.c | 17 +++----
ldap/servers/plugins/dna/dna.c | 23 ++++-----
ldap/servers/plugins/memberof/memberof.c | 16 +++----
.../plugins/pam_passthru/pam_ptconfig.c | 10 ++--
.../servers/plugins/pam_passthru/pam_ptimpl.c | 7 +--
.../plugins/pam_passthru/pam_ptpreop.c | 9 ++--
.../plugins/replication/repl5_tot_protocol.c | 5 +-
ldap/servers/plugins/uiduniq/uid.c | 23 ++++-----
ldap/servers/slapd/daemon.c | 11 ++---
ldap/servers/slapd/modify.c | 12 +++--
ldap/servers/slapd/plugin_internal_op.c | 48 +++++++++++++++++++
ldap/servers/slapd/resourcelimit.c | 13 ++---
ldap/servers/slapd/schema.c | 7 ++-
ldap/servers/slapd/slapi-plugin.h | 23 ++++++++-
17 files changed, 161 insertions(+), 111 deletions(-)
diff --git a/ldap/servers/plugins/acctpolicy/acct_config.c b/ldap/servers/plugins/acctpolicy/acct_config.c
index fe35ba5a0..01e4f319f 100644
--- a/ldap/servers/plugins/acctpolicy/acct_config.c
+++ b/ldap/servers/plugins/acctpolicy/acct_config.c
@@ -37,6 +37,7 @@ static int acct_policy_entry2config(Slapi_Entry *e,
int
acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void *plugin_id)
{
+ Slapi_PBlock *entry_pb = NULL;
acctPluginCfg *newcfg;
Slapi_Entry *config_entry = NULL;
Slapi_DN *config_sdn = NULL;
@@ -44,8 +45,7 @@ acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void *
/* Retrieve the config entry */
config_sdn = slapi_sdn_new_normdn_byref(PLUGIN_CONFIG_DN);
- rc = slapi_search_internal_get_entry(config_sdn, NULL, &config_entry,
- plugin_id);
+ rc = slapi_search_get_entry(&entry_pb, config_sdn, NULL, &config_entry, plugin_id);
slapi_sdn_free(&config_sdn);
if (rc != LDAP_SUCCESS || config_entry == NULL) {
@@ -60,7 +60,7 @@ acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void *
rc = acct_policy_entry2config(config_entry, newcfg);
config_unlock();
- slapi_entry_free(config_entry);
+ slapi_search_get_entry_done(&entry_pb);
return (rc);
}
diff --git a/ldap/servers/plugins/acctpolicy/acct_plugin.c b/ldap/servers/plugins/acctpolicy/acct_plugin.c
index 2a876ad72..c3c32b074 100644
--- a/ldap/servers/plugins/acctpolicy/acct_plugin.c
+++ b/ldap/servers/plugins/acctpolicy/acct_plugin.c
@@ -209,6 +209,7 @@ done:
int
acct_bind_preop(Slapi_PBlock *pb)
{
+ Slapi_PBlock *entry_pb = NULL;
const char *dn = NULL;
Slapi_DN *sdn = NULL;
Slapi_Entry *target_entry = NULL;
@@ -236,8 +237,7 @@ acct_bind_preop(Slapi_PBlock *pb)
goto done;
}
- ldrc = slapi_search_internal_get_entry(sdn, NULL, &target_entry,
- plugin_id);
+ ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &target_entry, plugin_id);
/* There was a problem retrieving the entry */
if (ldrc != LDAP_SUCCESS) {
@@ -275,7 +275,7 @@ done:
slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL);
}
- slapi_entry_free(target_entry);
+ slapi_search_get_entry_done(&entry_pb);
free_acctpolicy(&policy);
@@ -293,6 +293,7 @@ done:
int
acct_bind_postop(Slapi_PBlock *pb)
{
+ Slapi_PBlock *entry_pb = NULL;
char *dn = NULL;
int ldrc, tracklogin = 0;
int rc = 0; /* Optimistic default */
@@ -327,8 +328,7 @@ acct_bind_postop(Slapi_PBlock *pb)
covered by an account policy to decide whether we should track */
if (tracklogin == 0) {
sdn = slapi_sdn_new_normdn_byref(dn);
- ldrc = slapi_search_internal_get_entry(sdn, NULL, &target_entry,
- plugin_id);
+ ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &target_entry, plugin_id);
if (ldrc != LDAP_SUCCESS) {
slapi_log_err(SLAPI_LOG_ERR, POST_PLUGIN_NAME,
@@ -355,7 +355,7 @@ done:
slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL);
}
- slapi_entry_free(target_entry);
+ slapi_search_get_entry_done(&entry_pb);
slapi_sdn_free(&sdn);
@@ -370,11 +370,11 @@ done:
static int
acct_pre_op(Slapi_PBlock *pb, int modop)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *sdn = 0;
Slapi_Entry *e = 0;
Slapi_Mods *smods = 0;
LDAPMod **mods;
- int free_entry = 0;
char *errstr = NULL;
int ret = SLAPI_PLUGIN_SUCCESS;
@@ -384,28 +384,25 @@ acct_pre_op(Slapi_PBlock *pb, int modop)
if (acct_policy_dn_is_config(sdn)) {
/* Validate config changes, but don't apply them.
- * This allows us to reject invalid config changes
- * here at the pre-op stage. Applying the config
- * needs to be done at the post-op stage. */
+ * This allows us to reject invalid config changes
+ * here at the pre-op stage. Applying the config
+ * needs to be done at the post-op stage. */
if (LDAP_CHANGETYPE_ADD == modop) {
slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &e);
- /* If the entry doesn't exist, just bail and
- * let the server handle it. */
+ /* If the entry doesn't exist, just bail and let the server handle it. */
if (e == NULL) {
goto bail;
}
} else if (LDAP_CHANGETYPE_MODIFY == modop) {
/* Fetch the entry being modified so we can
- * create the resulting entry for validation. */
+ * create the resulting entry for validation. */
if (sdn) {
- slapi_search_internal_get_entry(sdn, 0, &e, get_identity());
- free_entry = 1;
+ slapi_search_get_entry(&entry_pb, sdn, 0, &e, get_identity());
}
- /* If the entry doesn't exist, just bail and
- * let the server handle it. */
+ /* If the entry doesn't exist, just bail and let the server handle it. */
if (e == NULL) {
goto bail;
}
@@ -418,7 +415,7 @@ acct_pre_op(Slapi_PBlock *pb, int modop)
/* Apply the mods to create the resulting entry. */
if (mods && (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS)) {
/* The mods don't apply cleanly, so we just let this op go
- * to let the main server handle it. */
+ * to let the main server handle it. */
goto bailmod;
}
} else if (modop == LDAP_CHANGETYPE_DELETE) {
@@ -439,8 +436,7 @@ bailmod:
}
bail:
- if (free_entry && e)
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
if (ret) {
slapi_log_err(SLAPI_LOG_PLUGIN, PRE_PLUGIN_NAME,
diff --git a/ldap/servers/plugins/acctpolicy/acct_util.c b/ldap/servers/plugins/acctpolicy/acct_util.c
index f25a3202d..f432092fe 100644
--- a/ldap/servers/plugins/acctpolicy/acct_util.c
+++ b/ldap/servers/plugins/acctpolicy/acct_util.c
@@ -85,6 +85,7 @@ get_attr_string_val(Slapi_Entry *target_entry, char *attr_name)
int
get_acctpolicy(Slapi_PBlock *pb __attribute__((unused)), Slapi_Entry *target_entry, void *plugin_id, acctPolicy **policy)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *sdn = NULL;
Slapi_Entry *policy_entry = NULL;
Slapi_Attr *attr;
@@ -123,8 +124,7 @@ get_acctpolicy(Slapi_PBlock *pb __attribute__((unused)), Slapi_Entry *target_ent
}
sdn = slapi_sdn_new_dn_byref(policy_dn);
- ldrc = slapi_search_internal_get_entry(sdn, NULL, &policy_entry,
- plugin_id);
+ ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &policy_entry, plugin_id);
slapi_sdn_free(&sdn);
/* There should be a policy but it can't be retrieved; fatal error */
@@ -160,7 +160,7 @@ dopolicy:
done:
config_unlock();
slapi_ch_free_string(&policy_dn);
- slapi_entry_free(policy_entry);
+ slapi_search_get_entry_done(&entry_pb);
return (rc);
}
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
index 7c875c852..39350ad53 100644
--- a/ldap/servers/plugins/automember/automember.c
+++ b/ldap/servers/plugins/automember/automember.c
@@ -1629,13 +1629,12 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
char *member_value = NULL;
int rc = 0;
Slapi_DN *group_sdn;
- Slapi_Entry *group_entry = NULL;
/* First thing check that the group still exists */
group_sdn = slapi_sdn_new_dn_byval(group_dn);
- rc = slapi_search_internal_get_entry(group_sdn, NULL, &group_entry, automember_get_plugin_id());
+ rc = slapi_search_internal_get_entry(group_sdn, NULL, NULL, automember_get_plugin_id());
slapi_sdn_free(&group_sdn);
- if (rc != LDAP_SUCCESS || group_entry == NULL) {
+ if (rc != LDAP_SUCCESS) {
if (rc == LDAP_NO_SUCH_OBJECT) {
/* the automember group (default or target) does not exist, just skip this definition */
slapi_log_err(SLAPI_LOG_INFO, AUTOMEMBER_PLUGIN_SUBSYSTEM,
@@ -1647,10 +1646,8 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
"automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n",
group_dn, rc);
}
- slapi_entry_free(group_entry);
return rc;
}
- slapi_entry_free(group_entry);
/* If grouping_value is dn, we need to fetch the dn instead. */
if (slapi_attr_type_cmp(grouping_value, "dn", SLAPI_TYPE_CMP_EXACT) == 0) {
@@ -1752,11 +1749,11 @@ out:
static int
automember_pre_op(Slapi_PBlock *pb, int modop)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *sdn = 0;
Slapi_Entry *e = 0;
Slapi_Mods *smods = 0;
LDAPMod **mods;
- int free_entry = 0;
char *errstr = NULL;
int ret = SLAPI_PLUGIN_SUCCESS;
@@ -1784,8 +1781,7 @@ automember_pre_op(Slapi_PBlock *pb, int modop)
/* Fetch the entry being modified so we can
* create the resulting entry for validation. */
if (sdn) {
- slapi_search_internal_get_entry(sdn, 0, &e, automember_get_plugin_id());
- free_entry = 1;
+ slapi_search_get_entry(&entry_pb, sdn, 0, &e, automember_get_plugin_id());
}
/* If the entry doesn't exist, just bail and
@@ -1799,7 +1795,7 @@ automember_pre_op(Slapi_PBlock *pb, int modop)
smods = slapi_mods_new();
slapi_mods_init_byref(smods, mods);
- /* Apply the mods to create the resulting entry. */
+ /* Apply the mods to create the resulting entry. */
if (mods && (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS)) {
/* The mods don't apply cleanly, so we just let this op go
* to let the main server handle it. */
@@ -1831,8 +1827,7 @@ bailmod:
}
bail:
- if (free_entry && e)
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
if (ret) {
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index 1ee271359..16c625bb0 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1178,7 +1178,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
value = slapi_entry_attr_get_charptr(e, DNA_SHARED_CFG_DN);
if (value) {
- Slapi_Entry *shared_e = NULL;
Slapi_DN *sdn = NULL;
char *normdn = NULL;
char *attrs[2];
@@ -1197,10 +1196,8 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
/* We don't need attributes */
attrs[0] = "cn";
attrs[1] = NULL;
- slapi_search_internal_get_entry(sdn, attrs, &shared_e, getPluginID());
-
/* Make sure that the shared config entry exists. */
- if (!shared_e) {
+ if(slapi_search_internal_get_entry(sdn, attrs, NULL, getPluginID()) != LDAP_SUCCESS) {
/* We didn't locate the shared config container entry. Log
* a message and skip this config entry. */
slapi_log_err(SLAPI_LOG_ERR, DNA_PLUGIN_SUBSYSTEM,
@@ -1210,9 +1207,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
ret = DNA_FAILURE;
slapi_sdn_free(&sdn);
goto bail;
- } else {
- slapi_entry_free(shared_e);
- shared_e = NULL;
}
normdn = (char *)slapi_sdn_get_dn(sdn);
@@ -1539,6 +1533,7 @@ dna_delete_shared_servers(PRCList **servers)
static int
dna_load_host_port(void)
{
+ Slapi_PBlock *pb = NULL;
int status = DNA_SUCCESS;
Slapi_Entry *e = NULL;
Slapi_DN *config_dn = NULL;
@@ -1554,7 +1549,7 @@ dna_load_host_port(void)
config_dn = slapi_sdn_new_ndn_byref("cn=config");
if (config_dn) {
- slapi_search_internal_get_entry(config_dn, attrs, &e, getPluginID());
+ slapi_search_get_entry(&pb, config_dn, attrs, &e, getPluginID());
slapi_sdn_free(&config_dn);
}
@@ -1562,8 +1557,8 @@ dna_load_host_port(void)
hostname = slapi_entry_attr_get_charptr(e, "nsslapd-localhost");
portnum = slapi_entry_attr_get_charptr(e, "nsslapd-port");
secureportnum = slapi_entry_attr_get_charptr(e, "nsslapd-secureport");
- slapi_entry_free(e);
}
+ slapi_search_get_entry_done(&pb);
if (!hostname || !portnum) {
status = DNA_FAILURE;
@@ -2876,6 +2871,7 @@ bail:
static int
dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
{
+ Slapi_PBlock *entry_pb = NULL;
char *replica_dn = NULL;
Slapi_DN *replica_sdn = NULL;
Slapi_DN *range_sdn = NULL;
@@ -2912,8 +2908,7 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
attrs[2] = 0;
/* Find cn=replica entry via search */
- slapi_search_internal_get_entry(replica_sdn, attrs, &e, getPluginID());
-
+ slapi_search_get_entry(&entry_pb, replica_sdn, attrs, &e, getPluginID());
if (e) {
/* Check if the passed in bind dn matches any of the replica bind dns. */
Slapi_Value *bind_dn_sv = slapi_value_new_string(bind_dn);
@@ -2927,6 +2922,7 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
attrs[0] = "member";
attrs[1] = "uniquemember";
attrs[2] = 0;
+ slapi_search_get_entry_done(&entry_pb);
for (i = 0; bind_group_dn != NULL && bind_group_dn[i] != NULL; i++) {
if (ret) {
/* already found a member, just free group */
@@ -2934,14 +2930,14 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
continue;
}
bind_group_sdn = slapi_sdn_new_normdn_passin(bind_group_dn[i]);
- slapi_search_internal_get_entry(bind_group_sdn, attrs, &bind_group_entry, getPluginID());
+ slapi_search_get_entry(&entry_pb, bind_group_sdn, attrs, &bind_group_entry, getPluginID());
if (bind_group_entry) {
ret = slapi_entry_attr_has_syntax_value(bind_group_entry, "member", bind_dn_sv);
if (ret == 0) {
ret = slapi_entry_attr_has_syntax_value(bind_group_entry, "uniquemember", bind_dn_sv);
}
}
- slapi_entry_free(bind_group_entry);
+ slapi_search_get_entry_done(&entry_pb);
slapi_sdn_free(&bind_group_sdn);
}
slapi_ch_free((void **)&bind_group_dn);
@@ -2956,7 +2952,6 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
}
done:
- slapi_entry_free(e);
slapi_sdn_free(&range_sdn);
slapi_sdn_free(&replica_sdn);
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index 40bd4b380..e9e1ec4c7 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -884,7 +884,7 @@ memberof_postop_modrdn(Slapi_PBlock *pb)
pre_sdn = slapi_entry_get_sdn(pre_e);
post_sdn = slapi_entry_get_sdn(post_e);
}
-
+
if (pre_sdn && post_sdn && slapi_sdn_compare(pre_sdn, post_sdn) == 0) {
/* Regarding memberof plugin, this rename is a no-op
* but it can be expensive to process it. So skip it
@@ -1466,6 +1466,7 @@ memberof_modop_one_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_op, Slapi
int
memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_op, Slapi_DN *group_sdn, Slapi_DN *op_this_sdn, Slapi_DN *replace_with_sdn, Slapi_DN *op_to_sdn, memberofstringll *stack)
{
+ Slapi_PBlock *entry_pb = NULL;
int rc = 0;
LDAPMod mod;
LDAPMod replace_mod;
@@ -1515,8 +1516,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o
}
/* determine if this is a group op or single entry */
- slapi_search_internal_get_entry(op_to_sdn, config->groupattrs,
- &e, memberof_get_plugin_id());
+ slapi_search_get_entry(&entry_pb, op_to_sdn, config->groupattrs, &e, memberof_get_plugin_id());
if (!e) {
/* In the case of a delete, we need to worry about the
* missing entry being a nested group. There's a small
@@ -1751,7 +1751,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o
bail:
slapi_value_free(&to_dn_val);
slapi_value_free(&this_dn_val);
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
return rc;
}
@@ -2368,6 +2368,7 @@ bail:
int
memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Value *memberdn)
{
+ Slapi_PBlock *pb = NULL;
int rc = 0;
Slapi_DN *sdn = 0;
Slapi_Entry *group_e = 0;
@@ -2376,8 +2377,8 @@ memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Va
sdn = slapi_sdn_new_normdn_byref(slapi_value_get_string(groupdn));
- slapi_search_internal_get_entry(sdn, config->groupattrs,
- &group_e, memberof_get_plugin_id());
+ slapi_search_get_entry(&pb, sdn, config->groupattrs,
+ &group_e, memberof_get_plugin_id());
if (group_e) {
/* See if memberdn is referred to by any of the group attributes. */
@@ -2388,9 +2389,8 @@ memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Va
break;
}
}
-
- slapi_entry_free(group_e);
}
+ slapi_search_get_entry_done(&pb);
slapi_sdn_free(&sdn);
return rc;
diff --git a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
index 46a76d884..cbec2ec40 100644
--- a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
+++ b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
@@ -749,22 +749,22 @@ pam_passthru_get_config(Slapi_DN *bind_sdn)
if (pam_passthru_check_suffix(cfg, bind_sdn) == LDAP_SUCCESS) {
if (cfg->slapi_filter) {
/* A filter is configured, so see if the bind entry is a match. */
+ Slapi_PBlock *entry_pb = NULL;
Slapi_Entry *test_e = NULL;
/* Fetch the bind entry */
- slapi_search_internal_get_entry(bind_sdn, NULL, &test_e,
- pam_passthruauth_get_plugin_identity());
+ slapi_search_get_entry(&entry_pb, bind_sdn, NULL, &test_e,
+ pam_passthruauth_get_plugin_identity());
/* If the entry doesn't exist, just fall through to the main server code */
if (test_e) {
/* Evaluate the filter. */
if (LDAP_SUCCESS == slapi_filter_test_simple(test_e, cfg->slapi_filter)) {
/* This is a match. */
- slapi_entry_free(test_e);
+ slapi_search_get_entry_done(&entry_pb);
goto done;
}
-
- slapi_entry_free(test_e);
+ slapi_search_get_entry_done(&entry_pb);
}
} else {
/* There is no filter to check, so this is a match. */
diff --git a/ldap/servers/plugins/pam_passthru/pam_ptimpl.c b/ldap/servers/plugins/pam_passthru/pam_ptimpl.c
index 7f5fb02c4..5b43f8d1f 100644
--- a/ldap/servers/plugins/pam_passthru/pam_ptimpl.c
+++ b/ldap/servers/plugins/pam_passthru/pam_ptimpl.c
@@ -81,11 +81,12 @@ derive_from_bind_dn(Slapi_PBlock *pb __attribute__((unused)), const Slapi_DN *bi
static char *
derive_from_bind_entry(Slapi_PBlock *pb, const Slapi_DN *bindsdn, MyStrBuf *pam_id, char *map_ident_attr, int *locked)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_Entry *entry = NULL;
char *attrs[] = {NULL, NULL};
attrs[0] = map_ident_attr;
- int rc = slapi_search_internal_get_entry((Slapi_DN *)bindsdn, attrs, &entry,
- pam_passthruauth_get_plugin_identity());
+ int32_t rc = slapi_search_get_entry(&entry_pb, (Slapi_DN *)bindsdn, attrs, &entry,
+ pam_passthruauth_get_plugin_identity());
if (rc != LDAP_SUCCESS) {
slapi_log_err(SLAPI_LOG_ERR, PAM_PASSTHRU_PLUGIN_SUBSYSTEM,
@@ -108,7 +109,7 @@ derive_from_bind_entry(Slapi_PBlock *pb, const Slapi_DN *bindsdn, MyStrBuf *pam_
init_my_str_buf(pam_id, val);
}
- slapi_entry_free(entry);
+ slapi_search_get_entry_done(&entry_pb);
return pam_id->str;
}
diff --git a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
index 3d0067531..5bca823ff 100644
--- a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
+++ b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
@@ -526,6 +526,7 @@ done:
static int
pam_passthru_preop(Slapi_PBlock *pb, int modtype)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *sdn = NULL;
Slapi_Entry *e = NULL;
LDAPMod **mods;
@@ -555,8 +556,8 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype)
case LDAP_CHANGETYPE_MODIFY:
/* Fetch the entry being modified so we can
* create the resulting entry for validation. */
- slapi_search_internal_get_entry(sdn, 0, &e,
- pam_passthruauth_get_plugin_identity());
+ slapi_search_get_entry(&entry_pb, sdn, 0, &e,
+ pam_passthruauth_get_plugin_identity());
/* If the entry doesn't exist, just bail and
* let the server handle it. */
@@ -576,9 +577,6 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype)
/* Don't bail here, as we need to free the entry. */
}
}
-
- /* Free the entry. */
- slapi_entry_free(e);
break;
case LDAP_CHANGETYPE_DELETE:
case LDAP_CHANGETYPE_MODDN:
@@ -591,6 +589,7 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype)
}
bail:
+ slapi_search_get_entry_done(&entry_pb);
/* If we are refusing the operation, return the result to the client. */
if (ret) {
slapi_send_ldap_result(pb, ret, NULL, returntext, 0, NULL);
diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
index 3b65d6b20..a25839f21 100644
--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
@@ -469,7 +469,8 @@ retry:
*/
/* Get suffix */
Slapi_Entry *suffix = NULL;
- rc = slapi_search_internal_get_entry(area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION));
+ Slapi_PBlock *suffix_pb = NULL;
+ rc = slapi_search_get_entry(&suffix_pb, area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION));
if (rc) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "repl5_tot_run - Unable to "
"get the suffix entry \"%s\".\n",
@@ -517,7 +518,7 @@ retry:
LDAP_SCOPE_SUBTREE, "(parentid>=1)", NULL, 0, ctrls, NULL,
repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), OP_FLAG_BULK_IMPORT);
cb_data.num_entries = 0UL;
- slapi_entry_free(suffix);
+ slapi_search_get_entry_done(&suffix_pb);
} else {
/* Original total update */
/* we need to provide managedsait control so that referral entries can
diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
index d7ccf0e07..e69012204 100644
--- a/ldap/servers/plugins/uiduniq/uid.c
+++ b/ldap/servers/plugins/uiduniq/uid.c
@@ -1254,6 +1254,7 @@ preop_modify(Slapi_PBlock *pb)
static int
preop_modrdn(Slapi_PBlock *pb)
{
+ Slapi_PBlock *entry_pb = NULL;
int result = LDAP_SUCCESS;
Slapi_Entry *e = NULL;
Slapi_Value *sv_requiredObjectClass = NULL;
@@ -1351,7 +1352,7 @@ preop_modrdn(Slapi_PBlock *pb)
/* Get the entry that is being renamed so we can make a dummy copy
* of what it will look like after the rename. */
- err = slapi_search_internal_get_entry(sdn, NULL, &e, plugin_identity);
+ err = slapi_search_get_entry(&entry_pb, sdn, NULL, &e, plugin_identity);
if (err != LDAP_SUCCESS) {
result = uid_op_error(35);
/* We want to return a no such object error if the target doesn't exist. */
@@ -1371,24 +1372,24 @@ preop_modrdn(Slapi_PBlock *pb)
/*
- * Check if it has the required object class
- */
+ * Check if it has the required object class
+ */
if (requiredObjectClass &&
!slapi_entry_attr_has_syntax_value(e, SLAPI_ATTR_OBJECTCLASS, sv_requiredObjectClass)) {
break;
}
/*
- * Find any unique attribute data in the new RDN
- */
+ * Find any unique attribute data in the new RDN
+ */
for (i = 0; attrNames && attrNames[i]; i++) {
err = slapi_entry_attr_find(e, attrNames[i], &attr);
if (!err) {
/*
- * Passed all the requirements - this is an operation we
- * need to enforce uniqueness on. Now find all parent entries
- * with the marker object class, and do a search for each one.
- */
+ * Passed all the requirements - this is an operation we
+ * need to enforce uniqueness on. Now find all parent entries
+ * with the marker object class, and do a search for each one.
+ */
if (NULL != markerObjectClass) {
/* Subtree defined by location of marker object class */
result = findSubtreeAndSearch(slapi_entry_get_sdn(e), attrNames, attr, NULL,
@@ -1407,8 +1408,8 @@ preop_modrdn(Slapi_PBlock *pb)
END
/* Clean-up */
slapi_value_free(&sv_requiredObjectClass);
- if (e)
- slapi_entry_free(e);
+
+ slapi_search_get_entry_done(&entry_pb);
if (result) {
slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name,
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 65f23363a..a70f40316 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1916,18 +1916,13 @@ slapd_bind_local_user(Connection *conn)
char *root_dn = config_get_ldapi_root_dn();
if (root_dn) {
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *edn = slapi_sdn_new_dn_byref(
slapi_dn_normalize(root_dn));
Slapi_Entry *e = 0;
/* root might be locked too! :) */
- ret = slapi_search_internal_get_entry(
- edn, 0,
- &e,
- (void *)plugin_get_default_component_id()
-
- );
-
+ ret = slapi_search_get_entry(&entry_pb, edn, 0, &e, (void *)plugin_get_default_component_id());
if (0 == ret && e) {
ret = slapi_check_account_lock(
0, /* pb not req */
@@ -1955,7 +1950,7 @@ slapd_bind_local_user(Connection *conn)
root_map_free:
/* root_dn consumed by bind creds set */
slapi_sdn_free(&edn);
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
ret = 0;
}
}
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
index bbc0ab71a..259bedfff 100644
--- a/ldap/servers/slapd/modify.c
+++ b/ldap/servers/slapd/modify.c
@@ -592,6 +592,7 @@ modify_internal_pb(Slapi_PBlock *pb)
static void
op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_Backend *be = NULL;
Slapi_Entry *pse;
Slapi_Entry *referral;
@@ -723,7 +724,7 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
* 2. If yes, then if the mods contain any passwdpolicy specific attributes.
* 3. If yes, then it invokes corrosponding checking function.
*/
- if (!repl_op && !internal_op && normdn && (e = get_entry(pb, normdn))) {
+ if (!repl_op && !internal_op && normdn && slapi_search_get_entry(&entry_pb, sdn, NULL, &e, NULL) == LDAP_SUCCESS) {
Slapi_Value target;
slapi_value_init(&target);
slapi_value_set_string(&target, "passwordpolicy");
@@ -1072,7 +1073,7 @@ free_and_return : {
slapi_entry_free(epre);
slapi_entry_free(epost);
}
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
if (be)
slapi_be_Unlock(be);
@@ -1202,12 +1203,13 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M
if (!internal_op) {
/* slapi_acl_check_mods needs an array of LDAPMods, but
* we're really only interested in the one password mod. */
+ Slapi_PBlock *entry_pb = NULL;
LDAPMod *mods[2];
mods[0] = mod;
mods[1] = NULL;
/* We need to actually fetch the target here to use for ACI checking. */
- slapi_search_internal_get_entry(&sdn, NULL, &e, (void *)plugin_get_default_component_id());
+ slapi_search_get_entry(&entry_pb, &sdn, NULL, &e, NULL);
/* Create a bogus entry with just the target dn if we were unable to
* find the actual entry. This will only be used for checking the ACIs. */
@@ -1238,9 +1240,12 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M
}
send_ldap_result(pb, res, NULL, errtxt, 0, NULL);
slapi_ch_free_string(&errtxt);
+ slapi_search_get_entry_done(&entry_pb);
rc = -1;
goto done;
}
+ /* done with slapi entry e */
+ slapi_search_get_entry_done(&entry_pb);
/*
* If this mod is being performed by a password administrator/rootDN,
@@ -1353,7 +1358,6 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M
valuearray_free(&values);
done:
- slapi_entry_free(e);
slapi_sdn_done(&sdn);
slapi_ch_free_string(&proxydn);
slapi_ch_free_string(&proxystr);
diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c
index 9da266b61..a140e7988 100644
--- a/ldap/servers/slapd/plugin_internal_op.c
+++ b/ldap/servers/slapd/plugin_internal_op.c
@@ -882,3 +882,51 @@ slapi_search_internal_get_entry(Slapi_DN *dn, char **attrs, Slapi_Entry **ret_en
int_search_pb = NULL;
return rc;
}
+
+int32_t
+slapi_search_get_entry(Slapi_PBlock **pb, Slapi_DN *dn, char **attrs, Slapi_Entry **ret_entry, void *component_identity)
+{
+ Slapi_Entry **entries = NULL;
+ int32_t rc = 0;
+ void *component = component_identity;
+
+ if (ret_entry) {
+ *ret_entry = NULL;
+ }
+
+ if (component == NULL) {
+ component = (void *)plugin_get_default_component_id();
+ }
+
+ if (*pb == NULL) {
+ *pb = slapi_pblock_new();
+ }
+ slapi_search_internal_set_pb(*pb, slapi_sdn_get_dn(dn), LDAP_SCOPE_BASE,
+ "(|(objectclass=*)(objectclass=ldapsubentry))",
+ attrs, 0, NULL, NULL, component, 0 );
+ slapi_search_internal_pb(*pb);
+ slapi_pblock_get(*pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+ if (LDAP_SUCCESS == rc) {
+ slapi_pblock_get(*pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
+ if (NULL != entries && NULL != entries[0]) {
+ /* Only need to dup the entry if the caller passed ret_entry in. */
+ if (ret_entry) {
+ *ret_entry = entries[0];
+ }
+ } else {
+ rc = LDAP_NO_SUCH_OBJECT;
+ }
+ }
+
+ return rc;
+}
+
+void
+slapi_search_get_entry_done(Slapi_PBlock **pb)
+{
+ if (pb && *pb) {
+ slapi_free_search_results_internal(*pb);
+ slapi_pblock_destroy(*pb);
+ *pb = NULL;
+ }
+}
diff --git a/ldap/servers/slapd/resourcelimit.c b/ldap/servers/slapd/resourcelimit.c
index 705344c84..9c2619716 100644
--- a/ldap/servers/slapd/resourcelimit.c
+++ b/ldap/servers/slapd/resourcelimit.c
@@ -305,22 +305,17 @@ reslimit_get_ext(Slapi_Connection *conn, const char *logname, SLAPIResLimitConnD
int
reslimit_update_from_dn(Slapi_Connection *conn, Slapi_DN *dn)
{
- Slapi_Entry *e;
+ Slapi_PBlock *pb = NULL;
+ Slapi_Entry *e = NULL;
int rc;
- e = NULL;
if (dn != NULL) {
-
char **attrs = reslimit_get_registered_attributes();
- (void)slapi_search_internal_get_entry(dn, attrs, &e, reslimit_componentid);
+ slapi_search_get_entry(&pb, dn, attrs, &e, reslimit_componentid);
charray_free(attrs);
}
-
rc = reslimit_update_from_entry(conn, e);
-
- if (NULL != e) {
- slapi_entry_free(e);
- }
+ slapi_search_get_entry_done(&pb);
return (rc);
}
diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
index d44b03b0e..bf7e59f75 100644
--- a/ldap/servers/slapd/schema.c
+++ b/ldap/servers/slapd/schema.c
@@ -341,6 +341,7 @@ schema_policy_add_action(Slapi_Entry *entry, char *attrName, schema_item_t **lis
static void
schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica)
{
+ Slapi_PBlock *pb = NULL;
Slapi_DN sdn;
Slapi_Entry *entry = NULL;
schema_item_t *schema_item, *next;
@@ -369,8 +370,7 @@ schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica)
/* Load the replication policy of the schema */
slapi_sdn_init_dn_byref(&sdn, dn);
- if (slapi_search_internal_get_entry(&sdn, NULL, &entry, plugin_get_default_component_id()) == LDAP_SUCCESS) {
-
+ if (slapi_search_get_entry(&pb, &sdn, NULL, &entry, plugin_get_default_component_id()) == LDAP_SUCCESS) {
/* fill the policies (accept/reject) regarding objectclass */
schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_OBJECTCLASS_ACCEPT, &replica->objectclasses);
schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_OBJECTCLASS_REJECT, &replica->objectclasses);
@@ -378,9 +378,8 @@ schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica)
/* fill the policies (accept/reject) regarding attribute */
schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_ATTRIBUTE_ACCEPT, &replica->attributes);
schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_ATTRIBUTE_REJECT, &replica->attributes);
-
- slapi_entry_free(entry);
}
+ slapi_search_get_entry_done(&pb);
slapi_sdn_done(&sdn);
}
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 0e3857068..be1e52e4d 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -5972,7 +5972,7 @@ void slapi_seq_internal_set_pb(Slapi_PBlock *pb, char *ibase, int type, char *at
/*
* slapi_search_internal_get_entry() finds an entry given a dn. It returns
- * an LDAP error code (LDAP_SUCCESS if all goes well).
+ * an LDAP error code (LDAP_SUCCESS if all goes well). Caller must free ret_entry
*/
int slapi_search_internal_get_entry(Slapi_DN *dn, char **attrlist, Slapi_Entry **ret_entry, void *caller_identity);
@@ -8296,6 +8296,27 @@ uint64_t slapi_atomic_decr_64(uint64_t *ptr, int memorder);
/* helper function */
const char * slapi_fetch_attr(Slapi_Entry *e, const char *attrname, char *default_val);
+/**
+ * Get a Slapi_Entry via an internal search. The caller then needs to call
+ * slapi_get_entry_done() to free any resources allocated to get the entry
+ *
+ * \param pb - slapi_pblock pointer (the function will allocate if necessary)
+ * \param dn - Slapi_DN of the entry to retrieve
+ * \param attrs - char list of attributes to get
+ * \param ret_entry - pointer to a Slapi_entry wer the returned entry is stored
+ * \param component_identity - plugin component
+ *
+ * \return - ldap result code
+ */
+int32_t slapi_search_get_entry(Slapi_PBlock **pb, Slapi_DN *dn, char **attrs, Slapi_Entry **ret_entry, void *component_identity);
+
+/**
+ * Free the resources allocated by slapi_search_get_entry()
+ *
+ * \param pb - slapi_pblock pointer
+ */
+void slapi_search_get_entry_done(Slapi_PBlock **pb);
+
#ifdef __cplusplus
}
#endif
--
2.26.2

View File

@ -0,0 +1,179 @@
From 826a1bb4ea88915ac492828d1cc4a901623f7866 Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 14 May 2020 14:31:47 +1000
Subject: [PATCH 1/2] Ticket 50933 - Update 2307compat.ldif
Bug Description: This resolves a potential conflict between 60nis.ldif
in freeipa and others with 2307compat, by removing the conflicting
definitions from 2307bis that were included.
Fix Description: By not including these in 2307compat, this means that
sites that rely on the values provided by 2307bis may ALSO need
60nis.ldif to be present. However, these nis values seem like they are
likely very rare in reality, and this also will avoid potential
issues with freeipa. It also is the least disruptive as we don't need
to change an already defined file, and we don't have values where the name
to oid relationship changes.
Fixes: #50933
https://pagure.io/389-ds-base/issue/50933
Author: William Brown <william@blackhats.net.au>
Review by: tbordaz (Thanks!)
---
ldap/schema/10rfc2307compat.ldif | 66 --------------------------------
ldap/schema/60autofs.ldif | 39 ++++++++++++-------
2 files changed, 26 insertions(+), 79 deletions(-)
diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif
index 8810231ac..78c588d08 100644
--- a/ldap/schema/10rfc2307compat.ldif
+++ b/ldap/schema/10rfc2307compat.ldif
@@ -176,50 +176,6 @@ attributeTypes: (
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
SINGLE-VALUE
)
-attributeTypes: (
- 1.3.6.1.1.1.1.28 NAME 'nisPublicKey'
- DESC 'NIS public key'
- EQUALITY octetStringMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.29 NAME 'nisSecretKey'
- DESC 'NIS secret key'
- EQUALITY octetStringMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.30 NAME 'nisDomain'
- DESC 'NIS domain'
- EQUALITY caseIgnoreIA5Match
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.31 NAME 'automountMapName'
- DESC 'automount Map Name'
- EQUALITY caseExactIA5Match
- SUBSTR caseExactIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.32 NAME 'automountKey'
- DESC 'Automount Key value'
- EQUALITY caseExactIA5Match
- SUBSTR caseExactIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.33 NAME 'automountInformation'
- DESC 'Automount information'
- EQUALITY caseExactIA5Match
- SUBSTR caseExactIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- SINGLE-VALUE
- )
# end of attribute types - beginning of objectclasses
objectClasses: (
1.3.6.1.1.1.2.0 NAME 'posixAccount' SUP top AUXILIARY
@@ -324,28 +280,6 @@ objectClasses: (
seeAlso $ serialNumber'
MAY ( bootFile $ bootParameter $ cn $ description $ l $ o $ ou $ owner $ seeAlso $ serialNumber )
)
-objectClasses: (
- 1.3.6.1.1.1.2.14 NAME 'nisKeyObject' SUP top AUXILIARY
- DESC 'An object with a public and secret key'
- MUST ( cn $ nisPublicKey $ nisSecretKey )
- MAY ( uidNumber $ description )
- )
-objectClasses: (
- 1.3.6.1.1.1.2.15 NAME 'nisDomainObject' SUP top AUXILIARY
- DESC 'Associates a NIS domain with a naming context'
- MUST nisDomain
- )
-objectClasses: (
- 1.3.6.1.1.1.2.16 NAME 'automountMap' SUP top STRUCTURAL
- MUST ( automountMapName )
- MAY description
- )
-objectClasses: (
- 1.3.6.1.1.1.2.17 NAME 'automount' SUP top STRUCTURAL
- DESC 'Automount information'
- MUST ( automountKey $ automountInformation )
- MAY description
- )
## namedObject is needed for groups without members
objectClasses: (
1.3.6.1.4.1.5322.13.1.1 NAME 'namedObject' SUP top STRUCTURAL
diff --git a/ldap/schema/60autofs.ldif b/ldap/schema/60autofs.ldif
index 084e9ec30..de3922aa2 100644
--- a/ldap/schema/60autofs.ldif
+++ b/ldap/schema/60autofs.ldif
@@ -6,7 +6,23 @@ dn: cn=schema
################################################################################
#
attributeTypes: (
- 1.3.6.1.1.1.1.33
+ 1.3.6.1.1.1.1.31 NAME 'automountMapName'
+ DESC 'automount Map Name'
+ EQUALITY caseExactIA5Match
+ SUBSTR caseExactIA5SubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.32 NAME 'automountKey'
+ DESC 'Automount Key value'
+ EQUALITY caseExactIA5Match
+ SUBSTR caseExactIA5SubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.33
NAME 'automountInformation'
DESC 'Information used by the autofs automounter'
EQUALITY caseExactIA5Match
@@ -18,25 +34,22 @@ attributeTypes: (
################################################################################
#
objectClasses: (
- 1.3.6.1.1.1.2.17
- NAME 'automount'
- DESC 'An entry in an automounter map'
+ 1.3.6.1.1.1.2.16
+ NAME 'automountMap'
+ DESC 'An group of related automount objects'
SUP top
STRUCTURAL
- MUST ( cn $ automountInformation )
- MAY ( description )
+ MAY ( ou $ automountMapName $ description )
X-ORIGIN 'draft-howard-rfc2307bis'
)
-#
-################################################################################
-#
objectClasses: (
- 1.3.6.1.1.1.2.16
- NAME 'automountMap'
- DESC 'An group of related automount objects'
+ 1.3.6.1.1.1.2.17
+ NAME 'automount'
+ DESC 'An entry in an automounter map'
SUP top
STRUCTURAL
- MUST ( ou )
+ MUST ( automountInformation )
+ MAY ( cn $ description $ automountKey )
X-ORIGIN 'draft-howard-rfc2307bis'
)
#
--
2.26.2

View File

@ -0,0 +1,36 @@
From 3d9ced9e340678cc02b1a36c2139492c95ef15a6 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 12 Aug 2020 12:46:42 -0400
Subject: [PATCH 2/2] Issue 50933 - Fix OID change between 10rfc2307 and
10rfc2307compat
Bug Description: 10rfc2307compat changed the OID for nisMap objectclass to
match the standard OID, but this breaks replication with
older versions of DS.
Fix Description: Continue to use the old (invalid?) OID for nisMap so that
replication does not break in a mixed version environment.
Fixes: https://pagure.io/389-ds-base/issue/50933
Reviewed by: firstyear & tbordaz(Thanks!!)
---
ldap/schema/10rfc2307compat.ldif | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif
index 78c588d08..8ba72e1e3 100644
--- a/ldap/schema/10rfc2307compat.ldif
+++ b/ldap/schema/10rfc2307compat.ldif
@@ -253,7 +253,7 @@ objectClasses: (
MAY ( nisNetgroupTriple $ memberNisNetgroup $ description )
)
objectClasses: (
- 1.3.6.1.1.1.2.9 NAME 'nisMap' SUP top STRUCTURAL
+ 1.3.6.1.1.1.2.13 NAME 'nisMap' SUP top STRUCTURAL
DESC 'A generic abstraction of a NIS map'
MUST nisMapName
MAY description
--
2.26.2

View File

@ -1,96 +0,0 @@
From 9710c327b3034d7a9d112306961c9cec98083df5 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <simon.pichugin@gmail.com>
Date: Mon, 18 May 2020 22:33:45 +0200
Subject: [PATCH 05/12] Issue 51086 - Improve dscreate instance name validation
Bug Description: When creating an instance using dscreate, it doesn't enforce
max name length. The ldapi socket name contains name of the instance. If it's
too long, we can hit limits, and the file name will be truncated. Also, it
doesn't sanitize the instance name, it's possible to create an instance with
non-ascii symbols in its name.
Fix Description: Add more checks to 'dscreate from-file' installation.
Add a limitation for the nsslapd-ldapifilepath string length because it is
limited by sizeof((*ports_info.i_listenaddr)->local.path)), which it is copied to.
https://pagure.io/389-ds-base/issue/51086
Reviewed by: firstyear, mreynolds (Thanks!)
---
ldap/servers/slapd/libglobs.c | 12 ++++++++++++
src/cockpit/389-console/src/ds.jsx | 8 ++++++--
src/lib389/lib389/instance/setup.py | 9 +++++++++
3 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 0d3d9a924..fbf90d92d 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -2390,11 +2390,23 @@ config_set_ldapi_filename(const char *attrname, char *value, char *errorbuf, int
{
int retVal = LDAP_SUCCESS;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ /*
+ * LDAPI file path length is limited by sizeof((*ports_info.i_listenaddr)->local.path))
+ * which is set in main.c inside of "#if defined(ENABLE_LDAPI)" block
+ * ports_info.i_listenaddr is sizeof(PRNetAddr) and our required sizes is 8 bytes less
+ */
+ size_t result_size = sizeof(PRNetAddr) - 8;
if (config_value_is_null(attrname, value, errorbuf, 0)) {
return LDAP_OPERATIONS_ERROR;
}
+ if (strlen(value) >= result_size) {
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: \"%s\" is invalid, its length must be less than %d",
+ attrname, value, result_size);
+ return LDAP_OPERATIONS_ERROR;
+ }
+
if (apply) {
CFG_LOCK_WRITE(slapdFrontendConfig);
diff --git a/src/cockpit/389-console/src/ds.jsx b/src/cockpit/389-console/src/ds.jsx
index 90d9e5abd..53aa5cb79 100644
--- a/src/cockpit/389-console/src/ds.jsx
+++ b/src/cockpit/389-console/src/ds.jsx
@@ -793,10 +793,14 @@ class CreateInstanceModal extends React.Component {
return;
}
newServerId = newServerId.replace(/^slapd-/i, ""); // strip "slapd-"
- if (newServerId.length > 128) {
+ if (newServerId === "admin") {
+ addNotification("warning", "Instance Name 'admin' is reserved, please choose a different name");
+ return;
+ }
+ if (newServerId.length > 80) {
addNotification(
"warning",
- "Instance name is too long, it must not exceed 128 characters"
+ "Instance name is too long, it must not exceed 80 characters"
);
return;
}
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 803992275..f5fc5495d 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -567,6 +567,15 @@ class SetupDs(object):
# We need to know the prefix before we can do the instance checks
assert_c(slapd['instance_name'] is not None, "Configuration instance_name in section [slapd] not found")
+ assert_c(len(slapd['instance_name']) <= 80, "Server identifier should not be longer than 80 symbols")
+ assert_c(all(ord(c) < 128 for c in slapd['instance_name']), "Server identifier can not contain non ascii characters")
+ assert_c(' ' not in slapd['instance_name'], "Server identifier can not contain a space")
+ assert_c(slapd['instance_name'] != 'admin', "Server identifier \"admin\" is reserved, please choose a different identifier")
+
+ # Check that valid characters are used
+ safe = re.compile(r'^[#%:\w@_-]+$').search
+ assert_c(bool(safe(slapd['instance_name'])), "Server identifier has invalid characters, please choose a different value")
+
# Check if the instance exists or not.
# Should I move this import? I think this prevents some recursion
from lib389 import DirSrv
--
2.26.2

View File

@ -1,254 +0,0 @@
From c0cb15445c1434b3d317b1c06ab1a0ba8dbc6f04 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 19 May 2020 15:11:53 -0400
Subject: [PATCH 06/12] Issue 51102 - RFE - ds-replcheck - make online timeout
configurable
Bug Description: When doing an online check with replicas that are very
far apart the connection can time out as the hardcoded
timeout is 5 seconds.
Fix Description: Change the default timeout to never timeout, and add an
CLI option to specify a specific timeout.
Also caught all the possible LDAP exceptions so we can
cleanly "fail". Fixed some python syntax issues, and
improved the entry inconsistency report
relates: https://pagure.io/389-ds-base/issue/51102
Reviewed by: firstyear & spichugi(Thanks!)
---
ldap/admin/src/scripts/ds-replcheck | 90 ++++++++++++++++++-----------
1 file changed, 57 insertions(+), 33 deletions(-)
diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
index 30bcfd65d..5bb7dfce3 100755
--- a/ldap/admin/src/scripts/ds-replcheck
+++ b/ldap/admin/src/scripts/ds-replcheck
@@ -1,7 +1,7 @@
#!/usr/bin/python3
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -21,10 +21,9 @@ import getpass
import signal
from ldif import LDIFRecordList
from ldap.ldapobject import SimpleLDAPObject
-from ldap.cidict import cidict
from ldap.controls import SimplePagedResultsControl
from lib389._entry import Entry
-from lib389.utils import ensure_str, ensure_list_str, ensure_int
+from lib389.utils import ensure_list_str, ensure_int
VERSION = "2.0"
RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))'
@@ -185,11 +184,11 @@ def report_conflict(entry, attr, opts):
report = True
if 'nscpentrywsi' in entry.data:
- found = False
for val in entry.data['nscpentrywsi']:
if val.lower().startswith(attr + ';'):
if (opts['starttime'] - extract_time(val)) <= opts['lag']:
report = False
+ break
return report
@@ -321,6 +320,9 @@ def ldif_search(LDIF, dn):
count = 0
ignore_list = ['conflictcsn', 'modifytimestamp', 'modifiersname']
val = ""
+ attr = ""
+ state_attr = ""
+ part_dn = ""
result['entry'] = None
result['conflict'] = None
result['tombstone'] = False
@@ -570,6 +572,7 @@ def cmp_entry(mentry, rentry, opts):
if val.lower().startswith(mattr + ';'):
if not found:
diff['diff'].append(" Master:")
+ diff['diff'].append(" - Value: %s" % (val.split(':')[1].lstrip()))
diff['diff'].append(" - State Info: %s" % (val))
diff['diff'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
found = True
@@ -588,6 +591,7 @@ def cmp_entry(mentry, rentry, opts):
if val.lower().startswith(mattr + ';'):
if not found:
diff['diff'].append(" Replica:")
+ diff['diff'].append(" - Value: %s" % (val.split(':')[1].lstrip()))
diff['diff'].append(" - State Info: %s" % (val))
diff['diff'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
found = True
@@ -654,7 +658,6 @@ def do_offline_report(opts, output_file=None):
rconflicts = []
rtombstones = 0
mtombstones = 0
- idx = 0
# Open LDIF files
try:
@@ -926,7 +929,7 @@ def validate_suffix(ldapnode, suffix, hostname):
:return - True if suffix exists, otherwise False
"""
try:
- master_basesuffix = ldapnode.search_s(suffix, ldap.SCOPE_BASE )
+ ldapnode.search_s(suffix, ldap.SCOPE_BASE)
except ldap.NO_SUCH_OBJECT:
print("Error: Failed to validate suffix in {}. {} does not exist.".format(hostname, suffix))
return False
@@ -968,12 +971,12 @@ def connect_to_replicas(opts):
replica = SimpleLDAPObject(ruri)
# Set timeouts
- master.set_option(ldap.OPT_NETWORK_TIMEOUT,5.0)
- master.set_option(ldap.OPT_TIMEOUT,5.0)
- replica.set_option(ldap.OPT_NETWORK_TIMEOUT,5.0)
- replica.set_option(ldap.OPT_TIMEOUT,5.0)
+ master.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
+ master.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
+ replica.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
+ replica.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
- # Setup Secure Conenction
+ # Setup Secure Connection
if opts['certdir'] is not None:
# Setup Master
if opts['mprotocol'] != LDAPI:
@@ -1003,7 +1006,7 @@ def connect_to_replicas(opts):
try:
master.simple_bind_s(opts['binddn'], opts['bindpw'])
except ldap.SERVER_DOWN as e:
- print("Cannot connect to %r" % muri)
+ print(f"Cannot connect to {muri} ({str(e)})")
sys.exit(1)
except ldap.LDAPError as e:
print("Error: Failed to authenticate to Master: ({}). "
@@ -1014,7 +1017,7 @@ def connect_to_replicas(opts):
try:
replica.simple_bind_s(opts['binddn'], opts['bindpw'])
except ldap.SERVER_DOWN as e:
- print("Cannot connect to %r" % ruri)
+ print(f"Cannot connect to {ruri} ({str(e)})")
sys.exit(1)
except ldap.LDAPError as e:
print("Error: Failed to authenticate to Replica: ({}). "
@@ -1218,7 +1221,6 @@ def do_online_report(opts, output_file=None):
"""
m_done = False
r_done = False
- done = False
report = {}
report['diff'] = []
report['m_missing'] = []
@@ -1257,15 +1259,22 @@ def do_online_report(opts, output_file=None):
# Read the results and start comparing
while not m_done or not r_done:
- if not m_done:
- m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid)
- elif not r_done:
- m_rdata = []
-
- if not r_done:
- r_rtype, r_rdata, r_rmsgid, r_rctrls = replica.result3(replica_msgid)
- elif not m_done:
- r_rdata = []
+ try:
+ if not m_done:
+ m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid)
+ elif not r_done:
+ m_rdata = []
+ except ldap.LDAPError as e:
+ print("Error: Problem getting the results from the master: %s", str(e))
+ sys.exit(1)
+ try:
+ if not r_done:
+ r_rtype, r_rdata, r_rmsgid, r_rctrls = replica.result3(replica_msgid)
+ elif not m_done:
+ r_rdata = []
+ except ldap.LDAPError as e:
+ print("Error: Problem getting the results from the replica: %s", str(e))
+ sys.exit(1)
# Convert entries
mresult = convert_entries(m_rdata)
@@ -1291,11 +1300,15 @@ def do_online_report(opts, output_file=None):
]
if m_pctrls:
if m_pctrls[0].cookie:
- # Copy cookie from response control to request control
- req_pr_ctrl.cookie = m_pctrls[0].cookie
- master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
- "(|(objectclass=*)(objectclass=ldapsubentry))",
- ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
+ try:
+ # Copy cookie from response control to request control
+ req_pr_ctrl.cookie = m_pctrls[0].cookie
+ master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+ "(|(objectclass=*)(objectclass=ldapsubentry))",
+ ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
+ except ldap.LDAPError as e:
+ print("Error: Problem searching the master: %s", str(e))
+ sys.exit(1)
else:
m_done = True # No more pages available
else:
@@ -1311,11 +1324,15 @@ def do_online_report(opts, output_file=None):
if r_pctrls:
if r_pctrls[0].cookie:
- # Copy cookie from response control to request control
- req_pr_ctrl.cookie = r_pctrls[0].cookie
- replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
- "(|(objectclass=*)(objectclass=ldapsubentry))",
- ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
+ try:
+ # Copy cookie from response control to request control
+ req_pr_ctrl.cookie = r_pctrls[0].cookie
+ replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+ "(|(objectclass=*)(objectclass=ldapsubentry))",
+ ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
+ except ldap.LDAPError as e:
+ print("Error: Problem searching the replica: %s", str(e))
+ sys.exit(1)
else:
r_done = True # No more pages available
else:
@@ -1426,6 +1443,9 @@ def init_online_params(args):
# prompt for password
opts['bindpw'] = getpass.getpass('Enter password: ')
+ # lastly handle the timeout
+ opts['timeout'] = int(args.timeout)
+
return opts
@@ -1553,6 +1573,8 @@ def main():
state_parser.add_argument('-y', '--pass-file', help='A text file containing the clear text password for the bind dn', dest='pass_file', default=None)
state_parser.add_argument('-Z', '--cert-dir', help='The certificate database directory for secure connections',
dest='certdir', default=None)
+ state_parser.add_argument('-t', '--timeout', help='The timeout for the LDAP connections. Default is no timeout.',
+ type=int, dest='timeout', default=-1)
# Online mode
online_parser = subparsers.add_parser('online', help="Compare two online replicas for differences")
@@ -1577,6 +1599,8 @@ def main():
online_parser.add_argument('-p', '--page-size', help='The paged-search result grouping size (default 500 entries)',
dest='pagesize', default=500)
online_parser.add_argument('-o', '--out-file', help='The output file', dest='file', default=None)
+ online_parser.add_argument('-t', '--timeout', help='The timeout for the LDAP connections. Default is no timeout.',
+ type=int, dest='timeout', default=-1)
# Offline LDIF mode
offline_parser = subparsers.add_parser('offline', help="Compare two replication LDIF files for differences (LDIF file generated by 'db2ldif -r')")
--
2.26.2

View File

@ -0,0 +1,147 @@
From 1085823bf5586d55103cfba249fdf212e9afcb7c Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 4 Jun 2020 11:51:53 +1000
Subject: [PATCH] Ticket 51131 - improve mutex alloc in conntable
Bug Description: We previously did delayed allocation
of mutexs, which @tbordaz noted can lead to high usage
of the pthread mutex init routines. This was done under
the conntable lock, as well as cleaning the connection
Fix Description: rather than delayed allocation, we
initialise everything at start up instead, which means
that while startup may have a delay, at run time we have
a smaller and lighter connection allocation routine,
that is able to release the CT lock sooner.
https://pagure.io/389-ds-base/issue/51131
Author: William Brown <william@blackhats.net.au>
Review by: ???
---
ldap/servers/slapd/conntable.c | 86 +++++++++++++++++++---------------
1 file changed, 47 insertions(+), 39 deletions(-)
diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c
index b23dc3435..feb9c0d75 100644
--- a/ldap/servers/slapd/conntable.c
+++ b/ldap/servers/slapd/conntable.c
@@ -138,10 +138,21 @@ connection_table_new(int table_size)
ct->conn_next_offset = 1;
ct->conn_free_offset = 1;
+ pthread_mutexattr_t monitor_attr = {0};
+ pthread_mutexattr_init(&monitor_attr);
+ pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE);
+
/* We rely on the fact that we called calloc, which zeros the block, so we don't
* init any structure element unless a zero value is troublesome later
*/
for (i = 0; i < table_size; i++) {
+ /*
+ * Technically this is a no-op due to calloc, but we should always be
+ * careful with things like this ....
+ */
+ ct->c[i].c_state = CONN_STATE_FREE;
+ /* Start the conn setup. */
+
LBER_SOCKET invalid_socket;
/* DBDB---move this out of here once everything works */
ct->c[i].c_sb = ber_sockbuf_alloc();
@@ -161,11 +172,20 @@ connection_table_new(int table_size)
ct->c[i].c_prev = NULL;
ct->c[i].c_ci = i;
ct->c[i].c_fdi = SLAPD_INVALID_SOCKET_INDEX;
- /*
- * Technically this is a no-op due to calloc, but we should always be
- * careful with things like this ....
- */
- ct->c[i].c_state = CONN_STATE_FREE;
+
+ if (pthread_mutex_init(&(ct->c[i].c_mutex), &monitor_attr) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "pthread_mutex_init failed\n");
+ exit(1);
+ }
+
+ ct->c[i].c_pdumutex = PR_NewLock();
+ if (ct->c[i].c_pdumutex == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n");
+ exit(1);
+ }
+
+ /* Ready to rock, mark as such. */
+ ct->c[i].c_state = CONN_STATE_INIT;
/* Prepare the connection into the freelist. */
ct->c_freelist[i] = &(ct->c[i]);
}
@@ -241,44 +261,32 @@ connection_table_get_connection(Connection_Table *ct, int sd)
/* Never use slot 0 */
ct->conn_next_offset += 1;
}
- /* Now prep the slot for usage. */
- PR_ASSERT(c->c_next == NULL);
- PR_ASSERT(c->c_prev == NULL);
- PR_ASSERT(c->c_extension == NULL);
-
- if (c->c_state == CONN_STATE_FREE) {
-
- c->c_state = CONN_STATE_INIT;
-
- pthread_mutexattr_t monitor_attr = {0};
- pthread_mutexattr_init(&monitor_attr);
- pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE);
- if (pthread_mutex_init(&(c->c_mutex), &monitor_attr) != 0) {
- slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "pthread_mutex_init failed\n");
- exit(1);
- }
-
- c->c_pdumutex = PR_NewLock();
- if (c->c_pdumutex == NULL) {
- c->c_pdumutex = NULL;
- slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n");
- exit(1);
- }
- }
- /* Let's make sure there's no cruft left on there from the last time this connection was used. */
- /* Note: no need to lock c->c_mutex because this function is only
- * called by one thread (the slapd_daemon thread), and if we got this
- * far then `c' is not being used by any operation threads, etc.
- */
- connection_cleanup(c);
- c->c_ct = ct; /* pointer to connection table that owns this connection */
+ PR_Unlock(ct->table_mutex);
} else {
- /* couldn't find a Connection */
+ /* couldn't find a Connection, table must be full */
slapi_log_err(SLAPI_LOG_CONNS, "connection_table_get_connection", "Max open connections reached\n");
+ PR_Unlock(ct->table_mutex);
+ return NULL;
}
- /* We could move this to before the c alloc as there is no point to remain here. */
- PR_Unlock(ct->table_mutex);
+ /* Now prep the slot for usage. */
+ PR_ASSERT(c != NULL);
+ PR_ASSERT(c->c_next == NULL);
+ PR_ASSERT(c->c_prev == NULL);
+ PR_ASSERT(c->c_extension == NULL);
+ PR_ASSERT(c->c_state == CONN_STATE_INIT);
+ /* Let's make sure there's no cruft left on there from the last time this connection was used. */
+
+ /*
+ * Note: no need to lock c->c_mutex because this function is only
+ * called by one thread (the slapd_daemon thread), and if we got this
+ * far then `c' is not being used by any operation threads, etc. The
+ * memory ordering will be provided by the work queue sending c to a
+ * thread.
+ */
+ connection_cleanup(c);
+ /* pointer to connection table that owns this connection */
+ c->c_ct = ct;
return c;
}
--
2.26.2

View File

@ -0,0 +1,66 @@
From a9f53e9958861e6a7a827bd852d72d51a6512396 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 25 Nov 2020 18:07:34 +0100
Subject: [PATCH] Issue 4297 - 2nd fix for on ADD replication URP issue
internal searches with filter containing unescaped chars (#4439)
Bug description:
Previous fix is buggy because slapi_filter_escape_filter_value returns
an escaped filter component, not an escaped assertion value.
Fix description:
use the escaped filter component
relates: https://github.com/389ds/389-ds-base/issues/4297
Reviewed by: William Brown
Platforms tested: F31
---
ldap/servers/plugins/replication/urp.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
index f41dbc72d..ed340c9d8 100644
--- a/ldap/servers/plugins/replication/urp.c
+++ b/ldap/servers/plugins/replication/urp.c
@@ -1411,12 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry,
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
char *basedn = slapi_entry_get_ndn(entry);
- char *escaped_basedn;
+ char *escaped_filter;
const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry));
- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", basedn);
- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
- slapi_ch_free((void **)&escaped_basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
+ slapi_ch_free((void **)&escaped_filter);
newpb = slapi_pblock_new();
slapi_search_internal_set_pb(newpb,
slapi_sdn_get_dn(suffix), /* Base DN */
@@ -1605,15 +1605,15 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
const char *basedn = slapi_sdn_get_dn(parentdn);
- char *escaped_basedn;
- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn);
+ char *escaped_filter;
+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn);
char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn");
CSN *conflict_csn = csn_new_by_string(conflict_csnstr);
CSN *tombstone_csn = NULL;
- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
- slapi_ch_free((void **)&escaped_basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
+ slapi_ch_free((void **)&escaped_filter);
newpb = slapi_pblock_new();
char *parent_dn = slapi_dn_parent (basedn);
slapi_search_internal_set_pb(newpb,
--
2.26.2

View File

@ -1,428 +0,0 @@
From a1cd3cf8e8b6b33ab21d5338921187a76dd9dcd0 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 22 May 2020 15:41:45 -0400
Subject: [PATCH 07/12] Issue 51110 - Fix ASAN ODR warnings
Description: Fixed ODR issues with global attributes which were duplicated from
the core server into the replication and retrocl plugins.
relates: https://pagure.io/389-ds-base/issue/51110
Reviewed by: firstyear(Thanks!)
---
ldap/servers/plugins/replication/repl5.h | 17 +++---
.../plugins/replication/repl_globals.c | 17 +++---
ldap/servers/plugins/replication/replutil.c | 16 +++---
ldap/servers/plugins/retrocl/retrocl.h | 22 ++++----
ldap/servers/plugins/retrocl/retrocl_cn.c | 12 ++---
ldap/servers/plugins/retrocl/retrocl_po.c | 52 +++++++++----------
ldap/servers/plugins/retrocl/retrocl_trim.c | 30 +++++------
7 files changed, 82 insertions(+), 84 deletions(-)
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 873dd8a16..72b7089e3 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -280,15 +280,14 @@ struct berval *NSDS90StartReplicationRequest_new(const char *protocol_oid,
int multimaster_extop_NSDS50ReplicationEntry(Slapi_PBlock *pb);
/* From repl_globals.c */
-extern char *attr_changenumber;
-extern char *attr_targetdn;
-extern char *attr_changetype;
-extern char *attr_newrdn;
-extern char *attr_deleteoldrdn;
-extern char *attr_changes;
-extern char *attr_newsuperior;
-extern char *attr_changetime;
-extern char *attr_dataversion;
+extern char *repl_changenumber;
+extern char *repl_targetdn;
+extern char *repl_changetype;
+extern char *repl_newrdn;
+extern char *repl_deleteoldrdn;
+extern char *repl_changes;
+extern char *repl_newsuperior;
+extern char *repl_changetime;
extern char *attr_csn;
extern char *changetype_add;
extern char *changetype_delete;
diff --git a/ldap/servers/plugins/replication/repl_globals.c b/ldap/servers/plugins/replication/repl_globals.c
index 355a0ffa1..c615c77da 100644
--- a/ldap/servers/plugins/replication/repl_globals.c
+++ b/ldap/servers/plugins/replication/repl_globals.c
@@ -48,15 +48,14 @@ char *changetype_delete = CHANGETYPE_DELETE;
char *changetype_modify = CHANGETYPE_MODIFY;
char *changetype_modrdn = CHANGETYPE_MODRDN;
char *changetype_moddn = CHANGETYPE_MODDN;
-char *attr_changenumber = ATTR_CHANGENUMBER;
-char *attr_targetdn = ATTR_TARGETDN;
-char *attr_changetype = ATTR_CHANGETYPE;
-char *attr_newrdn = ATTR_NEWRDN;
-char *attr_deleteoldrdn = ATTR_DELETEOLDRDN;
-char *attr_changes = ATTR_CHANGES;
-char *attr_newsuperior = ATTR_NEWSUPERIOR;
-char *attr_changetime = ATTR_CHANGETIME;
-char *attr_dataversion = ATTR_DATAVERSION;
+char *repl_changenumber = ATTR_CHANGENUMBER;
+char *repl_targetdn = ATTR_TARGETDN;
+char *repl_changetype = ATTR_CHANGETYPE;
+char *repl_newrdn = ATTR_NEWRDN;
+char *repl_deleteoldrdn = ATTR_DELETEOLDRDN;
+char *repl_changes = ATTR_CHANGES;
+char *repl_newsuperior = ATTR_NEWSUPERIOR;
+char *repl_changetime = ATTR_CHANGETIME;
char *attr_csn = ATTR_CSN;
char *type_copyingFrom = TYPE_COPYINGFROM;
char *type_copiedFrom = TYPE_COPIEDFROM;
diff --git a/ldap/servers/plugins/replication/replutil.c b/ldap/servers/plugins/replication/replutil.c
index de1e77880..39f821d12 100644
--- a/ldap/servers/plugins/replication/replutil.c
+++ b/ldap/servers/plugins/replication/replutil.c
@@ -64,14 +64,14 @@ get_cleattrs()
{
if (cleattrs[0] == NULL) {
cleattrs[0] = type_objectclass;
- cleattrs[1] = attr_changenumber;
- cleattrs[2] = attr_targetdn;
- cleattrs[3] = attr_changetype;
- cleattrs[4] = attr_newrdn;
- cleattrs[5] = attr_deleteoldrdn;
- cleattrs[6] = attr_changes;
- cleattrs[7] = attr_newsuperior;
- cleattrs[8] = attr_changetime;
+ cleattrs[1] = repl_changenumber;
+ cleattrs[2] = repl_targetdn;
+ cleattrs[3] = repl_changetype;
+ cleattrs[4] = repl_newrdn;
+ cleattrs[5] = repl_deleteoldrdn;
+ cleattrs[6] = repl_changes;
+ cleattrs[7] = repl_newsuperior;
+ cleattrs[8] = repl_changetime;
cleattrs[9] = NULL;
}
return cleattrs;
diff --git a/ldap/servers/plugins/retrocl/retrocl.h b/ldap/servers/plugins/retrocl/retrocl.h
index 06482a14c..2ce76fcec 100644
--- a/ldap/servers/plugins/retrocl/retrocl.h
+++ b/ldap/servers/plugins/retrocl/retrocl.h
@@ -94,17 +94,17 @@ extern int retrocl_nattributes;
extern char **retrocl_attributes;
extern char **retrocl_aliases;
-extern const char *attr_changenumber;
-extern const char *attr_targetdn;
-extern const char *attr_changetype;
-extern const char *attr_newrdn;
-extern const char *attr_newsuperior;
-extern const char *attr_deleteoldrdn;
-extern const char *attr_changes;
-extern const char *attr_changetime;
-extern const char *attr_objectclass;
-extern const char *attr_nsuniqueid;
-extern const char *attr_isreplicated;
+extern const char *retrocl_changenumber;
+extern const char *retrocl_targetdn;
+extern const char *retrocl_changetype;
+extern const char *retrocl_newrdn;
+extern const char *retrocl_newsuperior;
+extern const char *retrocl_deleteoldrdn;
+extern const char *retrocl_changes;
+extern const char *retrocl_changetime;
+extern const char *retrocl_objectclass;
+extern const char *retrocl_nsuniqueid;
+extern const char *retrocl_isreplicated;
extern PRLock *retrocl_internal_lock;
extern Slapi_RWLock *retrocl_cn_lock;
diff --git a/ldap/servers/plugins/retrocl/retrocl_cn.c b/ldap/servers/plugins/retrocl/retrocl_cn.c
index 709d7a857..5fc5f586d 100644
--- a/ldap/servers/plugins/retrocl/retrocl_cn.c
+++ b/ldap/servers/plugins/retrocl/retrocl_cn.c
@@ -62,7 +62,7 @@ handle_cnum_entry(Slapi_Entry *e, void *callback_data)
Slapi_Attr *chattr = NULL;
sval = NULL;
value = NULL;
- if (slapi_entry_attr_find(e, attr_changenumber, &chattr) == 0) {
+ if (slapi_entry_attr_find(e, retrocl_changenumber, &chattr) == 0) {
slapi_attr_first_value(chattr, &sval);
if (NULL != sval) {
value = slapi_value_get_berval(sval);
@@ -79,7 +79,7 @@ handle_cnum_entry(Slapi_Entry *e, void *callback_data)
chattr = NULL;
sval = NULL;
value = NULL;
- if (slapi_entry_attr_find(e, attr_changetime, &chattr) == 0) {
+ if (slapi_entry_attr_find(e, retrocl_changetime, &chattr) == 0) {
slapi_attr_first_value(chattr, &sval);
if (NULL != sval) {
value = slapi_value_get_berval(sval);
@@ -134,7 +134,7 @@ retrocl_get_changenumbers(void)
cr.cr_time = 0;
slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_FIRST,
- (char *)attr_changenumber, /* cast away const */
+ (char *)retrocl_changenumber, /* cast away const */
NULL, NULL, 0, &cr, NULL, handle_cnum_result,
handle_cnum_entry, NULL);
@@ -144,7 +144,7 @@ retrocl_get_changenumbers(void)
slapi_ch_free((void **)&cr.cr_time);
slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_LAST,
- (char *)attr_changenumber, /* cast away const */
+ (char *)retrocl_changenumber, /* cast away const */
NULL, NULL, 0, &cr, NULL, handle_cnum_result,
handle_cnum_entry, NULL);
@@ -185,7 +185,7 @@ retrocl_getchangetime(int type, int *err)
return NO_TIME;
}
slapi_seq_callback(RETROCL_CHANGELOG_DN, type,
- (char *)attr_changenumber, /* cast away const */
+ (char *)retrocl_changenumber, /* cast away const */
NULL,
NULL, 0, &cr, NULL,
handle_cnum_result, handle_cnum_entry, NULL);
@@ -353,7 +353,7 @@ retrocl_update_lastchangenumber(void)
cr.cr_cnum = 0;
cr.cr_time = 0;
slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_LAST,
- (char *)attr_changenumber, /* cast away const */
+ (char *)retrocl_changenumber, /* cast away const */
NULL, NULL, 0, &cr, NULL, handle_cnum_result,
handle_cnum_entry, NULL);
diff --git a/ldap/servers/plugins/retrocl/retrocl_po.c b/ldap/servers/plugins/retrocl/retrocl_po.c
index d2af79b31..e1488f56b 100644
--- a/ldap/servers/plugins/retrocl/retrocl_po.c
+++ b/ldap/servers/plugins/retrocl/retrocl_po.c
@@ -25,17 +25,17 @@ modrdn2reple(Slapi_Entry *e, const char *newrdn, int deloldrdn, LDAPMod **ldm, c
/******************************/
-const char *attr_changenumber = "changenumber";
-const char *attr_targetdn = "targetdn";
-const char *attr_changetype = "changetype";
-const char *attr_newrdn = "newrdn";
-const char *attr_deleteoldrdn = "deleteoldrdn";
-const char *attr_changes = "changes";
-const char *attr_newsuperior = "newsuperior";
-const char *attr_changetime = "changetime";
-const char *attr_objectclass = "objectclass";
-const char *attr_nsuniqueid = "nsuniqueid";
-const char *attr_isreplicated = "isreplicated";
+const char *retrocl_changenumber = "changenumber";
+const char *retrocl_targetdn = "targetdn";
+const char *retrocl_changetype = "changetype";
+const char *retrocl_newrdn = "newrdn";
+const char *retrocl_deleteoldrdn = "deleteoldrdn";
+const char *retrocl_changes = "changes";
+const char *retrocl_newsuperior = "newsuperior";
+const char *retrocl_changetime = "changetime";
+const char *retrocl_objectclass = "objectclass";
+const char *retrocl_nsuniqueid = "nsuniqueid";
+const char *retrocl_isreplicated = "isreplicated";
/*
* Function: make_changes_string
@@ -185,7 +185,7 @@ write_replog_db(
changenum, dn);
/* Construct the dn of this change record */
- edn = slapi_ch_smprintf("%s=%lu,%s", attr_changenumber, changenum, RETROCL_CHANGELOG_DN);
+ edn = slapi_ch_smprintf("%s=%lu,%s", retrocl_changenumber, changenum, RETROCL_CHANGELOG_DN);
/*
* Create the entry struct, and fill in fields common to all types
@@ -214,7 +214,7 @@ write_replog_db(
attributeAlias = attributeName;
}
- if (strcasecmp(attributeName, attr_nsuniqueid) == 0) {
+ if (strcasecmp(attributeName, retrocl_nsuniqueid) == 0) {
Slapi_Entry *entry = NULL;
const char *uniqueId = NULL;
@@ -236,7 +236,7 @@ write_replog_db(
extensibleObject = 1;
- } else if (strcasecmp(attributeName, attr_isreplicated) == 0) {
+ } else if (strcasecmp(attributeName, retrocl_isreplicated) == 0) {
int isReplicated = 0;
char *attributeValue = NULL;
@@ -298,17 +298,17 @@ write_replog_db(
sprintf(chnobuf, "%lu", changenum);
val.bv_val = chnobuf;
val.bv_len = strlen(chnobuf);
- slapi_entry_add_values(e, attr_changenumber, vals);
+ slapi_entry_add_values(e, retrocl_changenumber, vals);
/* Set the targetentrydn attribute */
val.bv_val = dn;
val.bv_len = strlen(dn);
- slapi_entry_add_values(e, attr_targetdn, vals);
+ slapi_entry_add_values(e, retrocl_targetdn, vals);
/* Set the changeTime attribute */
val.bv_val = format_genTime(curtime);
val.bv_len = strlen(val.bv_val);
- slapi_entry_add_values(e, attr_changetime, vals);
+ slapi_entry_add_values(e, retrocl_changetime, vals);
slapi_ch_free((void **)&val.bv_val);
/*
@@ -344,7 +344,7 @@ write_replog_db(
/* Set the changetype attribute */
val.bv_val = "delete";
val.bv_len = 6;
- slapi_entry_add_values(e, attr_changetype, vals);
+ slapi_entry_add_values(e, retrocl_changetype, vals);
}
break;
@@ -422,7 +422,7 @@ entry2reple(Slapi_Entry *e, Slapi_Entry *oe, int optype)
} else {
return (1);
}
- slapi_entry_add_values(e, attr_changetype, vals);
+ slapi_entry_add_values(e, retrocl_changetype, vals);
estr = slapi_entry2str(oe, &len);
p = estr;
@@ -435,7 +435,7 @@ entry2reple(Slapi_Entry *e, Slapi_Entry *oe, int optype)
}
val.bv_val = p;
val.bv_len = len - (p - estr); /* length + terminating \0 */
- slapi_entry_add_values(e, attr_changes, vals);
+ slapi_entry_add_values(e, retrocl_changes, vals);
slapi_ch_free_string(&estr);
return 0;
}
@@ -471,7 +471,7 @@ mods2reple(Slapi_Entry *e, LDAPMod **ldm)
if (NULL != l) {
val.bv_val = l->ls_buf;
val.bv_len = l->ls_len + 1; /* string + terminating \0 */
- slapi_entry_add_values(e, attr_changes, vals);
+ slapi_entry_add_values(e, retrocl_changes, vals);
lenstr_free(&l);
}
}
@@ -511,12 +511,12 @@ modrdn2reple(
val.bv_val = "modrdn";
val.bv_len = 6;
- slapi_entry_add_values(e, attr_changetype, vals);
+ slapi_entry_add_values(e, retrocl_changetype, vals);
if (newrdn) {
val.bv_val = (char *)newrdn; /* cast away const */
val.bv_len = strlen(newrdn);
- slapi_entry_add_values(e, attr_newrdn, vals);
+ slapi_entry_add_values(e, retrocl_newrdn, vals);
}
if (deloldrdn == 0) {
@@ -526,12 +526,12 @@ modrdn2reple(
val.bv_val = "TRUE";
val.bv_len = 4;
}
- slapi_entry_add_values(e, attr_deleteoldrdn, vals);
+ slapi_entry_add_values(e, retrocl_deleteoldrdn, vals);
if (newsuperior) {
val.bv_val = (char *)newsuperior; /* cast away const */
val.bv_len = strlen(newsuperior);
- slapi_entry_add_values(e, attr_newsuperior, vals);
+ slapi_entry_add_values(e, retrocl_newsuperior, vals);
}
if (NULL != ldm) {
@@ -540,7 +540,7 @@ modrdn2reple(
if (l->ls_len) {
val.bv_val = l->ls_buf;
val.bv_len = l->ls_len;
- slapi_entry_add_values(e, attr_changes, vals);
+ slapi_entry_add_values(e, retrocl_changes, vals);
}
lenstr_free(&l);
}
diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c
index 0378eb7f6..d031dc3f8 100644
--- a/ldap/servers/plugins/retrocl/retrocl_trim.c
+++ b/ldap/servers/plugins/retrocl/retrocl_trim.c
@@ -49,15 +49,15 @@ static const char **
get_cleattrs(void)
{
if (cleattrs[0] == NULL) {
- cleattrs[0] = attr_objectclass;
- cleattrs[1] = attr_changenumber;
- cleattrs[2] = attr_targetdn;
- cleattrs[3] = attr_changetype;
- cleattrs[4] = attr_newrdn;
- cleattrs[5] = attr_deleteoldrdn;
- cleattrs[6] = attr_changes;
- cleattrs[7] = attr_newsuperior;
- cleattrs[8] = attr_changetime;
+ cleattrs[0] = retrocl_objectclass;
+ cleattrs[1] = retrocl_changenumber;
+ cleattrs[2] = retrocl_targetdn;
+ cleattrs[3] = retrocl_changetype;
+ cleattrs[4] = retrocl_newrdn;
+ cleattrs[5] = retrocl_deleteoldrdn;
+ cleattrs[6] = retrocl_changes;
+ cleattrs[7] = retrocl_newsuperior;
+ cleattrs[8] = retrocl_changetime;
cleattrs[9] = NULL;
}
return cleattrs;
@@ -81,7 +81,7 @@ delete_changerecord(changeNumber cnum)
char *dnbuf;
int delrc;
- dnbuf = slapi_ch_smprintf("%s=%ld, %s", attr_changenumber, cnum,
+ dnbuf = slapi_ch_smprintf("%s=%ld, %s", retrocl_changenumber, cnum,
RETROCL_CHANGELOG_DN);
pb = slapi_pblock_new();
slapi_delete_internal_set_pb(pb, dnbuf, NULL /*controls*/, NULL /* uniqueid */,
@@ -154,7 +154,7 @@ handle_getchangetime_search(Slapi_Entry *e, void *callback_data)
if (NULL != e) {
Slapi_Value *sval = NULL;
const struct berval *val = NULL;
- rc = slapi_entry_attr_find(e, attr_changetime, &attr);
+ rc = slapi_entry_attr_find(e, retrocl_changetime, &attr);
/* Bug 624442: Logic checking for lack of timestamp was
reversed. */
if (0 != rc || slapi_attr_first_value(attr, &sval) == -1 ||
@@ -174,14 +174,14 @@ handle_getchangetime_search(Slapi_Entry *e, void *callback_data)
/*
* Function: get_changetime
* Arguments: cnum - number of change record to retrieve
- * Returns: Taking the attr_changetime of the 'cnum' entry,
+ * Returns: Taking the retrocl_changetime of the 'cnum' entry,
* it converts it into time_t (parse_localTime) and returns this time value.
* It returns 0 in the following cases:
- * - changerecord entry has not attr_changetime
+ * - changerecord entry has not retrocl_changetime
* - attr_changetime attribute has no value
* - attr_changetime attribute value is empty
*
- * Description: Retrieve attr_changetime ("changetime") from a changerecord whose number is "cnum".
+ * Description: Retrieve retrocl_changetime ("changetime") from a changerecord whose number is "cnum".
*/
static time_t
get_changetime(changeNumber cnum, int *err)
@@ -198,7 +198,7 @@ get_changetime(changeNumber cnum, int *err)
}
crtp->crt_nentries = crtp->crt_err = 0;
crtp->crt_time = 0;
- PR_snprintf(fstr, sizeof(fstr), "%s=%ld", attr_changenumber, cnum);
+ PR_snprintf(fstr, sizeof(fstr), "%s=%ld", retrocl_changenumber, cnum);
pb = slapi_pblock_new();
slapi_search_internal_set_pb(pb, RETROCL_CHANGELOG_DN,
--
2.26.2

View File

@ -0,0 +1,502 @@
From 4faec52810e12070ef72da347bb590c57d8761e4 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 20 Nov 2020 17:47:18 -0500
Subject: [PATCH 1/2] Issue 3657 - Add options to dsctl for dsrc file
Description: Add options to create, modify, delete, and display
the .dsrc CLI tool shortcut file.
Relates: https://github.com/389ds/389-ds-base/issues/3657
Reviewed by: firstyear(Thanks!)
---
dirsrvtests/tests/suites/clu/dsrc_test.py | 136 ++++++++++
src/lib389/cli/dsctl | 2 +
src/lib389/lib389/cli_ctl/dsrc.py | 312 ++++++++++++++++++++++
3 files changed, 450 insertions(+)
create mode 100644 dirsrvtests/tests/suites/clu/dsrc_test.py
create mode 100644 src/lib389/lib389/cli_ctl/dsrc.py
diff --git a/dirsrvtests/tests/suites/clu/dsrc_test.py b/dirsrvtests/tests/suites/clu/dsrc_test.py
new file mode 100644
index 000000000..1b27700ec
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/dsrc_test.py
@@ -0,0 +1,136 @@
+import logging
+import pytest
+import os
+from os.path import expanduser
+from lib389.cli_base import FakeArgs
+from lib389.cli_ctl.dsrc import create_dsrc, modify_dsrc, delete_dsrc, display_dsrc
+from lib389._constants import DEFAULT_SUFFIX, DN_DM
+from lib389.topologies import topology_st as topo
+
+log = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="function")
+def setup(topo, request):
+ """Preserve any existing .dsrc file"""
+
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+ backup_file = dsrc_file + ".original"
+ if os.path.exists(dsrc_file):
+ os.rename(dsrc_file, backup_file)
+
+ def fin():
+ if os.path.exists(backup_file):
+ os.rename(backup_file, dsrc_file)
+
+ request.addfinalizer(fin)
+
+
+def test_dsrc(topo, setup):
+ """Test "dsctl dsrc" command
+
+ :id: 0610de6c-e167-4761-bdab-3e677b2d44bb
+ :setup: Standalone Instance
+ :steps:
+ 1. Test creation works
+ 2. Test creating duplicate section
+ 3. Test adding an additional inst config works
+ 4. Test removing an instance works
+ 5. Test modify works
+ 6. Test delete works
+ 7. Test display fails when no file is present
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ """
+
+ inst = topo.standalone
+ serverid = inst.serverid
+ second_inst_name = "Second"
+ second_inst_basedn = "o=second"
+ different_suffix = "o=different"
+
+ # Setup our args
+ args = FakeArgs()
+ args.basedn = DEFAULT_SUFFIX
+ args.binddn = DN_DM
+ args.json = None
+ args.uri = None
+ args.saslmech = None
+ args.tls_cacertdir = None
+ args.tls_cert = None
+ args.tls_key = None
+ args.tls_reqcert = None
+ args.starttls = None
+ args.cancel_starttls = None
+ args.pwdfile = None
+ args.do_it = True
+
+ # Create a dsrc configuration entry
+ create_dsrc(inst, log, args)
+ display_dsrc(inst, topo.logcap.log, args)
+ assert topo.logcap.contains("basedn = " + args.basedn)
+ assert topo.logcap.contains("binddn = " + args.binddn)
+ assert topo.logcap.contains("[" + serverid + "]")
+ topo.logcap.flush()
+
+ # Attempt to add duplicate instance section
+ with pytest.raises(ValueError):
+ create_dsrc(inst, log, args)
+
+ # Test adding a second instance works correctly
+ inst.serverid = second_inst_name
+ args.basedn = second_inst_basedn
+ create_dsrc(inst, log, args)
+ display_dsrc(inst, topo.logcap.log, args)
+ assert topo.logcap.contains("basedn = " + args.basedn)
+ assert topo.logcap.contains("[" + second_inst_name + "]")
+ topo.logcap.flush()
+
+ # Delete second instance
+ delete_dsrc(inst, log, args)
+ inst.serverid = serverid # Restore original instance name
+ display_dsrc(inst, topo.logcap.log, args)
+ assert not topo.logcap.contains("[" + second_inst_name + "]")
+ assert not topo.logcap.contains("basedn = " + args.basedn)
+ # Make sure first instance config is still present
+ assert topo.logcap.contains("[" + serverid + "]")
+ assert topo.logcap.contains("binddn = " + args.binddn)
+ topo.logcap.flush()
+
+ # Modify the config
+ args.basedn = different_suffix
+ modify_dsrc(inst, log, args)
+ display_dsrc(inst, topo.logcap.log, args)
+ assert topo.logcap.contains(different_suffix)
+ topo.logcap.flush()
+
+ # Remove an arg from the config
+ args.basedn = ""
+ modify_dsrc(inst, log, args)
+ display_dsrc(inst, topo.logcap.log, args)
+ assert not topo.logcap.contains(different_suffix)
+ topo.logcap.flush()
+
+ # Remove the last entry, which should delete the file
+ delete_dsrc(inst, log, args)
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+ assert not os.path.exists(dsrc_file)
+
+ # Make sure display fails
+ with pytest.raises(ValueError):
+ display_dsrc(inst, log, args)
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main(["-s", CURRENT_FILE])
+
diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl
index fe9bc10e9..69f069297 100755
--- a/src/lib389/cli/dsctl
+++ b/src/lib389/cli/dsctl
@@ -23,6 +23,7 @@ from lib389.cli_ctl import tls as cli_tls
from lib389.cli_ctl import health as cli_health
from lib389.cli_ctl import nsstate as cli_nsstate
from lib389.cli_ctl import dbgen as cli_dbgen
+from lib389.cli_ctl import dsrc as cli_dsrc
from lib389.cli_ctl.instance import instance_remove_all
from lib389.cli_base import (
disconnect_instance,
@@ -61,6 +62,7 @@ cli_tls.create_parser(subparsers)
cli_health.create_parser(subparsers)
cli_nsstate.create_parser(subparsers)
cli_dbgen.create_parser(subparsers)
+cli_dsrc.create_parser(subparsers)
argcomplete.autocomplete(parser)
diff --git a/src/lib389/lib389/cli_ctl/dsrc.py b/src/lib389/lib389/cli_ctl/dsrc.py
new file mode 100644
index 000000000..e49c7f819
--- /dev/null
+++ b/src/lib389/lib389/cli_ctl/dsrc.py
@@ -0,0 +1,312 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import json
+from os.path import expanduser
+from os import path, remove
+from ldapurl import isLDAPUrl
+from ldap.dn import is_dn
+import configparser
+
+
+def create_dsrc(inst, log, args):
+ """Create the .dsrc file
+
+ [instance]
+ uri = ldaps://hostname:port
+ basedn = dc=example,dc=com
+ binddn = uid=user,....
+ saslmech = [EXTERNAL|PLAIN]
+ tls_cacertdir = /path/to/cacertdir
+ tls_cert = /path/to/user.crt
+ tls_key = /path/to/user.key
+ tls_reqcert = [never, hard, allow]
+ starttls = [true, false]
+ pwdfile = /path/to/file
+ """
+
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+ config = configparser.ConfigParser()
+ config.read(dsrc_file)
+
+ # Verify this section does not already exist
+ instances = config.sections()
+ if inst.serverid in instances:
+ raise ValueError("There is already a configuration section for this instance!")
+
+ # Process and validate the args
+ config[inst.serverid] = {}
+
+ if args.uri is not None:
+ if not isLDAPUrl(args.uri):
+ raise ValueError("The uri is not a valid LDAP URL!")
+ if args.uri.startswith("ldapi"):
+ # We must use EXTERNAL saslmech for LDAPI
+ args.saslmech = "EXTERNAL"
+ config[inst.serverid]['uri'] = args.uri
+ if args.basedn is not None:
+ if not is_dn(args.basedn):
+ raise ValueError("The basedn is not a valid DN!")
+ config[inst.serverid]['basedn'] = args.basedn
+ if args.binddn is not None:
+ if not is_dn(args.binddn):
+ raise ValueError("The binddn is not a valid DN!")
+ config[inst.serverid]['binddn'] = args.binddn
+ if args.saslmech is not None:
+ if args.saslmech not in ['EXTERNAL', 'PLAIN']:
+ raise ValueError("The saslmech must be EXTERNAL or PLAIN!")
+ config[inst.serverid]['saslmech'] = args.saslmech
+ if args.tls_cacertdir is not None:
+ if not path.exists(args.tls_cacertdir):
+ raise ValueError('--tls-cacertdir directory does not exist!')
+ config[inst.serverid]['tls_cacertdir'] = args.tls_cacertdir
+ if args.tls_cert is not None:
+ if not path.exists(args.tls_cert):
+ raise ValueError('--tls-cert does not point to an existing file!')
+ config[inst.serverid]['tls_cert'] = args.tls_cert
+ if args.tls_key is not None:
+ if not path.exists(args.tls_key):
+ raise ValueError('--tls-key does not point to an existing file!')
+ config[inst.serverid]['tls_key'] = args.tls_key
+ if args.tls_reqcert is not None:
+ if args.tls_reqcert not in ['never', 'hard', 'allow']:
+ raise ValueError('--tls-reqcert value is invalid (must be either "never", "allow", or "hard")!')
+ config[inst.serverid]['tls_reqcert'] = args.tls_reqcert
+ if args.starttls:
+ config[inst.serverid]['starttls'] = 'true'
+ if args.pwdfile is not None:
+ if not path.exists(args.pwdfile):
+ raise ValueError('--pwdfile does not exist!')
+ config[inst.serverid]['pwdfile'] = args.pwdfile
+
+ if len(config[inst.serverid]) == 0:
+ # No args set
+ raise ValueError("You must set at least one argument for the new dsrc file!")
+
+ # Print a preview of the config
+ log.info(f'Updating "{dsrc_file}" with:\n')
+ log.info(f' [{inst.serverid}]')
+ for k, v in config[inst.serverid].items():
+ log.info(f' {k} = {v}')
+
+ # Perform confirmation?
+ if not args.do_it:
+ while 1:
+ val = input(f'\nUpdate "{dsrc_file}" ? [yes]: ').rstrip().lower()
+ if val == '' or val == 'y' or val == 'yes':
+ break
+ if val == 'n' or val == 'no':
+ return
+
+ # Now write the file
+ with open(dsrc_file, 'w') as configfile:
+ config.write(configfile)
+
+ log.info(f'Successfully updated: {dsrc_file}')
+
+
+def modify_dsrc(inst, log, args):
+ """Modify the instance config
+ """
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+
+ if path.exists(dsrc_file):
+ config = configparser.ConfigParser()
+ config.read(dsrc_file)
+
+ # Verify we have a section to modify
+ instances = config.sections()
+ if inst.serverid not in instances:
+ raise ValueError("There is no configuration section for this instance to modify!")
+
+ # Process and validate the args
+ if args.uri is not None:
+ if not isLDAPUrl(args.uri):
+ raise ValueError("The uri is not a valid LDAP URL!")
+ if args.uri.startswith("ldapi"):
+ # We must use EXTERNAL saslmech for LDAPI
+ args.saslmech = "EXTERNAL"
+ if args.uri == '':
+ del config[inst.serverid]['uri']
+ else:
+ config[inst.serverid]['uri'] = args.uri
+ if args.basedn is not None:
+ if not is_dn(args.basedn):
+ raise ValueError("The basedn is not a valid DN!")
+ if args.basedn == '':
+ del config[inst.serverid]['basedn']
+ else:
+ config[inst.serverid]['basedn'] = args.basedn
+ if args.binddn is not None:
+ if not is_dn(args.binddn):
+ raise ValueError("The binddn is not a valid DN!")
+ if args.binddn == '':
+ del config[inst.serverid]['binddn']
+ else:
+ config[inst.serverid]['binddn'] = args.binddn
+ if args.saslmech is not None:
+ if args.saslmech not in ['EXTERNAL', 'PLAIN']:
+ raise ValueError("The saslmech must be EXTERNAL or PLAIN!")
+ if args.saslmech == '':
+ del config[inst.serverid]['saslmech']
+ else:
+ config[inst.serverid]['saslmech'] = args.saslmech
+ if args.tls_cacertdir is not None:
+ if not path.exists(args.tls_cacertdir):
+ raise ValueError('--tls-cacertdir directory does not exist!')
+ if args.tls_cacertdir == '':
+ del config[inst.serverid]['tls_cacertdir']
+ else:
+ config[inst.serverid]['tls_cacertdir'] = args.tls_cacertdir
+ if args.tls_cert is not None:
+ if not path.exists(args.tls_cert):
+ raise ValueError('--tls-cert does not point to an existing file!')
+ if args.tls_cert == '':
+ del config[inst.serverid]['tls_cert']
+ else:
+ config[inst.serverid]['tls_cert'] = args.tls_cert
+ if args.tls_key is not None:
+ if not path.exists(args.tls_key):
+ raise ValueError('--tls-key does not point to an existing file!')
+ if args.tls_key == '':
+ del config[inst.serverid]['tls_key']
+ else:
+ config[inst.serverid]['tls_key'] = args.tls_key
+ if args.tls_reqcert is not None:
+ if args.tls_reqcert not in ['never', 'hard', 'allow']:
+ raise ValueError('--tls-reqcert value is invalid (must be either "never", "allow", or "hard")!')
+ if args.tls_reqcert == '':
+ del config[inst.serverid]['tls_reqcert']
+ else:
+ config[inst.serverid]['tls_reqcert'] = args.tls_reqcert
+ if args.starttls:
+ config[inst.serverid]['starttls'] = 'true'
+ if args.cancel_starttls:
+ config[inst.serverid]['starttls'] = 'false'
+ if args.pwdfile is not None:
+ if not path.exists(args.pwdfile):
+ raise ValueError('--pwdfile does not exist!')
+ if args.pwdfile == '':
+ del config[inst.serverid]['pwdfile']
+ else:
+ config[inst.serverid]['pwdfile'] = args.pwdfile
+
+ # Okay now rewrite the file
+ with open(dsrc_file, 'w') as configfile:
+ config.write(configfile)
+
+ log.info(f'Successfully updated: {dsrc_file}')
+ else:
+ raise ValueError(f'There is no .dsrc file "{dsrc_file}" to update!')
+
+
+def delete_dsrc(inst, log, args):
+ """Delete the .dsrc file
+ """
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+ if path.exists(dsrc_file):
+ if not args.do_it:
+ # Get confirmation
+ while 1:
+ val = input(f'\nAre you sure you want to remove this instances configuration ? [no]: ').rstrip().lower()
+ if val == 'y' or val == 'yes':
+ break
+ if val == '' or val == 'n' or val == 'no':
+ return
+
+ config = configparser.ConfigParser()
+ config.read(dsrc_file)
+ instances = config.sections()
+ if inst.serverid not in instances:
+ raise ValueError("The is no configuration for this instance")
+
+ # Update the config object
+ del config[inst.serverid]
+
+ if len(config.sections()) == 0:
+ # The file would be empty so just delete it
+ try:
+ remove(dsrc_file)
+ log.info(f'Successfully removed: {dsrc_file}')
+ return
+ except OSError as e:
+ raise ValueError(f'Failed to delete "{dsrc_file}", error: {str(e)}')
+ else:
+ # write the updated config
+ with open(dsrc_file, 'w') as configfile:
+ config.write(configfile)
+ else:
+ raise ValueError(f'There is no .dsrc file "{dsrc_file}" to update!')
+
+ log.info(f'Successfully updated: {dsrc_file}')
+
+def display_dsrc(inst, log, args):
+ """Display the contents of the ~/.dsrc file
+ """
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+
+ if not path.exists(dsrc_file):
+ raise ValueError(f'There is no dsrc file "{dsrc_file}" to display!')
+
+ config = configparser.ConfigParser()
+ config.read(dsrc_file)
+ instances = config.sections()
+
+ for inst_section in instances:
+ if args.json:
+ log.info(json.dumps({inst_section: dict(config[inst_section])}, indent=4))
+ else:
+ log.info(f'[{inst_section}]')
+ for k, v in config[inst_section].items():
+ log.info(f'{k} = {v}')
+ log.info("")
+
+
+def create_parser(subparsers):
+ dsrc_parser = subparsers.add_parser('dsrc', help="Manage the .dsrc file")
+ subcommands = dsrc_parser.add_subparsers(help="action")
+
+ # Create .dsrc file
+ dsrc_create_parser = subcommands.add_parser('create', help='Generate the .dsrc file')
+ dsrc_create_parser.set_defaults(func=create_dsrc)
+ dsrc_create_parser.add_argument('--uri', help="The URI (LDAP URL) for the Directory Server instance.")
+ dsrc_create_parser.add_argument('--basedn', help="The default database suffix.")
+ dsrc_create_parser.add_argument('--binddn', help="The default Bind DN used or authentication.")
+ dsrc_create_parser.add_argument('--saslmech', help="The SASL mechanism to use: PLAIN or EXTERNAL.")
+ dsrc_create_parser.add_argument('--tls-cacertdir', help="The directory containing the Trusted Certificate Authority certificate.")
+ dsrc_create_parser.add_argument('--tls-cert', help="The absolute file name to the server certificate.")
+ dsrc_create_parser.add_argument('--tls-key', help="The absolute file name to the server certificate key.")
+ dsrc_create_parser.add_argument('--tls-reqcert', help="Request certificate strength: 'never', 'allow', 'hard'")
+ dsrc_create_parser.add_argument('--starttls', action='store_true', help="Use startTLS for connection to the server.")
+ dsrc_create_parser.add_argument('--pwdfile', help="The absolute path to a file containing the Bind DN's password.")
+ dsrc_create_parser.add_argument('--do-it', action='store_true', help="Create the file without any confirmation.")
+
+ dsrc_modify_parser = subcommands.add_parser('modify', help='Modify the .dsrc file')
+ dsrc_modify_parser.set_defaults(func=modify_dsrc)
+ dsrc_modify_parser.add_argument('--uri', nargs='?', const='', help="The URI (LDAP URL) for the Directory Server instance.")
+ dsrc_modify_parser.add_argument('--basedn', nargs='?', const='', help="The default database suffix.")
+ dsrc_modify_parser.add_argument('--binddn', nargs='?', const='', help="The default Bind DN used or authentication.")
+ dsrc_modify_parser.add_argument('--saslmech', nargs='?', const='', help="The SASL mechanism to use: PLAIN or EXTERNAL.")
+ dsrc_modify_parser.add_argument('--tls-cacertdir', nargs='?', const='', help="The directory containing the Trusted Certificate Authority certificate.")
+ dsrc_modify_parser.add_argument('--tls-cert', nargs='?', const='', help="The absolute file name to the server certificate.")
+ dsrc_modify_parser.add_argument('--tls-key', nargs='?', const='', help="The absolute file name to the server certificate key.")
+ dsrc_modify_parser.add_argument('--tls-reqcert', nargs='?', const='', help="Request certificate strength: 'never', 'allow', 'hard'")
+ dsrc_modify_parser.add_argument('--starttls', action='store_true', help="Use startTLS for connection to the server.")
+ dsrc_modify_parser.add_argument('--cancel-starttls', action='store_true', help="Do not use startTLS for connection to the server.")
+ dsrc_modify_parser.add_argument('--pwdfile', nargs='?', const='', help="The absolute path to a file containing the Bind DN's password.")
+ dsrc_modify_parser.add_argument('--do-it', action='store_true', help="Update the file without any confirmation.")
+
+ # Delete the instance from the .dsrc file
+ dsrc_delete_parser = subcommands.add_parser('delete', help='Delete instance configuration from the .dsrc file.')
+ dsrc_delete_parser.set_defaults(func=delete_dsrc)
+ dsrc_delete_parser.add_argument('--do-it', action='store_true',
+ help="Delete this instance's configuration from the .dsrc file.")
+
+ # Display .dsrc file
+ dsrc_display_parser = subcommands.add_parser('display', help='Display the contents of the .dsrc file.')
+ dsrc_display_parser.set_defaults(func=display_dsrc)
--
2.26.2

View File

@ -1,466 +0,0 @@
From 8d14ff153e9335b09739438344f9c3c78a496548 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 22 May 2020 10:42:11 -0400
Subject: [PATCH 08/12] Issue 51095 - abort operation if CSN can not be
generated
Bug Description: If we fail to get the system time then we were using an
uninitialized timespec struct which could lead to bizarre
times in CSN's.
Fix description: Check if the system time function fails, and if it does
then abort the update operation.
relates: https://pagure.io/389-ds-base/issue/51095
Reviewed by: firstyear & tbordaz(Thanks!!)
---
ldap/servers/plugins/replication/repl5.h | 2 +-
.../plugins/replication/repl5_replica.c | 33 ++++++++------
ldap/servers/slapd/back-ldbm/ldbm_add.c | 8 +++-
ldap/servers/slapd/back-ldbm/ldbm_delete.c | 9 +++-
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 10 ++++-
ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 8 +++-
ldap/servers/slapd/csngen.c | 18 +++++++-
ldap/servers/slapd/entrywsi.c | 15 ++++---
ldap/servers/slapd/slap.h | 2 +-
ldap/servers/slapd/slapi-plugin.h | 8 ++++
ldap/servers/slapd/slapi-private.h | 5 ++-
ldap/servers/slapd/time.c | 43 +++++++++++++------
12 files changed, 118 insertions(+), 43 deletions(-)
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 72b7089e3..638471744 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -776,7 +776,7 @@ void replica_disable_replication(Replica *r);
int replica_start_agreement(Replica *r, Repl_Agmt *ra);
int windows_replica_start_agreement(Replica *r, Repl_Agmt *ra);
-CSN *replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn);
+int32_t replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn);
int replica_get_attr(Slapi_PBlock *pb, const char *type, void *value);
/* mapping tree extensions manipulation */
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index 02caa88d9..f01782330 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -3931,11 +3931,9 @@ windows_replica_start_agreement(Replica *r, Repl_Agmt *ra)
* A callback function registered as op->o_csngen_handler and
* called by backend ops to generate opcsn.
*/
-CSN *
-replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn)
+int32_t
+replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn)
{
- CSN *opcsn = NULL;
-
Replica *replica = replica_get_replica_for_op(pb);
if (NULL != replica) {
Slapi_Operation *op;
@@ -3946,17 +3944,26 @@ replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn)
CSNGen *gen = (CSNGen *)object_get_data(gen_obj);
if (NULL != gen) {
/* The new CSN should be greater than the base CSN */
- csngen_new_csn(gen, &opcsn, PR_FALSE /* don't notify */);
- if (csn_compare(opcsn, basecsn) <= 0) {
- char opcsnstr[CSN_STRSIZE], basecsnstr[CSN_STRSIZE];
+ if (csngen_new_csn(gen, opcsn, PR_FALSE /* don't notify */) != CSN_SUCCESS) {
+ /* Failed to generate CSN we must abort */
+ object_release(gen_obj);
+ return -1;
+ }
+ if (csn_compare(*opcsn, basecsn) <= 0) {
+ char opcsnstr[CSN_STRSIZE];
+ char basecsnstr[CSN_STRSIZE];
char opcsn2str[CSN_STRSIZE];
- csn_as_string(opcsn, PR_FALSE, opcsnstr);
+ csn_as_string(*opcsn, PR_FALSE, opcsnstr);
csn_as_string(basecsn, PR_FALSE, basecsnstr);
- csn_free(&opcsn);
+ csn_free(opcsn);
csngen_adjust_time(gen, basecsn);
- csngen_new_csn(gen, &opcsn, PR_FALSE /* don't notify */);
- csn_as_string(opcsn, PR_FALSE, opcsn2str);
+ if (csngen_new_csn(gen, opcsn, PR_FALSE) != CSN_SUCCESS) {
+ /* Failed to generate CSN we must abort */
+ object_release(gen_obj);
+ return -1;
+ }
+ csn_as_string(*opcsn, PR_FALSE, opcsn2str);
slapi_log_err(SLAPI_LOG_WARNING, repl_plugin_name,
"replica_generate_next_csn - "
"opcsn=%s <= basecsn=%s, adjusted opcsn=%s\n",
@@ -3966,14 +3973,14 @@ replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn)
* Insert opcsn into the csn pending list.
* This is the notify effect in csngen_new_csn().
*/
- assign_csn_callback(opcsn, (void *)replica);
+ assign_csn_callback(*opcsn, (void *)replica);
}
object_release(gen_obj);
}
}
}
- return opcsn;
+ return 0;
}
/*
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
index d0d88bf16..ee366c74c 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
@@ -645,7 +645,13 @@ ldbm_back_add(Slapi_PBlock *pb)
* Current op is a user request. Opcsn will be assigned
* if the dn is in an updatable replica.
*/
- opcsn = entry_assign_operation_csn(pb, e, parententry ? parententry->ep_entry : NULL);
+ if (entry_assign_operation_csn(pb, e, parententry ? parententry->ep_entry : NULL, &opcsn) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_add",
+ "failed to generate add CSN for entry (%s), aborting operation\n",
+ slapi_entry_get_dn(e));
+ ldap_result_code = LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
}
if (opcsn != NULL) {
entry_set_csn(e, opcsn);
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index 873b5b00e..fbcb57310 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -464,7 +464,14 @@ replace_entry:
* by entry_assign_operation_csn() if the dn is in an
* updatable replica.
*/
- opcsn = entry_assign_operation_csn ( pb, e->ep_entry, NULL );
+ if (entry_assign_operation_csn(pb, e->ep_entry, NULL, &opcsn) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_delete",
+ "failed to generate delete CSN for entry (%s), aborting operation\n",
+ slapi_entry_get_dn(e->ep_entry));
+ retval = -1;
+ ldap_result_code = LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
}
if (opcsn != NULL) {
if (!is_fixup_operation) {
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index b0c477e3f..e9d7e87e3 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -598,12 +598,18 @@ ldbm_back_modify(Slapi_PBlock *pb)
goto error_return;
}
opcsn = operation_get_csn(operation);
- if (NULL == opcsn && operation->o_csngen_handler) {
+ if (opcsn == NULL && operation->o_csngen_handler) {
/*
* Current op is a user request. Opcsn will be assigned
* if the dn is in an updatable replica.
*/
- opcsn = entry_assign_operation_csn(pb, e->ep_entry, NULL);
+ if (entry_assign_operation_csn(pb, e->ep_entry, NULL, &opcsn) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modify",
+ "failed to generate modify CSN for entry (%s), aborting operation\n",
+ slapi_entry_get_dn(e->ep_entry));
+ ldap_result_code = LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
}
if (opcsn) {
entry_set_maxcsn(e->ep_entry, opcsn);
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
index 26698012a..fde83c99f 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
@@ -543,7 +543,13 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
* Current op is a user request. Opcsn will be assigned
* if the dn is in an updatable replica.
*/
- opcsn = entry_assign_operation_csn(pb, e->ep_entry, parententry ? parententry->ep_entry : NULL);
+ if (entry_assign_operation_csn(pb, e->ep_entry, parententry ? parententry->ep_entry : NULL, &opcsn) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modrdn",
+ "failed to generate modrdn CSN for entry (%s), aborting operation\n",
+ slapi_entry_get_dn(e->ep_entry));
+ ldap_result_code = LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
}
if (opcsn != NULL) {
entry_set_maxcsn(e->ep_entry, opcsn);
diff --git a/ldap/servers/slapd/csngen.c b/ldap/servers/slapd/csngen.c
index 68dbbda8e..b08d8b25c 100644
--- a/ldap/servers/slapd/csngen.c
+++ b/ldap/servers/slapd/csngen.c
@@ -164,6 +164,7 @@ csngen_free(CSNGen **gen)
int
csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
{
+ struct timespec now = {0};
int rc = CSN_SUCCESS;
time_t cur_time;
int delta;
@@ -179,12 +180,25 @@ csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
return CSN_MEMORY_ERROR;
}
- slapi_rwlock_wrlock(gen->lock);
+ if ((rc = slapi_clock_gettime(&now)) != 0) {
+ /* Failed to get system time, we must abort */
+ slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn",
+ "Failed to get system time (%s)\n",
+ slapd_system_strerror(rc));
+ return CSN_TIME_ERROR;
+ }
+ cur_time = now.tv_sec;
- cur_time = slapi_current_utc_time();
+ slapi_rwlock_wrlock(gen->lock);
/* check if the time should be adjusted */
delta = cur_time - gen->state.sampled_time;
+ if (delta > _SEC_PER_DAY || delta < (-1 * _SEC_PER_DAY)) {
+ /* We had a jump larger than a day */
+ slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn",
+ "Detected large jump in CSN time. Delta: %d (current time: %ld vs previous time: %ld)\n",
+ delta, cur_time, gen->state.sampled_time);
+ }
if (delta > 0) {
rc = _csngen_adjust_local_time(gen, cur_time);
if (rc != CSN_SUCCESS) {
diff --git a/ldap/servers/slapd/entrywsi.c b/ldap/servers/slapd/entrywsi.c
index 5d1d7238a..31bf65d8e 100644
--- a/ldap/servers/slapd/entrywsi.c
+++ b/ldap/servers/slapd/entrywsi.c
@@ -224,13 +224,12 @@ entry_add_rdn_csn(Slapi_Entry *e, const CSN *csn)
slapi_rdn_free(&rdn);
}
-CSN *
-entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry)
+int32_t
+entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry, CSN **opcsn)
{
Slapi_Operation *op;
const CSN *basecsn = NULL;
const CSN *parententry_dncsn = NULL;
- CSN *opcsn = NULL;
slapi_pblock_get(pb, SLAPI_OPERATION, &op);
@@ -252,14 +251,16 @@ entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parent
basecsn = parententry_dncsn;
}
}
- opcsn = op->o_csngen_handler(pb, basecsn);
+ if(op->o_csngen_handler(pb, basecsn, opcsn) != 0) {
+ return -1;
+ }
- if (NULL != opcsn) {
- operation_set_csn(op, opcsn);
+ if (*opcsn) {
+ operation_set_csn(op, *opcsn);
}
}
- return opcsn;
+ return 0;
}
/*
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index a4cae784a..cef8c789c 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1480,7 +1480,7 @@ struct op;
typedef void (*result_handler)(struct conn *, struct op *, int, char *, char *, int, struct berval **);
typedef int (*search_entry_handler)(Slapi_Backend *, struct conn *, struct op *, struct slapi_entry *);
typedef int (*search_referral_handler)(Slapi_Backend *, struct conn *, struct op *, struct berval **);
-typedef CSN *(*csngen_handler)(Slapi_PBlock *pb, const CSN *basecsn);
+typedef int32_t *(*csngen_handler)(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn);
typedef int (*replica_attr_handler)(Slapi_PBlock *pb, const char *type, void **value);
/*
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index be1e52e4d..834a98742 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -6743,6 +6743,14 @@ int slapi_reslimit_get_integer_limit(Slapi_Connection *conn, int handle, int *li
*/
time_t slapi_current_time(void) __attribute__((deprecated));
+/**
+ * Get the system time and check for errors. Return
+ *
+ * \param tp - a timespec struct where the system time is set
+ * \return result code, upon success tp is set to the system time
+ */
+int32_t slapi_clock_gettime(struct timespec *tp);
+
/**
* Returns the current system time as a hr clock relative to uptime
* This means the clock is not affected by timezones
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index d85ee43e5..c98c1947c 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -233,7 +233,8 @@ enum
CSN_INVALID_PARAMETER, /* invalid function argument */
CSN_INVALID_FORMAT, /* invalid state format */
CSN_LDAP_ERROR, /* LDAP operation failed */
- CSN_NSPR_ERROR /* NSPR API failure */
+ CSN_NSPR_ERROR, /* NSPR API failure */
+ CSN_TIME_ERROR /* Error generating new CSN due to clock failure */
};
typedef struct csngen CSNGen;
@@ -326,7 +327,7 @@ int slapi_entries_diff(Slapi_Entry **old_entries, Slapi_Entry **new_entries, int
void set_attr_to_protected_list(char *attr, int flag);
/* entrywsi.c */
-CSN *entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry);
+int32_t entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry, CSN **opcsn);
const CSN *entry_get_maxcsn(const Slapi_Entry *entry);
void entry_set_maxcsn(Slapi_Entry *entry, const CSN *csn);
const CSN *entry_get_dncsn(const Slapi_Entry *entry);
diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c
index 8048a3359..545538404 100644
--- a/ldap/servers/slapd/time.c
+++ b/ldap/servers/slapd/time.c
@@ -61,6 +61,25 @@ poll_current_time()
return 0;
}
+/*
+ * Check if the time function returns an error. If so return the errno
+ */
+int32_t
+slapi_clock_gettime(struct timespec *tp)
+{
+ int32_t rc = 0;
+
+ PR_ASSERT(tp && tp->tv_nsec == 0 && tp->tv_sec == 0);
+
+ if (clock_gettime(CLOCK_REALTIME, tp) != 0) {
+ rc = errno;
+ }
+
+ PR_ASSERT(rc == 0);
+
+ return rc;
+}
+
time_t
current_time(void)
{
@@ -69,7 +88,7 @@ current_time(void)
* but this should be removed in favour of the
* more accurately named slapi_current_utc_time
*/
- struct timespec now;
+ struct timespec now = {0};
clock_gettime(CLOCK_REALTIME, &now);
return now.tv_sec;
}
@@ -83,7 +102,7 @@ slapi_current_time(void)
struct timespec
slapi_current_rel_time_hr(void)
{
- struct timespec now;
+ struct timespec now = {0};
clock_gettime(CLOCK_MONOTONIC, &now);
return now;
}
@@ -91,7 +110,7 @@ slapi_current_rel_time_hr(void)
struct timespec
slapi_current_utc_time_hr(void)
{
- struct timespec ltnow;
+ struct timespec ltnow = {0};
clock_gettime(CLOCK_REALTIME, &ltnow);
return ltnow;
}
@@ -99,7 +118,7 @@ slapi_current_utc_time_hr(void)
time_t
slapi_current_utc_time(void)
{
- struct timespec ltnow;
+ struct timespec ltnow = {0};
clock_gettime(CLOCK_REALTIME, &ltnow);
return ltnow.tv_sec;
}
@@ -108,8 +127,8 @@ void
slapi_timestamp_utc_hr(char *buf, size_t bufsize)
{
PR_ASSERT(bufsize >= SLAPI_TIMESTAMP_BUFSIZE);
- struct timespec ltnow;
- struct tm utctm;
+ struct timespec ltnow = {0};
+ struct tm utctm = {0};
clock_gettime(CLOCK_REALTIME, &ltnow);
gmtime_r(&(ltnow.tv_sec), &utctm);
strftime(buf, bufsize, "%Y%m%d%H%M%SZ", &utctm);
@@ -140,7 +159,7 @@ format_localTime_log(time_t t, int initsize __attribute__((unused)), char *buf,
{
long tz;
- struct tm *tmsp, tms;
+ struct tm *tmsp, tms = {0};
char tbuf[*bufsize];
char sign;
/* make sure our buffer will be big enough. Need at least 29 */
@@ -191,7 +210,7 @@ format_localTime_hr_log(time_t t, long nsec, int initsize __attribute__((unused)
{
long tz;
- struct tm *tmsp, tms;
+ struct tm *tmsp, tms = {0};
char tbuf[*bufsize];
char sign;
/* make sure our buffer will be big enough. Need at least 39 */
@@ -278,7 +297,7 @@ slapi_timespec_expire_check(struct timespec *expire)
if (expire->tv_sec == 0 && expire->tv_nsec == 0) {
return TIMER_CONTINUE;
}
- struct timespec now;
+ struct timespec now = {0};
clock_gettime(CLOCK_MONOTONIC, &now);
if (now.tv_sec > expire->tv_sec ||
(expire->tv_sec == now.tv_sec && now.tv_sec > expire->tv_nsec)) {
@@ -293,7 +312,7 @@ format_localTime(time_t from)
in the syntax of a generalizedTime, except without the time zone. */
{
char *into;
- struct tm t;
+ struct tm t = {0};
localtime_r(&from, &t);
@@ -362,7 +381,7 @@ format_genTime(time_t from)
in the syntax of a generalizedTime. */
{
char *into;
- struct tm t;
+ struct tm t = {0};
gmtime_r(&from, &t);
into = slapi_ch_malloc(SLAPI_TIMESTAMP_BUFSIZE);
@@ -382,7 +401,7 @@ time_t
read_genTime(struct berval *from)
{
struct tm t = {0};
- time_t retTime;
+ time_t retTime = {0};
time_t diffsec = 0;
int i, gflag = 0, havesec = 0;
--
2.26.2

View File

@ -0,0 +1,902 @@
From 201cb1147c0a34bddbd3e5c03aecd804c47a9905 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Thu, 19 Nov 2020 10:21:10 +0100
Subject: [PATCH 2/2] Issue 4440 - BUG - ldifgen with --start-idx option fails
with unsupported operand (#4444)
Bug description:
Got TypeError exception when usign:
dsctl -v slapd-localhost ldifgen users --suffix
dc=example,dc=com --parent ou=people,dc=example,dc=com
--number 100000 --generic --start-idx=50
The reason is that by default python parser provides
value for numeric options:
as an integer if specified by "--option value" or
as a string if specified by "--option=value"
Fix description:
convert the numeric parameters to integer when using it.
options impacted are:
- in users subcommand: --number , --start-idx
- in mod-load subcommand: --num-users, --add-users,
--del-users, --modrdn-users, --mod-users
FYI: An alternative solution would have been to indicate to the
parser that these values are integers. But two reasons
led me to implement the first solution:
- the first solution fixes the problem for all users while the
second one fixes only the dsctl command.
- first solution is easier to test:
I just added a new test file generated by a script
that duplicated existing ldifgen test, renamed the
test cases and replaced the numeric arguments by
strings.
Second solution would need to redesign the test framework
to be able to test the parser.
relates: https://github.com/389ds/389-ds-base/issues/4440
Reviewed by:
Platforms tested: F32
(cherry picked from commit 3c3e1f30cdb046a1aabb93aacebcf261a76a0892)
---
.../tests/suites/clu/dbgen_test_usan.py | 806 ++++++++++++++++++
src/lib389/lib389/cli_ctl/dbgen.py | 10 +-
src/lib389/lib389/dbgen.py | 3 +
3 files changed, 814 insertions(+), 5 deletions(-)
create mode 100644 dirsrvtests/tests/suites/clu/dbgen_test_usan.py
diff --git a/dirsrvtests/tests/suites/clu/dbgen_test_usan.py b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py
new file mode 100644
index 000000000..80ff63417
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py
@@ -0,0 +1,806 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import time
+
+"""
+ This file contains tests similar to dbgen_test.py
+ except that paramaters that are number are expressed as string
+ (to mimic the parameters parser default behavior which returns an
+ int when parsing "option value" and a string when parsing "option=value"
+ This file has been generated by usign:
+sed '
+9r z1
+s/ test_/ test_usan/
+/args.*= [0-9]/s,[0-9]*$,"&",
+/:id:/s/.$/1/
+' dbgen_test.py > dbgen_test_usan.py
+ ( with z1 file containing this comment )
+"""
+
+
+
+import subprocess
+import pytest
+
+from lib389.cli_ctl.dbgen import *
+from lib389.cos import CosClassicDefinitions, CosPointerDefinitions, CosIndirectDefinitions, CosTemplates
+from lib389.idm.account import Accounts
+from lib389.idm.group import Groups
+from lib389.idm.role import ManagedRoles, FilteredRoles, NestedRoles
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_st
+from lib389.cli_base import FakeArgs
+
+pytestmark = pytest.mark.tier0
+
+LOG_FILE = '/tmp/dbgen.log'
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="function")
+def set_log_file_and_ldif(topology_st, request):
+ global ldif_file
+ ldif_file = get_ldif_dir(topology_st.standalone) + '/created.ldif'
+
+ fh = logging.FileHandler(LOG_FILE)
+ fh.setLevel(logging.DEBUG)
+ log.addHandler(fh)
+
+ def fin():
+ log.info('Delete files')
+ os.remove(LOG_FILE)
+ os.remove(ldif_file)
+
+ request.addfinalizer(fin)
+
+
+def run_offline_import(instance, ldif_file):
+ log.info('Stopping the server and running offline import...')
+ instance.stop()
+ assert instance.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None,
+ import_file=ldif_file)
+ instance.start()
+
+
+def run_ldapmodify_from_file(instance, ldif_file, output_to_check=None):
+ LDAP_MOD = '/usr/bin/ldapmodify'
+ log.info('Add entries from ldif file with ldapmodify')
+ result = subprocess.check_output([LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD,
+ '-h', instance.host, '-p', str(instance.port), '-af', ldif_file])
+ if output_to_check is not None:
+ assert output_to_check in ensure_str(result)
+
+
+def check_value_in_log_and_reset(content_list):
+ with open(LOG_FILE, 'r+') as f:
+ file_content = f.read()
+ log.info('Check if content is present in output')
+ for item in content_list:
+ assert item in file_content
+
+ log.info('Reset log file for next test')
+ f.truncate(0)
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_users(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create ldif with users
+
+ :id: 426b5b94-9923-454d-a736-7e71ca985e91
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with users
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.suffix = DEFAULT_SUFFIX
+ args.parent = 'ou=people,dc=example,dc=com'
+ args.number = "1000"
+ args.rdn_cn = False
+ args.generic = True
+ args.start_idx = "50"
+ args.localize = False
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'suffix={}'.format(args.suffix),
+ 'parent={}'.format(args.parent),
+ 'number={}'.format(args.number),
+ 'rdn-cn={}'.format(args.rdn_cn),
+ 'generic={}'.format(args.generic),
+ 'start-idx={}'.format(args.start_idx),
+ 'localize={}'.format(args.localize),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create users ldif')
+ dbgen_create_users(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ log.info('Get number of accounts before import')
+ accounts = Accounts(standalone, DEFAULT_SUFFIX)
+ count_account = len(accounts.filter('(uid=*)'))
+
+ run_offline_import(standalone, ldif_file)
+
+ log.info('Check that accounts are imported')
+ assert len(accounts.filter('(uid=*)')) > count_account
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_groups(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create ldif with group
+
+ :id: 97207413-9a93-4065-a5ec-63aa93801a31
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with group
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+ LDAP_RESULT = 'adding new entry "cn=myGroup-1,ou=groups,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.NAME = 'myGroup'
+ args.parent = 'ou=groups,dc=example,dc=com'
+ args.suffix = DEFAULT_SUFFIX
+ args.number = "1"
+ args.num_members = "1000"
+ args.create_members = True
+ args.member_attr = 'uniquemember'
+ args.member_parent = 'ou=people,dc=example,dc=com'
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'number={}'.format(args.number),
+ 'suffix={}'.format(args.suffix),
+ 'num-members={}'.format(args.num_members),
+ 'create-members={}'.format(args.create_members),
+ 'member-parent={}'.format(args.member_parent),
+ 'member-attr={}'.format(args.member_attr),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create group ldif')
+ dbgen_create_groups(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ log.info('Get number of accounts before import')
+ accounts = Accounts(standalone, DEFAULT_SUFFIX)
+ count_account = len(accounts.filter('(uid=*)'))
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ # ldapmodify will complain about already existing parent which causes subprocess to return exit code != 0
+ with pytest.raises(subprocess.CalledProcessError):
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that accounts are imported')
+ assert len(accounts.filter('(uid=*)')) > count_account
+
+ log.info('Check that group is imported')
+ groups = Groups(standalone, DEFAULT_SUFFIX)
+ assert groups.exists(args.NAME + '-1')
+ new_group = groups.get(args.NAME + '-1')
+ new_group.present('uniquemember', 'uid=group_entry1-0152,ou=people,dc=example,dc=com')
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_classic(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a COS definition
+
+ :id: 8557f994-8a91-4f8a-86f6-9cb826a0b8f1
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with classic COS definition
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def,ou=cos definitions,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.type = 'classic'
+ args.NAME = 'My_Postal_Def'
+ args.parent = 'ou=cos definitions,dc=example,dc=com'
+ args.create_parent = True
+ args.cos_specifier = 'businessCategory'
+ args.cos_attr = ['postalcode', 'telephonenumber']
+ args.cos_template = 'cn=sales,cn=classicCoS,dc=example,dc=com'
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'type={}'.format(args.type),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'cos-specifier={}'.format(args.cos_specifier),
+ 'cos-template={}'.format(args.cos_template),
+ 'cos-attr={}'.format(args.cos_attr),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create COS definition ldif')
+ dbgen_create_cos_def(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that COS definition is imported')
+ cos_def = CosClassicDefinitions(standalone, args.parent)
+ assert cos_def.exists(args.NAME)
+ new_cos = cos_def.get(args.NAME)
+ assert new_cos.present('cosTemplateDN', args.cos_template)
+ assert new_cos.present('cosSpecifier', args.cos_specifier)
+ assert new_cos.present('cosAttribute', args.cos_attr[0])
+ assert new_cos.present('cosAttribute', args.cos_attr[1])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_pointer(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a COS definition
+
+ :id: 6b26ca6d-226a-4f93-925e-faf95cc20211
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with pointer COS definition
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_pointer,ou=cos pointer definitions,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.type = 'pointer'
+ args.NAME = 'My_Postal_Def_pointer'
+ args.parent = 'ou=cos pointer definitions,dc=example,dc=com'
+ args.create_parent = True
+ args.cos_specifier = None
+ args.cos_attr = ['postalcode', 'telephonenumber']
+ args.cos_template = 'cn=sales,cn=pointerCoS,dc=example,dc=com'
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'type={}'.format(args.type),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'cos-template={}'.format(args.cos_template),
+ 'cos-attr={}'.format(args.cos_attr),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create COS definition ldif')
+ dbgen_create_cos_def(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that COS definition is imported')
+ cos_def = CosPointerDefinitions(standalone, args.parent)
+ assert cos_def.exists(args.NAME)
+ new_cos = cos_def.get(args.NAME)
+ assert new_cos.present('cosTemplateDN', args.cos_template)
+ assert new_cos.present('cosAttribute', args.cos_attr[0])
+ assert new_cos.present('cosAttribute', args.cos_attr[1])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_indirect(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a COS definition
+
+ :id: ab4b799e-e801-432a-a61d-badad2628201
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with indirect COS definition
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_indirect,ou=cos indirect definitions,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.type = 'indirect'
+ args.NAME = 'My_Postal_Def_indirect'
+ args.parent = 'ou=cos indirect definitions,dc=example,dc=com'
+ args.create_parent = True
+ args.cos_specifier = 'businessCategory'
+ args.cos_attr = ['postalcode', 'telephonenumber']
+ args.cos_template = None
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'type={}'.format(args.type),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'cos-specifier={}'.format(args.cos_specifier),
+ 'cos-attr={}'.format(args.cos_attr),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create COS definition ldif')
+ dbgen_create_cos_def(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that COS definition is imported')
+ cos_def = CosIndirectDefinitions(standalone, args.parent)
+ assert cos_def.exists(args.NAME)
+ new_cos = cos_def.get(args.NAME)
+ assert new_cos.present('cosIndirectSpecifier', args.cos_specifier)
+ assert new_cos.present('cosAttribute', args.cos_attr[0])
+ assert new_cos.present('cosAttribute', args.cos_attr[1])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_template(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a COS template
+
+ :id: 544017c7-4a82-4e7d-a047-00b68a28e071
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with COS template
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Template,ou=cos templates,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.NAME = 'My_Template'
+ args.parent = 'ou=cos templates,dc=example,dc=com'
+ args.create_parent = True
+ args.cos_priority = "1"
+ args.cos_attr_val = 'postalcode:12345'
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'cos-priority={}'.format(args.cos_priority),
+ 'cos-attr-val={}'.format(args.cos_attr_val),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create COS template ldif')
+ dbgen_create_cos_tmp(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that COS template is imported')
+ cos_temp = CosTemplates(standalone, args.parent)
+ assert cos_temp.exists(args.NAME)
+ new_cos = cos_temp.get(args.NAME)
+ assert new_cos.present('cosPriority', str(args.cos_priority))
+ assert new_cos.present('postalcode', '12345')
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_managed_role(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a managed role
+
+ :id: 10e77b41-0bc1-4ad5-a144-2c5107455b91
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with managed role
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Managed_Role,ou=managed roles,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+
+ args.NAME = 'My_Managed_Role'
+ args.parent = 'ou=managed roles,dc=example,dc=com'
+ args.create_parent = True
+ args.type = 'managed'
+ args.filter = None
+ args.role_dn = None
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'type={}'.format(args.type),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create managed role ldif')
+ dbgen_create_role(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that managed role is imported')
+ roles = ManagedRoles(standalone, DEFAULT_SUFFIX)
+ assert roles.exists(args.NAME)
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_filtered_role(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a filtered role
+
+ :id: cb3c8ea8-4234-40e2-8810-fb6a25973921
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with filtered role
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Filtered_Role,ou=filtered roles,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+
+ args.NAME = 'My_Filtered_Role'
+ args.parent = 'ou=filtered roles,dc=example,dc=com'
+ args.create_parent = True
+ args.type = 'filtered'
+ args.filter = '"objectclass=posixAccount"'
+ args.role_dn = None
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'type={}'.format(args.type),
+ 'filter={}'.format(args.filter),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create filtered role ldif')
+ dbgen_create_role(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that filtered role is imported')
+ roles = FilteredRoles(standalone, DEFAULT_SUFFIX)
+ assert roles.exists(args.NAME)
+ new_role = roles.get(args.NAME)
+ assert new_role.present('nsRoleFilter', args.filter)
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_nested_role(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a nested role
+
+ :id: 97fff0a8-3103-4adb-be04-2799ff58d8f1
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with nested role
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Nested_Role,ou=nested roles,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.NAME = 'My_Nested_Role'
+ args.parent = 'ou=nested roles,dc=example,dc=com'
+ args.create_parent = True
+ args.type = 'nested'
+ args.filter = None
+ args.role_dn = ['cn=some_role,ou=roles,dc=example,dc=com']
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'type={}'.format(args.type),
+ 'role-dn={}'.format(args.role_dn),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create nested role ldif')
+ dbgen_create_role(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that nested role is imported')
+ roles = NestedRoles(standalone, DEFAULT_SUFFIX)
+ assert roles.exists(args.NAME)
+ new_role = roles.get(args.NAME)
+ assert new_role.present('nsRoleDN', args.role_dn[0])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_mod_ldif_mixed(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create mixed modification ldif
+
+ :id: 4a2e0901-2b48-452e-a4a0-507735132c81
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate modification ldif
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.parent = DEFAULT_SUFFIX
+ args.create_users = True
+ args.delete_users = True
+ args.create_parent = False
+ args.num_users = "1000"
+ args.add_users = "100"
+ args.del_users = "999"
+ args.modrdn_users = "100"
+ args.mod_users = "10"
+ args.mod_attrs = ['cn', 'uid', 'sn']
+ args.randomize = False
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'create-users={}'.format(args.create_users),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'delete-users={}'.format(args.delete_users),
+ 'num-users={}'.format(args.num_users),
+ 'add-users={}'.format(args.add_users),
+ 'del-users={}'.format(args.del_users),
+ 'modrdn-users={}'.format(args.modrdn_users),
+ 'mod-users={}'.format(args.mod_users),
+ 'mod-attrs={}'.format(args.mod_attrs),
+ 'randomize={}'.format(args.randomize),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create modification ldif')
+ dbgen_create_mods(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ log.info('Get number of accounts before import')
+ accounts = Accounts(standalone, DEFAULT_SUFFIX)
+ count_account = len(accounts.filter('(uid=*)'))
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ # ldapmodify will complain about a lot of changes done which causes subprocess to return exit code != 0
+ with pytest.raises(subprocess.CalledProcessError):
+ run_ldapmodify_from_file(standalone, ldif_file)
+
+ log.info('Check that some accounts are imported')
+ assert len(accounts.filter('(uid=*)')) > count_account
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_nested_ldif(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create nested ldif
+
+ :id: 9c281c28-4169-45e0-8c07-c5502d9a7581
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate nested ldif
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.suffix = DEFAULT_SUFFIX
+ args.node_limit = "100"
+ args.num_users = "600"
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'suffix={}'.format(args.suffix),
+ 'node-limit={}'.format(args.node_limit),
+ 'num-users={}'.format(args.num_users),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created nested LDIF file ({}) containing 6 nodes/subtrees'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create nested ldif')
+ dbgen_create_nested(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ log.info('Get number of accounts before import')
+ accounts = Accounts(standalone, DEFAULT_SUFFIX)
+ count_account = len(accounts.filter('(uid=*)'))
+ count_ou = len(accounts.filter('(ou=*)'))
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ # ldapmodify will complain about already existing suffix which causes subprocess to return exit code != 0
+ with pytest.raises(subprocess.CalledProcessError):
+ run_ldapmodify_from_file(standalone, ldif_file)
+
+ standalone.restart()
+
+ log.info('Check that accounts are imported')
+ assert len(accounts.filter('(uid=*)')) > count_account
+ assert len(accounts.filter('(ou=*)')) > count_ou
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/src/lib389/lib389/cli_ctl/dbgen.py b/src/lib389/lib389/cli_ctl/dbgen.py
index 7bc3892ba..058342fb1 100644
--- a/src/lib389/lib389/cli_ctl/dbgen.py
+++ b/src/lib389/lib389/cli_ctl/dbgen.py
@@ -451,13 +451,13 @@ def dbgen_create_mods(inst, log, args):
props = {
"createUsers": args.create_users,
"deleteUsers": args.delete_users,
- "numUsers": args.num_users,
+ "numUsers": int(args.num_users),
"parent": args.parent,
"createParent": args.create_parent,
- "addUsers": args.add_users,
- "delUsers": args.del_users,
- "modrdnUsers": args.modrdn_users,
- "modUsers": args.mod_users,
+ "addUsers": int(args.add_users),
+ "delUsers": int(args.del_users),
+ "modrdnUsers": int(args.modrdn_users),
+ "modUsers": int(args.mod_users),
"random": args.randomize,
"modAttrs": args.mod_attrs
}
diff --git a/src/lib389/lib389/dbgen.py b/src/lib389/lib389/dbgen.py
index 6273781a2..10fb200f7 100644
--- a/src/lib389/lib389/dbgen.py
+++ b/src/lib389/lib389/dbgen.py
@@ -220,6 +220,9 @@ def dbgen_users(instance, number, ldif_file, suffix, generic=False, entry_name="
"""
Generate an LDIF of randomly named entries
"""
+ # Lets insure that integer parameters are not string
+ number=int(number)
+ startIdx=int(startIdx)
familyname_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-FamilyNames')
givename_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-GivenNames')
familynames = []
--
2.26.2

View File

@ -1,179 +0,0 @@
From 52ce524f7672563b543e84401665765cfa72dea5 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 26 May 2020 17:03:11 -0400
Subject: [PATCH 09/12] Issue 51113 - Allow using uid for replication manager
entry
Bug Description: Currently it was hardcoded to only allow "cn" as
the rdn attribute for the replication manager entry.
Fix description: Allow setting the rdn attribute of the replication
manager DS ldap object, and include the schema that
allows "uid".
relates: https://pagure.io/389-ds-base/issue/51113
Reviewed by: spichugi & firstyear(Thanks!!)
---
src/lib389/lib389/cli_conf/replication.py | 53 ++++++++++++-----------
src/lib389/lib389/replica.py | 11 +++--
2 files changed, 35 insertions(+), 29 deletions(-)
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
index 09cb9b435..b9bc3d291 100644
--- a/src/lib389/lib389/cli_conf/replication.py
+++ b/src/lib389/lib389/cli_conf/replication.py
@@ -199,19 +199,21 @@ def enable_replication(inst, basedn, log, args):
# Create replication manager if password was provided
if args.bind_dn and args.bind_passwd:
- cn_rdn = args.bind_dn.split(",", 1)[0]
- cn_val = cn_rdn.split("=", 1)[1]
- manager = BootstrapReplicationManager(inst, dn=args.bind_dn)
+ rdn = args.bind_dn.split(",", 1)[0]
+ rdn_attr, rdn_val = rdn.split("=", 1)
+ manager = BootstrapReplicationManager(inst, dn=args.bind_dn, rdn_attr=rdn_attr)
try:
manager.create(properties={
- 'cn': cn_val,
+ 'cn': rdn_val,
+ 'uid': rdn_val,
'userPassword': args.bind_passwd
})
except ldap.ALREADY_EXISTS:
# Already there, but could have different password. Delete and recreate
manager.delete()
manager.create(properties={
- 'cn': cn_val,
+ 'cn': rdn_val,
+ 'uid': rdn_val,
'userPassword': args.bind_passwd
})
except ldap.NO_SUCH_OBJECT:
@@ -511,22 +513,23 @@ def get_cl(inst, basedn, log, args):
def create_repl_manager(inst, basedn, log, args):
- manager_cn = "replication manager"
+ manager_name = "replication manager"
repl_manager_password = ""
repl_manager_password_confirm = ""
if args.name:
- manager_cn = args.name
-
- if is_a_dn(manager_cn):
- # A full DN was provided, make sure it uses "cn" for the RDN
- if manager_cn.split("=", 1)[0].lower() != "cn":
- raise ValueError("Replication manager DN must use \"cn\" for the rdn attribute")
- manager_dn = manager_cn
- manager_rdn = manager_dn.split(",", 1)[0]
- manager_cn = manager_rdn.split("=", 1)[1]
+ manager_name = args.name
+
+ if is_a_dn(manager_name):
+ # A full DN was provided
+ manager_dn = manager_name
+ manager_rdn = manager_name.split(",", 1)[0]
+ manager_attr, manager_name = manager_rdn.split("=", 1)
+ if manager_attr.lower() not in ['cn', 'uid']:
+ raise ValueError(f'The RDN attribute "{manager_attr}" is not allowed, you must use "cn" or "uid"')
else:
- manager_dn = "cn={},cn=config".format(manager_cn)
+ manager_dn = "cn={},cn=config".format(manager_name)
+ manager_attr = "cn"
if args.passwd:
repl_manager_password = args.passwd
@@ -544,10 +547,11 @@ def create_repl_manager(inst, basedn, log, args):
repl_manager_password = ""
repl_manager_password_confirm = ""
- manager = BootstrapReplicationManager(inst, dn=manager_dn)
+ manager = BootstrapReplicationManager(inst, dn=manager_dn, rdn_attr=manager_attr)
try:
manager.create(properties={
- 'cn': manager_cn,
+ 'cn': manager_name,
+ 'uid': manager_name,
'userPassword': repl_manager_password
})
if args.suffix:
@@ -564,7 +568,8 @@ def create_repl_manager(inst, basedn, log, args):
# Already there, but could have different password. Delete and recreate
manager.delete()
manager.create(properties={
- 'cn': manager_cn,
+ 'cn': manager_name,
+ 'uid': manager_name,
'userPassword': repl_manager_password
})
if args.suffix:
@@ -954,6 +959,7 @@ def get_winsync_agmt_status(inst, basedn, log, args):
status = agmt.status(winsync=True, use_json=args.json)
log.info(status)
+
#
# Tasks
#
@@ -1347,8 +1353,7 @@ def create_parser(subparsers):
agmt_set_parser.add_argument('--wait-async-results', help="The amount of time in milliseconds the server waits if "
"the consumer is not ready before resending data")
agmt_set_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after "
- "a consumer sends back a busy response before making another "
- "attempt to acquire access.")
+ "a consumer sends back a busy response before making another attempt to acquire access.")
agmt_set_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.")
agmt_set_parser.add_argument('--flow-control-window', help="Sets the maximum number of entries and updates sent by a supplier, which are not acknowledged by the consumer.")
agmt_set_parser.add_argument('--flow-control-pause', help="The time in milliseconds to pause after reaching the number of entries and updates set in \"--flow-control-window\"")
@@ -1438,8 +1443,7 @@ def create_parser(subparsers):
winsync_agmt_add_parser.add_argument('--subtree-pair', help="Set the subtree pair: <DS_SUBTREE>:<WINDOWS_SUBTREE>")
winsync_agmt_add_parser.add_argument('--conn-timeout', help="The timeout used for replicaton connections")
winsync_agmt_add_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after "
- "a consumer sends back a busy response before making another "
- "attempt to acquire access.")
+ "a consumer sends back a busy response before making another attempt to acquire access.")
winsync_agmt_add_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.")
winsync_agmt_add_parser.add_argument('--init', action='store_true', default=False, help="Initialize the agreement after creating it.")
@@ -1468,8 +1472,7 @@ def create_parser(subparsers):
winsync_agmt_set_parser.add_argument('--subtree-pair', help="Set the subtree pair: <DS_SUBTREE>:<WINDOWS_SUBTREE>")
winsync_agmt_set_parser.add_argument('--conn-timeout', help="The timeout used for replicaton connections")
winsync_agmt_set_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after "
- "a consumer sends back a busy response before making another "
- "attempt to acquire access.")
+ "a consumer sends back a busy response before making another attempt to acquire access.")
winsync_agmt_set_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.")
# Get
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index e3fc7fe1f..f8adb3ce2 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -1779,15 +1779,18 @@ class BootstrapReplicationManager(DSLdapObject):
:type instance: lib389.DirSrv
:param dn: The dn to create
:type dn: str
+ :param rdn_attr: The attribute to use for the RDN
+ :type rdn_attr: str
"""
- def __init__(self, instance, dn='cn=replication manager,cn=config'):
+ def __init__(self, instance, dn='cn=replication manager,cn=config', rdn_attr='cn'):
super(BootstrapReplicationManager, self).__init__(instance, dn)
- self._rdn_attribute = 'cn'
+ self._rdn_attribute = rdn_attr
self._must_attributes = ['cn', 'userPassword']
self._create_objectclasses = [
'top',
- 'netscapeServer',
- 'nsAccount'
+ 'inetUser', # for uid
+ 'netscapeServer', # for cn
+ 'nsAccount', # for authentication attributes
]
if ds_is_older('1.4.0'):
self._create_objectclasses.remove('nsAccount')
--
2.26.2

View File

@ -0,0 +1,127 @@
From 2a2773d4bf8553ba64b396d567fe05506b22c94c Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Tue, 24 Nov 2020 19:22:49 +0100
Subject: [PATCH] Issue 4449 - dsconf replication monitor fails to retrieve
database RUV - consumer (Unavailable) (#4451)
Bug Description:
"dsconf replication monitor" fails to retrieve database RUV entry from consumer and this
appears in the Cockpit web UI too.
The problem is that the bind credentials are not correctly propagated when trying to get
the consumer's agreement status. Then supplier credentials are used instead and the RUV
is searched anonymously because there is no bind dn in ldapi case.
Fix Description:
- Propagates the bind credentials when computing agreement status
- Add a credential cache because now a replica password could get asked several times:
when discovering the topology and
when getting the agreement maxcsn
- No testcase in 1.4.3 branch as the file modfied in master does not exists
- Add a comment about nonlocal keyword
Relates: #4449
Reviewers:
firstyear
droideck
mreynolds
Issue 4449: Add a comment about nonlocal keyword
(cherry picked from commit 73ee04fa12cd1de3a5e47c109e79e31c1aaaa2ab)
---
src/lib389/lib389/cli_conf/replication.py | 13 +++++++++++--
src/lib389/lib389/replica.py | 16 ++++++++++++----
2 files changed, 23 insertions(+), 6 deletions(-)
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
index 9dbaa320a..248972cba 100644
--- a/src/lib389/lib389/cli_conf/replication.py
+++ b/src/lib389/lib389/cli_conf/replication.py
@@ -369,9 +369,16 @@ def set_repl_config(inst, basedn, log, args):
def get_repl_monitor_info(inst, basedn, log, args):
connection_data = dsrc_to_repl_monitor(DSRC_HOME, log)
+ credentials_cache = {}
# Additional details for the connections to the topology
def get_credentials(host, port):
+ # credentials_cache is nonlocal to refer to the instance
+ # from enclosing function (get_repl_monitor_info)`
+ nonlocal credentials_cache
+ key = f'{host}:{port}'
+ if key in credentials_cache:
+ return credentials_cache[key]
found = False
if args.connections:
connections = args.connections
@@ -406,8 +413,10 @@ def get_repl_monitor_info(inst, basedn, log, args):
binddn = input(f'\nEnter a bind DN for {host}:{port}: ').rstrip()
bindpw = getpass(f"Enter a password for {binddn} on {host}:{port}: ").rstrip()
- return {"binddn": binddn,
- "bindpw": bindpw}
+ credentials = {"binddn": binddn,
+ "bindpw": bindpw}
+ credentials_cache[key] = credentials
+ return credentials
repl_monitor = ReplicationMonitor(inst)
report_dict = repl_monitor.generate_report(get_credentials, args.json)
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index c2ad2104d..3d89e61fb 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -2487,9 +2487,10 @@ class ReplicationMonitor(object):
else:
self._log = logging.getLogger(__name__)
- def _get_replica_status(self, instance, report_data, use_json):
+ def _get_replica_status(self, instance, report_data, use_json, get_credentials=None):
"""Load all of the status data to report
and add new hostname:port pairs for future processing
+ :type get_credentials: function
"""
replicas_status = []
@@ -2503,6 +2504,13 @@ class ReplicationMonitor(object):
for agmt in agmts.list():
host = agmt.get_attr_val_utf8_l("nsds5replicahost")
port = agmt.get_attr_val_utf8_l("nsds5replicaport")
+ if get_credentials is not None:
+ credentials = get_credentials(host, port)
+ binddn = credentials["binddn"]
+ bindpw = credentials["bindpw"]
+ else:
+ binddn = instance.binddn
+ bindpw = instance.bindpw
protocol = agmt.get_attr_val_utf8_l('nsds5replicatransportinfo')
# Supply protocol here because we need it only for connection
# and agreement status is already preformatted for the user output
@@ -2510,9 +2518,9 @@ class ReplicationMonitor(object):
if consumer not in report_data:
report_data[f"{consumer}:{protocol}"] = None
if use_json:
- agmts_status.append(json.loads(agmt.status(use_json=True)))
+ agmts_status.append(json.loads(agmt.status(use_json=True, binddn=binddn, bindpw=bindpw)))
else:
- agmts_status.append(agmt.status())
+ agmts_status.append(agmt.status(binddn=binddn, bindpw=bindpw))
replicas_status.append({"replica_id": replica_id,
"replica_root": replica_root,
"replica_status": "Available",
@@ -2535,7 +2543,7 @@ class ReplicationMonitor(object):
initial_inst_key = f"{self._instance.config.get_attr_val_utf8_l('nsslapd-localhost')}:{self._instance.config.get_attr_val_utf8_l('nsslapd-port')}"
# Do this on an initial instance to get the agreements to other instances
try:
- report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json)
+ report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json, get_credentials)
except ldap.LDAPError as e:
self._log.debug(f"Connection to consumer ({supplier_hostname}:{supplier_port}) failed, error: {e}")
report_data[initial_inst_key] = [{"replica_status": f"Unavailable - {e.args[0]['desc']}"}]
--
2.26.2

View File

@ -1,34 +0,0 @@
From ec85e986ec5710682de883f0f40f539b2f9945fa Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Wed, 27 May 2020 15:22:18 +0200
Subject: [PATCH 10/12] Issue 50931 - RFE AD filter rewriter for ObjectCategory
Bug Description:
ASAN build fails on RHEL due to linking issues
Fix Description:
Add missing libslapd.la for librewriters.la
Relates: https://pagure.io/389-ds-base/issue/50931
Reviewed by: tbordaz (Thanks!)
---
Makefile.am | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile.am b/Makefile.am
index 2309f3010..0e5f04f91 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1159,7 +1159,7 @@ librewriters_la_SOURCES = \
librewriters_la_LDFLAGS = $(AM_LDFLAGS)
librewriters_la_CPPFLAGS = $(AM_CPPFLAGS) $(REWRITERS_INCLUDES) $(DSPLUGIN_CPPFLAGS)
-librewriters_la_LIBADD = $(NSS_LINK) $(NSPR_LINK)
+librewriters_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK)
#------------------------
# libsvrcore
--
2.26.2

View File

@ -0,0 +1,63 @@
From e540effa692976c2eef766f1f735702ba5dc0950 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 30 Nov 2020 09:03:33 +0100
Subject: [PATCH] Issue 4243 - Fix test: SyncRepl plugin provides a wrong
cookie (#4467)
Bug description:
This test case was incorrect.
During a refreshPersistent search, a cookie is sent
with the intermediate message that indicates the end of the refresh phase.
Then a second cookie is sent on the updated entry (group10)
I believed this test was successful some time ago but neither python-ldap
nor sync_repl changed (intermediate sent in post refresh).
So the testcase was never successful :(
Fix description:
The fix is just to take into account the two expected cookies
relates: https://github.com/389ds/389-ds-base/issues/4243
Reviewed by: Mark Reynolds
Platforms tested: F31
---
.../tests/suites/syncrepl_plugin/basic_test.py | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
index 79ec374bc..7b35537d5 100644
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
@@ -589,7 +589,7 @@ def test_sync_repl_cookie_with_failure(topology, request):
sync_repl.start()
time.sleep(5)
- # Add a test group just to check that sync_repl receives only one update
+ # Add a test group just to check that sync_repl receives that SyncControlInfo cookie
group.append(groups.create(properties={'cn': 'group%d' % 10}))
# create users, that automember/memberof will generate nested updates
@@ -610,13 +610,15 @@ def test_sync_repl_cookie_with_failure(topology, request):
time.sleep(10)
cookies = sync_repl.get_result()
- # checking that the cookie list contains only one entry
- assert len(cookies) == 1
- prev = 0
+ # checking that the cookie list contains only two entries
+ # the one from the SyncInfo/RefreshDelete that indicates the end of the refresh
+ # the the one from SyncStateControl related to the only updated entry (group10)
+ assert len(cookies) == 2
+ prev = -1
for cookie in cookies:
log.info('Check cookie %s' % cookie)
- assert int(cookie) > 0
+ assert int(cookie) >= 0
assert int(cookie) < 1000
assert int(cookie) > prev
prev = int(cookie)
--
2.26.2

View File

@ -0,0 +1,254 @@
From 8b0ba11c3dfb577d1696f4b71a6f4e9f8d42349f Mon Sep 17 00:00:00 2001
From: Pierre Rogier <progier@redhat.com>
Date: Mon, 30 Nov 2020 12:42:17 +0100
Subject: [PATCH] Add dsconf replication monitor test case (gitHub issue 4449)
in 1.4.3 branch
---
.../tests/suites/clu/repl_monitor_test.py | 234 ++++++++++++++++++
1 file changed, 234 insertions(+)
create mode 100644 dirsrvtests/tests/suites/clu/repl_monitor_test.py
diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
new file mode 100644
index 000000000..b03d170c8
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
@@ -0,0 +1,234 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import time
+import subprocess
+import pytest
+
+from lib389.cli_conf.replication import get_repl_monitor_info
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_m2
+from lib389.cli_base import FakeArgs
+from lib389.cli_base.dsrc import dsrc_arg_concat
+from lib389.cli_base import connect_instance
+
+pytestmark = pytest.mark.tier0
+
+LOG_FILE = '/tmp/monitor.log'
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="function")
+def set_log_file(request):
+ fh = logging.FileHandler(LOG_FILE)
+ fh.setLevel(logging.DEBUG)
+ log.addHandler(fh)
+
+ def fin():
+ log.info('Delete files')
+ os.remove(LOG_FILE)
+
+ config = os.path.expanduser(DSRC_HOME)
+ if os.path.exists(config):
+ os.remove(config)
+
+ request.addfinalizer(fin)
+
+
+def check_value_in_log_and_reset(content_list, second_list=None, single_value=None, error_list=None):
+ with open(LOG_FILE, 'r+') as f:
+ file_content = f.read()
+
+ for item in content_list:
+ log.info('Check that "{}" is present'.format(item))
+ assert item in file_content
+
+ if second_list is not None:
+ log.info('Check for "{}"'.format(second_list))
+ for item in second_list:
+ assert item in file_content
+
+ if single_value is not None:
+ log.info('Check for "{}"'.format(single_value))
+ assert single_value in file_content
+
+ if error_list is not None:
+ log.info('Check that "{}" is not present'.format(error_list))
+ for item in error_list:
+ assert item not in file_content
+
+ log.info('Reset log file')
+ f.truncate(0)
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1739718
+@pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented")
+def test_dsconf_replication_monitor(topology_m2, set_log_file):
+ """Test replication monitor that was ported from legacy tools
+
+ :id: ce48020d-7c30-41b7-8f68-144c9cd757f6
+ :setup: 2 MM topology
+ :steps:
+ 1. Create DS instance
+ 2. Run replication monitor with connections option
+ 3. Run replication monitor with aliases option
+ 4. Run replication monitor with --json option
+ 5. Run replication monitor with .dsrc file created
+ 6. Run replication monitor with connections option as if using dsconf CLI
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ """
+
+ m1 = topology_m2.ms["master1"]
+ m2 = topology_m2.ms["master2"]
+
+ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')',
+ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')']
+
+ connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port)
+ content_list = ['Replica Root: dc=example,dc=com',
+ 'Replica ID: 1',
+ 'Replica Status: Available',
+ 'Max CSN',
+ 'Status For Agreement: "002" ('+ m2.host + ':' + str(m2.port) + ')',
+ 'Replica Enabled: on',
+ 'Update In Progress: FALSE',
+ 'Last Update Start:',
+ 'Last Update End:',
+ 'Number Of Changes Sent:',
+ 'Number Of Changes Skipped: None',
+ 'Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded',
+ 'Last Init Start:',
+ 'Last Init End:',
+ 'Last Init Status:',
+ 'Reap Active: 0',
+ 'Replication Status: In Synchronization',
+ 'Replication Lag Time:',
+ 'Supplier: ',
+ m2.host + ':' + str(m2.port),
+ 'Replica Root: dc=example,dc=com',
+ 'Replica ID: 2',
+ 'Status For Agreement: "001" (' + m1.host + ':' + str(m1.port)+')']
+
+ error_list = ['consumer (Unavailable)',
+ 'Failed to retrieve database RUV entry from consumer']
+
+ json_list = ['type',
+ 'list',
+ 'items',
+ 'name',
+ m1.host + ':' + str(m1.port),
+ 'data',
+ '"replica_id": "1"',
+ '"replica_root": "dc=example,dc=com"',
+ '"replica_status": "Available"',
+ 'maxcsn',
+ 'agmts_status',
+ 'agmt-name',
+ '002',
+ 'replica',
+ m2.host + ':' + str(m2.port),
+ 'replica-enabled',
+ 'update-in-progress',
+ 'last-update-start',
+ 'last-update-end',
+ 'number-changes-sent',
+ 'number-changes-skipped',
+ 'last-update-status',
+ 'Error (0) Replica acquired successfully: Incremental update succeeded',
+ 'last-init-start',
+ 'last-init-end',
+ 'last-init-status',
+ 'reap-active',
+ 'replication-status',
+ 'In Synchronization',
+ 'replication-lag-time',
+ '"replica_id": "2"',
+ '001',
+ m1.host + ':' + str(m1.port)]
+
+ dsrc_content = '[repl-monitor-connections]\n' \
+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ '\n' \
+ '[repl-monitor-aliases]\n' \
+ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
+ 'M2 = ' + m2.host + ':' + str(m2.port)
+
+ connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
+ m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]
+
+ aliases = ['M1=' + m1.host + ':' + str(m1.port),
+ 'M2=' + m2.host + ':' + str(m2.port)]
+
+ args = FakeArgs()
+ args.connections = connections
+ args.aliases = None
+ args.json = False
+
+ log.info('Run replication monitor with connections option')
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
+
+ log.info('Run replication monitor with aliases option')
+ args.aliases = aliases
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(content_list, alias_content)
+
+ log.info('Run replication monitor with --json option')
+ args.aliases = None
+ args.json = True
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(json_list)
+
+ with open(os.path.expanduser(DSRC_HOME), 'w+') as f:
+ f.write(dsrc_content)
+
+ args.connections = None
+ args.aliases = None
+ args.json = False
+
+ log.info('Run replication monitor when .dsrc file is present with content')
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(content_list, alias_content)
+ os.remove(os.path.expanduser(DSRC_HOME))
+
+ log.info('Run replication monitor with connections option as if using dsconf CLI')
+ # Perform same test than steps 2 test but without using directly the topology instance.
+ # but with an instance similar to those than dsconf cli generates:
+ # step 2 args
+ args.connections = connections
+ args.aliases = None
+ args.json = False
+ # args needed to generate an instance with dsrc_arg_concat
+ args.instance = 'master1'
+ args.basedn = None
+ args.binddn = None
+ args.bindpw = None
+ args.pwdfile = None
+ args.prompt = False
+ args.starttls = False
+ dsrc_inst = dsrc_arg_concat(args, None)
+ inst = connect_instance(dsrc_inst, True, args)
+ get_repl_monitor_info(inst, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
--
2.26.2

View File

@ -1,54 +0,0 @@
From 2540354b7eb6fa03db7d36a5b755001b0852aa1b Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Thu, 26 Mar 2020 19:33:47 +0100
Subject: [PATCH] Issue 50984 - Memory leaks in disk monitoring
Description: Memory leaks are reported by the disk monitoring test suite.
The direct leak is related to char **dirs array which is not freed at all.
Free the array when we clean up or go to shutdown.
Fix disk_monitoring_test.py::test_below_half_of_the_threshold_not_starting_after_shutdown.
It should accept different exception when the instance is not started.
https://pagure.io/389-ds-base/issue/50984
Reviewed by: firstyear (Thanks!)
---
ldap/servers/slapd/daemon.c | 2 --
ldap/servers/slapd/main.c | 1 -
2 files changed, 3 deletions(-)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index a70f40316..542d31037 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -613,7 +613,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
slapi_ch_array_free(dirs);
- dirs = NULL;
return;
}
/*
@@ -713,7 +712,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
slapi_ch_array_free(dirs);
- dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */
g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL);
return;
}
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index e54b8e1c5..1f8b01959 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -958,7 +958,6 @@ main(int argc, char **argv)
goto cleanup;
}
slapi_ch_array_free(dirs);
- dirs = NULL;
}
/* log the max fd limit as it is typically set in env/systemd */
slapi_log_err(SLAPI_LOG_INFO, "main",
--
2.26.2

View File

@ -0,0 +1,100 @@
From 389b2c825742392365262a719be7c8f594e7e522 Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 26 Nov 2020 09:08:13 +1000
Subject: [PATCH] Issue 4460 - BUG - lib389 should use system tls policy
Bug Description: Due to some changes in dsrc for tlsreqcert
and how def open was structured in lib389, the system ldap.conf
policy was ignored.
Fix Description: Default to using the system ldap.conf policy
if undefined in lib389 or the tls_reqcert param in dsrc.
fixes: #4460
Author: William Brown <william@blackhats.net.au>
Review by: ???
---
src/lib389/lib389/__init__.py | 11 +++++++----
src/lib389/lib389/cli_base/dsrc.py | 16 +++++++++-------
2 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 99ea9cc6a..4e6a1905a 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -962,7 +962,7 @@ class DirSrv(SimpleLDAPObject, object):
# Now, we are still an allocated ds object so we can be re-installed
self.state = DIRSRV_STATE_ALLOCATED
- def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=ldap.OPT_X_TLS_HARD,
+ def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=None,
usercert=None, userkey=None):
'''
It opens a ldap bound connection to dirsrv so that online
@@ -1025,9 +1025,12 @@ class DirSrv(SimpleLDAPObject, object):
try:
# Note this sets LDAP.OPT not SELF. Because once self has opened
# it can NOT change opts on reused (ie restart)
- self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert)
- self.log.debug("Using certificate policy %s", reqcert)
- self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", reqcert)
+ if reqcert is not None:
+ self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert)
+ self.log.debug("Using lib389 certificate policy %s", reqcert)
+ else:
+ self.log.debug("Using /etc/openldap/ldap.conf certificate policy")
+ self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", self.get_option(ldap.OPT_X_TLS_REQUIRE_CERT))
except ldap.LDAPError as e:
self.log.fatal('TLS negotiation failed: %s', e)
raise e
diff --git a/src/lib389/lib389/cli_base/dsrc.py b/src/lib389/lib389/cli_base/dsrc.py
index fec18a5f9..9b09ea568 100644
--- a/src/lib389/lib389/cli_base/dsrc.py
+++ b/src/lib389/lib389/cli_base/dsrc.py
@@ -45,7 +45,7 @@ def dsrc_arg_concat(args, dsrc_inst):
'tls_cacertdir': None,
'tls_cert': None,
'tls_key': None,
- 'tls_reqcert': ldap.OPT_X_TLS_HARD,
+ 'tls_reqcert': None,
'starttls': args.starttls,
'prompt': False,
'pwdfile': None,
@@ -134,7 +134,7 @@ def dsrc_to_ldap(path, instance_name, log):
dsrc_inst['binddn'] = config.get(instance_name, 'binddn', fallback=None)
dsrc_inst['saslmech'] = config.get(instance_name, 'saslmech', fallback=None)
if dsrc_inst['saslmech'] is not None and dsrc_inst['saslmech'] not in ['EXTERNAL', 'PLAIN']:
- raise Exception("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name))
+ raise ValueError("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name))
dsrc_inst['tls_cacertdir'] = config.get(instance_name, 'tls_cacertdir', fallback=None)
# At this point, we should check if the provided cacertdir is indeed, a dir. This can be a cause
@@ -145,16 +145,18 @@ def dsrc_to_ldap(path, instance_name, log):
dsrc_inst['tls_cert'] = config.get(instance_name, 'tls_cert', fallback=None)
dsrc_inst['tls_key'] = config.get(instance_name, 'tls_key', fallback=None)
- dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback='hard')
- if dsrc_inst['tls_reqcert'] not in ['never', 'allow', 'hard']:
- raise Exception("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name,
- path))
+ dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback=None)
if dsrc_inst['tls_reqcert'] == 'never':
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_NEVER
elif dsrc_inst['tls_reqcert'] == 'allow':
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_ALLOW
- else:
+ elif dsrc_inst['tls_reqcert'] == 'hard':
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_HARD
+ elif dsrc_inst['tls_reqcert'] is None:
+ # Use system value
+ pass
+ else:
+ raise ValueError("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name, path))
dsrc_inst['starttls'] = config.getboolean(instance_name, 'starttls', fallback=False)
dsrc_inst['pwdfile'] = None
dsrc_inst['prompt'] = False
--
2.26.2

View File

@ -1,52 +0,0 @@
From a720e002751815323a295e11e77c56d7ce38314e Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 27 Mar 2020 11:35:55 +0100
Subject: [PATCH] Issue 50984 - Memory leaks in disk monitoring
Description: Reset dirs pointer every time we free it.
The code may be changed in the future so we should make it
more robust.
https://pagure.io/389-ds-base/issue/50984
Reviewed by: spichugi, tbordaz (one line commit rule)
---
ldap/servers/slapd/daemon.c | 2 ++
ldap/servers/slapd/main.c | 1 +
2 files changed, 3 insertions(+)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 542d31037..a70f40316 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -613,6 +613,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
slapi_ch_array_free(dirs);
+ dirs = NULL;
return;
}
/*
@@ -712,6 +713,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
slapi_ch_array_free(dirs);
+ dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */
g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL);
return;
}
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 1f8b01959..e54b8e1c5 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -958,6 +958,7 @@ main(int argc, char **argv)
goto cleanup;
}
slapi_ch_array_free(dirs);
+ dirs = NULL;
}
/* log the max fd limit as it is typically set in env/systemd */
slapi_log_err(SLAPI_LOG_INFO, "main",
--
2.26.2

View File

@ -0,0 +1,60 @@
From 05b66529117d1cd85a636ab7d8fc84abdec814de Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 12 Nov 2020 13:04:21 +1000
Subject: [PATCH] Issue 4428 - BUG Paged Results with critical false causes
sigsegv in chaining
Bug Description: When a paged search through chaining backend is
received with a false criticality (such as SSSD), chaining backend
will sigsegv due to a null context.
Fix Description: When a NULL ctx is recieved to be freed, this is
as paged results have finished being sent, so we check the NULL
ctx and move on.
fixes: #4428
Author: William Brown <william@blackhats.net.au>
Review by: @droideck, @mreynolds389
---
ldap/servers/plugins/chainingdb/cb_search.c | 6 ++++++
ldap/servers/plugins/chainingdb/cb_utils.c | 4 ++++
2 files changed, 10 insertions(+)
diff --git a/ldap/servers/plugins/chainingdb/cb_search.c b/ldap/servers/plugins/chainingdb/cb_search.c
index 69d23a6b5..d47cbc8e4 100644
--- a/ldap/servers/plugins/chainingdb/cb_search.c
+++ b/ldap/servers/plugins/chainingdb/cb_search.c
@@ -740,6 +740,12 @@ chaining_back_search_results_release(void **sr)
slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM,
"chaining_back_search_results_release\n");
+ if (ctx == NULL) {
+ /* The paged search is already complete, just return */
+ /* Could we have a ctx state flag instead? */
+ return;
+ }
+
if (ctx->readahead != ctx->tobefreed) {
slapi_entry_free(ctx->readahead);
}
diff --git a/ldap/servers/plugins/chainingdb/cb_utils.c b/ldap/servers/plugins/chainingdb/cb_utils.c
index dfd5dd92c..d52fd25a6 100644
--- a/ldap/servers/plugins/chainingdb/cb_utils.c
+++ b/ldap/servers/plugins/chainingdb/cb_utils.c
@@ -279,7 +279,11 @@ cb_add_suffix(cb_backend_instance *inst, struct berval **bvals, int apply_mod, c
return LDAP_SUCCESS;
}
+#ifdef DEBUG
+static int debug_on = 1;
+#else
static int debug_on = 0;
+#endif
int
cb_debug_on()
--
2.26.2

View File

@ -1,569 +0,0 @@
From f60364cd9472edc61e7d327d13dca67eadf0c5b2 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <simon.pichugin@gmail.com>
Date: Tue, 28 Apr 2020 23:44:20 +0200
Subject: [PATCH] Issue 50201 - nsIndexIDListScanLimit accepts any value
Bug Description: Setting of nsIndexIDListScanLimit like
'limit=2 limit=3' are detected and logged in error logs.
But the invalid value is successfully applied in the config entry
and the operation itself is successful.
The impact is limited because the index will be used following
idlistscanlimit rather than invalid definition nsIndexIDListScanLimit.
Fix Description: Print the errors to the user when he tries to add
or to modify index config entry with malformed values.
Change tests accordingly.
https://pagure.io/389-ds-base/issue/50201
Reviewed by: mreynolds, tbordaz (Thanks!)
---
.../suites/filter/filterscanlimit_test.py | 87 ++++++++-----------
ldap/servers/slapd/back-ldbm/instance.c | 4 +-
ldap/servers/slapd/back-ldbm/ldbm_attr.c | 33 ++++++-
.../slapd/back-ldbm/ldbm_index_config.c | 59 +++++++++----
ldap/servers/slapd/back-ldbm/ldif2ldbm.c | 2 +-
.../servers/slapd/back-ldbm/proto-back-ldbm.h | 2 +-
6 files changed, 114 insertions(+), 73 deletions(-)
diff --git a/dirsrvtests/tests/suites/filter/filterscanlimit_test.py b/dirsrvtests/tests/suites/filter/filterscanlimit_test.py
index dd9c6ee4e..0198f6533 100644
--- a/dirsrvtests/tests/suites/filter/filterscanlimit_test.py
+++ b/dirsrvtests/tests/suites/filter/filterscanlimit_test.py
@@ -11,6 +11,7 @@ This script will test different type of Filers.
"""
import os
+import ldap
import pytest
from lib389._constants import DEFAULT_SUFFIX, PW_DM
@@ -19,11 +20,10 @@ from lib389.idm.user import UserAccounts
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.index import Index
from lib389.idm.account import Accounts
-from lib389.idm.group import UniqueGroups, Group
+from lib389.idm.group import UniqueGroups
pytestmark = pytest.mark.tier1
-
GIVEN_NAME = 'cn=givenname,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
CN_NAME = 'cn=sn,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
UNIQMEMBER = 'cn=uniquemember,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
@@ -39,7 +39,6 @@ LIST_OF_USER_ACCOUNTING = [
"Judy Wallace",
"Marcus Ward",
"Judy McFarland",
- "Anuj Hall",
"Gern Triplett",
"Emanuel Johnson",
"Brad Walker",
@@ -57,7 +56,6 @@ LIST_OF_USER_ACCOUNTING = [
"Randy Ulrich",
"Richard Francis",
"Morgan White",
- "Anuj Maddox",
"Jody Jensen",
"Mike Carter",
"Gern Tyler",
@@ -77,8 +75,6 @@ LIST_OF_USER_HUMAN = [
"Robert Daugherty",
"Torrey Mason",
"Brad Talbot",
- "Anuj Jablonski",
- "Harry Miller",
"Jeffrey Campaigne",
"Stephen Triplett",
"John Falena",
@@ -107,8 +103,7 @@ LIST_OF_USER_HUMAN = [
"Tobias Schmith",
"Jon Goldstein",
"Janet Lutz",
- "Karl Cope",
-]
+ "Karl Cope"]
LIST_OF_USER_TESTING = [
"Andy Bergin",
@@ -122,8 +117,7 @@ LIST_OF_USER_TESTING = [
"Alan White",
"Daniel Ward",
"Lee Stockton",
- "Matthew Vaughan"
-]
+ "Matthew Vaughan"]
LIST_OF_USER_DEVELOPMENT = [
"Kelly Winters",
@@ -143,7 +137,6 @@ LIST_OF_USER_DEVELOPMENT = [
"Timothy Kelly",
"Sue Mason",
"Chris Alexander",
- "Anuj Jensen",
"Martin Talbot",
"Scott Farmer",
"Allison Jensen",
@@ -152,9 +145,7 @@ LIST_OF_USER_DEVELOPMENT = [
"Dan Langdon",
"Ashley Knutson",
"Jon Bourke",
- "Pete Hunt",
-
-]
+ "Pete Hunt"]
LIST_OF_USER_PAYROLL = [
"Ashley Chassin",
@@ -164,12 +155,17 @@ LIST_OF_USER_PAYROLL = [
"Patricia Shelton",
"Dietrich Swain",
"Allison Hunter",
- "Anne-Louise Barnes"
+ "Anne-Louise Barnes"]
-]
+LIST_OF_USER_PEOPLE = [
+ 'Sam Carter',
+ 'Tom Morris',
+ 'Kevin Vaughan',
+ 'Rich Daugherty',
+ 'Harry Miller',
+ 'Sam Schmith']
-@pytest.mark.skip(reason="https://pagure.io/389-ds-base/issue/50201")
def test_invalid_configuration(topo):
""""
Error handling for invalid configuration
@@ -190,10 +186,7 @@ def test_invalid_configuration(topo):
'limit=0 flags=AND flags=AND',
'limit=0 type=eq values=foo values=foo',
'limit=0 type=eq values=foo,foo',
- 'limit=0 type=sub',
- 'limit=0 type=eq values=notvalid',
'limit',
- 'limit=0 type=eq values=notavaliddn',
'limit=0 type=pres values=bogus',
'limit=0 type=eq,sub values=bogus',
'limit=',
@@ -203,7 +196,8 @@ def test_invalid_configuration(topo):
'limit=-2',
'type=eq',
'limit=0 type=bogus']:
- Index(topo.standalone, GIVEN_NAME).replace('nsIndexIDListScanLimit', i)
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+ Index(topo.standalone, GIVEN_NAME).replace('nsIndexIDListScanLimit', i)
def test_idlistscanlimit(topo):
@@ -247,28 +241,24 @@ def test_idlistscanlimit(topo):
(LIST_OF_USER_HUMAN, users_human),
(LIST_OF_USER_TESTING, users_testing),
(LIST_OF_USER_DEVELOPMENT, users_development),
- (LIST_OF_USER_PAYROLL, users_payroll)]:
+ (LIST_OF_USER_PAYROLL, users_payroll),
+ (LIST_OF_USER_PEOPLE, users_people)]:
for demo1 in data[0]:
+ fn = demo1.split()[0]
+ sn = demo1.split()[1]
+ uid = ''.join([fn[:1], sn]).lower()
data[1].create(properties={
- 'uid': demo1,
+ 'uid': uid,
'cn': demo1,
- 'sn': demo1.split()[1],
+ 'sn': sn,
'uidNumber': str(1000),
'gidNumber': '2000',
- 'homeDirectory': '/home/' + demo1,
- 'givenname': demo1.split()[0],
- 'userpassword': PW_DM
+ 'homeDirectory': f'/home/{uid}',
+ 'givenname': fn,
+ 'userpassword': PW_DM,
+ 'mail': f'{uid}@test.com'
})
- users_people.create(properties={
- 'uid': 'scarter',
- 'cn': 'Sam Carter',
- 'sn': 'Carter',
- 'uidNumber': str(1000),
- 'gidNumber': '2000',
- 'homeDirectory': '/home/' + 'scarter',
- 'mail': 'scarter@anuj.com',
- })
try:
# Change log levels
errorlog_value = topo.standalone.config.get_attr_val_utf8('nsslapd-errorlog-level')
@@ -297,16 +287,12 @@ def test_idlistscanlimit(topo):
Index(topo.standalone, UNIQMEMBER).\
replace('nsIndexIDListScanLimit',
- 'limit=0 type=eq values=uid=kvaughan,ou=People,'
- 'dc=example,dc=com,uid=rdaugherty,ou=People,dc=example,dc=com')
+ 'limit=0 type=eq values=uid=kvaughan\2Cou=People\2Cdc=example\2Cdc=com,'
+ 'uid=rdaugherty\2Cou=People\2Cdc=example\2Cdc=com')
Index(topo.standalone, OBJECTCLASS).\
replace('nsIndexIDListScanLimit', 'limit=0 type=eq flags=AND values=inetOrgPerson')
- Index(topo.standalone, MAIL).\
- replace('nsIndexIDListScanLimit',
- 'cn=mail,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config')
-
# Search with filter
for i in ['(sn=Lutz)',
'(sn=*ter)',
@@ -321,22 +307,24 @@ def test_idlistscanlimit(topo):
'(&(sn=*)(cn=*))',
'(sn=Hunter)',
'(&(givenname=Richard)(objectclass=organizationalPerson))',
- '(givenname=Anuj)',
+ '(givenname=Morgan)',
'(&(givenname=*)(cn=*))',
'(givenname=*)']:
assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(f'{i}')
- # Creating Group
- Group(topo.standalone, 'cn=Accounting Managers,ou=groups,dc=example,dc=com').\
- add('uniquemember',
+ # Creating Groups and adding members
+ groups = UniqueGroups(topo.standalone, DEFAULT_SUFFIX)
+ accounting_managers = groups.ensure_state(properties={'cn': 'Accounting Managers'})
+ hr_managers = groups.ensure_state(properties={'cn': 'HR Managers'})
+
+ accounting_managers.add('uniquemember',
['uid=scarter, ou=People, dc=example,dc=com',
'uid=tmorris, ou=People, dc=example,dc=com',
'uid=kvaughan, ou=People, dc=example,dc=com',
'uid=rdaugherty, ou=People, dc=example,dc=com',
'uid=hmiller, ou=People, dc=example,dc=com'])
- Group(topo.standalone, 'cn=HR Managers,ou=groups,dc=example,dc=com').\
- add('uniquemember',
+ hr_managers.add('uniquemember',
['uid=kvaughan, ou=People, dc=example,dc=com',
'uid=cschmith, ou=People, dc=example,dc=com'])
@@ -403,10 +391,9 @@ def test_idlistscanlimit(topo):
'(&(sn=*)(cn=*))',
'(sn=Hunter)',
'(&(givenname=Richard)(objectclass=organizationalPerson))',
- '(givenname=Anuj)',
+ '(givenname=Morgan)',
'(&(givenname=*)(cn=*))',
'(givenname=*)']:
-
assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(value)
finally:
diff --git a/ldap/servers/slapd/back-ldbm/instance.c b/ldap/servers/slapd/back-ldbm/instance.c
index 04c28ff39..07655a8ec 100644
--- a/ldap/servers/slapd/back-ldbm/instance.c
+++ b/ldap/servers/slapd/back-ldbm/instance.c
@@ -231,7 +231,7 @@ ldbm_instance_create_default_indexes(backend *be)
/* ldbm_instance_config_add_index_entry(inst, 2, argv); */
e = ldbm_instance_init_config_entry(LDBM_PSEUDO_ATTR_DEFAULT, "none", 0, 0, 0);
- attr_index_config(be, "ldbm index init", 0, e, 1, 0);
+ attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL);
slapi_entry_free(e);
if (!entryrdn_get_noancestorid()) {
@@ -240,7 +240,7 @@ ldbm_instance_create_default_indexes(backend *be)
* but we still want to use the attr index file APIs.
*/
e = ldbm_instance_init_config_entry(LDBM_ANCESTORID_STR, "eq", 0, 0, 0);
- attr_index_config(be, "ldbm index init", 0, e, 1, 0);
+ attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL);
slapi_entry_free(e);
}
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
index b9e130d77..f0d418572 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
@@ -633,6 +633,18 @@ attr_index_idlistsize_config(Slapi_Entry *e, struct attrinfo *ai, char *returnte
return rc;
}
+/*
+ * Function that process index attributes and modifies attrinfo structure
+ *
+ * Called while adding default indexes, during db2index execution and
+ * when we add/modify/delete index config entry
+ *
+ * If char *err_buf is not NULL, it will additionally print all error messages to STDERR
+ * It is used when we add/modify/delete index config entry, so the user would have a better verbose
+ *
+ * returns -1, 1 on a failure
+ * 0 on success
+ */
int
attr_index_config(
backend *be,
@@ -640,7 +652,8 @@ attr_index_config(
int lineno,
Slapi_Entry *e,
int init __attribute__((unused)),
- int indextype_none)
+ int indextype_none,
+ char *err_buf)
{
ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
int j = 0;
@@ -662,6 +675,7 @@ attr_index_config(
slapi_attr_first_value(attr, &sval);
attrValue = slapi_value_get_berval(sval);
} else {
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: missing indexing arguments\n");
slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "Missing indexing arguments\n");
return -1;
}
@@ -705,6 +719,10 @@ attr_index_config(
}
a->ai_indexmask = INDEX_OFFLINE; /* note that the index isn't available */
} else {
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: %s: line %d: unknown index type \"%s\" (ignored) in entry (%s), "
+ "valid index types are \"pres\", \"eq\", \"approx\", or \"sub\"\n",
+ fname, lineno, attrValue->bv_val, slapi_entry_get_dn(e));
slapi_log_err(SLAPI_LOG_ERR, "attr_index_config",
"%s: line %d: unknown index type \"%s\" (ignored) in entry (%s), "
"valid index types are \"pres\", \"eq\", \"approx\", or \"sub\"\n",
@@ -715,6 +733,7 @@ attr_index_config(
}
if (hasIndexType == 0) {
/* indexType missing, error out */
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: missing index type\n");
slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "Missing index type\n");
attrinfo_delete(&a);
return -1;
@@ -873,16 +892,26 @@ attr_index_config(
slapi_ch_free((void **)&official_rules);
}
}
-
if ((return_value = attr_index_idlistsize_config(e, a, myreturntext))) {
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: %s: Failed to parse idscanlimit info: %d:%s\n",
+ fname, return_value, myreturntext);
slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "%s: Failed to parse idscanlimit info: %d:%s\n",
fname, return_value, myreturntext);
+ if (err_buf != NULL) {
+ /* we are inside of a callback, we shouldn't allow malformed attributes in index entries */
+ attrinfo_delete(&a);
+ return return_value;
+ }
}
/* initialize the IDL code's private data */
return_value = idl_init_private(be, a);
if (0 != return_value) {
/* fatal error, exit */
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: %s: line %d:Fatal Error: Failed to initialize attribute structure\n",
+ fname, lineno);
slapi_log_err(SLAPI_LOG_CRIT, "attr_index_config",
"%s: line %d:Fatal Error: Failed to initialize attribute structure\n",
fname, lineno);
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
index 45f0034f0..720f93036 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
@@ -25,26 +25,34 @@ int ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, Slapi_Entry *en
#define INDEXTYPE_NONE 1
static int
-ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name)
+ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, char *err_buf)
{
Slapi_Attr *attr;
const struct berval *attrValue;
Slapi_Value *sval;
+ char *edn = slapi_entry_get_dn(e);
/* Get the name of the attribute to index which will be the value
* of the cn attribute. */
if (slapi_entry_attr_find(e, "cn", &attr) != 0) {
- slapi_log_err(SLAPI_LOG_ERR, "ldbm_index_parse_entry", "Malformed index entry %s\n",
- slapi_entry_get_dn(e));
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s\n",
+ edn);
+ slapi_log_err(SLAPI_LOG_ERR,
+ "ldbm_index_parse_entry", "Malformed index entry %s\n",
+ edn);
return LDAP_OPERATIONS_ERROR;
}
slapi_attr_first_value(attr, &sval);
attrValue = slapi_value_get_berval(sval);
if (NULL == attrValue->bv_val || 0 == attrValue->bv_len) {
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s -- empty index name\n",
+ edn);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_index_parse_entry", "Malformed index entry %s -- empty index name\n",
- slapi_entry_get_dn(e));
+ edn);
return LDAP_OPERATIONS_ERROR;
}
@@ -59,16 +67,19 @@ ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_st
attrValue = slapi_value_get_berval(sval);
if (NULL == attrValue->bv_val || attrValue->bv_len == 0) {
/* missing the index type, error out */
- slapi_log_err(SLAPI_LOG_ERR,
- "ldbm_index_parse_entry", "Malformed index entry %s -- empty nsIndexType\n",
- slapi_entry_get_dn(e));
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s -- empty nsIndexType\n",
+ edn);
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_index_parse_entry",
+ "Malformed index entry %s -- empty nsIndexType\n",
+ edn);
slapi_ch_free_string(index_name);
return LDAP_OPERATIONS_ERROR;
}
}
/* ok the entry is good to process, pass it to attr_index_config */
- if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0)) {
+ if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0, err_buf)) {
slapi_ch_free_string(index_name);
return LDAP_OPERATIONS_ERROR;
}
@@ -92,7 +103,7 @@ ldbm_index_init_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
ldbm_instance *inst = (ldbm_instance *)arg;
returntext[0] = '\0';
- *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL);
+ *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, NULL);
if (*returncode == LDAP_SUCCESS) {
return SLAPI_DSE_CALLBACK_OK;
} else {
@@ -117,7 +128,7 @@ ldbm_instance_index_config_add_callback(Slapi_PBlock *pb __attribute__((unused))
char *index_name = NULL;
returntext[0] = '\0';
- *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name);
+ *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, returntext);
if (*returncode == LDAP_SUCCESS) {
struct attrinfo *ai = NULL;
/* if the index is a "system" index, we assume it's being added by
@@ -179,7 +190,7 @@ ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb,
slapi_attr_first_value(attr, &sval);
attrValue = slapi_value_get_berval(sval);
- attr_index_config(inst->inst_be, "From DSE delete", 0, e, 0, INDEXTYPE_NONE);
+ attr_index_config(inst->inst_be, "From DSE delete", 0, e, 0, INDEXTYPE_NONE, returntext);
ainfo_get(inst->inst_be, attrValue->bv_val, &ainfo);
if (NULL == ainfo) {
@@ -213,14 +224,19 @@ ldbm_instance_index_config_modify_callback(Slapi_PBlock *pb __attribute__((unuse
Slapi_Value *sval;
const struct berval *attrValue;
struct attrinfo *ainfo = NULL;
+ char *edn = slapi_entry_get_dn(e);
+ char *edn_after = slapi_entry_get_dn(entryAfter);
returntext[0] = '\0';
*returncode = LDAP_SUCCESS;
if (slapi_entry_attr_find(entryAfter, "cn", &attr) != 0) {
+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s - missing cn attribute\n",
+ edn_after);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing cn attribute\n",
- slapi_entry_get_dn(entryAfter));
+ edn_after);
*returncode = LDAP_OBJECT_CLASS_VIOLATION;
return SLAPI_DSE_CALLBACK_ERROR;
}
@@ -228,31 +244,40 @@ ldbm_instance_index_config_modify_callback(Slapi_PBlock *pb __attribute__((unuse
attrValue = slapi_value_get_berval(sval);
if (NULL == attrValue->bv_val || 0 == attrValue->bv_len) {
+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s - missing index name\n",
+ edn);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_instance_index_config_modify_callback", "Malformed index entry %s, missing index name\n",
- slapi_entry_get_dn(e));
+ edn);
*returncode = LDAP_UNWILLING_TO_PERFORM;
return SLAPI_DSE_CALLBACK_ERROR;
}
ainfo_get(inst->inst_be, attrValue->bv_val, &ainfo);
if (NULL == ainfo) {
+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s - missing cn attribute info\n",
+ edn);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing cn attribute info\n",
- slapi_entry_get_dn(e));
+ edn);
*returncode = LDAP_UNWILLING_TO_PERFORM;
return SLAPI_DSE_CALLBACK_ERROR;
}
if (slapi_entry_attr_find(entryAfter, "nsIndexType", &attr) != 0) {
+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s - missing nsIndexType attribute\n",
+ edn_after);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing nsIndexType attribute\n",
- slapi_entry_get_dn(entryAfter));
+ edn_after);
*returncode = LDAP_OBJECT_CLASS_VIOLATION;
return SLAPI_DSE_CALLBACK_ERROR;
}
- if (attr_index_config(inst->inst_be, "from DSE modify", 0, entryAfter, 0, 0)) {
+ if (attr_index_config(inst->inst_be, "from DSE modify", 0, entryAfter, 0, 0, returntext)) {
*returncode = LDAP_UNWILLING_TO_PERFORM;
return SLAPI_DSE_CALLBACK_ERROR;
}
@@ -364,7 +389,7 @@ ldbm_instance_index_config_enable_index(ldbm_instance *inst, Slapi_Entry *e)
ainfo_get(inst->inst_be, index_name, &ai);
}
if (!ai) {
- rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name);
+ rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, NULL);
}
if (rc == LDAP_SUCCESS) {
/* Assume the caller knows if it is OK to go online immediately */
diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
index 9d82c8228..f2ef5ecd4 100644
--- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
+++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
@@ -291,7 +291,7 @@ db2index_add_indexed_attr(backend *be, char *attrString)
}
}
- attr_index_config(be, "from db2index()", 0, e, 0, 0);
+ attr_index_config(be, "from db2index()", 0, e, 0, 0, NULL);
slapi_entry_free(e);
return (0);
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index 9a86c752b..a07acee5e 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -24,7 +24,7 @@ void attrinfo_delete(struct attrinfo **pp);
void ainfo_get(backend *be, char *type, struct attrinfo **at);
void attr_masks(backend *be, char *type, int *indexmask, int *syntaxmask);
void attr_masks_ex(backend *be, char *type, int *indexmask, int *syntaxmask, struct attrinfo **at);
-int attr_index_config(backend *be, char *fname, int lineno, Slapi_Entry *e, int init, int none);
+int attr_index_config(backend *be, char *fname, int lineno, Slapi_Entry *e, int init, int none, char *err_buf);
int db2index_add_indexed_attr(backend *be, char *attrString);
int ldbm_compute_init(void);
void attrinfo_deletetree(ldbm_instance *inst);
--
2.26.2

View File

@ -0,0 +1,50 @@
From 4c133d448f451b7c3b2ff1b42806c7516d623f09 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 7 Dec 2020 00:41:27 +0100
Subject: [PATCH] Issue 4315: performance search rate: nagle triggers high rate
of setsocketopt (#4437)
Bug description:
When a socket is set with NO_DELAY=0 (nagle), written pdu are buffered
until buffer is full or tcp_cork is set. This reduce network traffic when
the application writes partial pdu.
DS write complete pdu (results/entries/..) so it gives low benefit for DS.
In addition nagle being 'on' by default, DS sets/unset socket tcp_cork to send
immediately results/entries at each operation. This is an overhead of syscalls.
Fix description:
Disable nagle by default
relates: https://github.com/389ds/389-ds-base/issues/4315
Reviewed by: @mreynolds389, @Firstyear
Platforms tested: F33
---
ldap/servers/slapd/libglobs.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 7d5374c90..f8cf162e6 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1635,12 +1635,11 @@ FrontendConfig_init(void)
#endif /* USE_SYSCONF */
init_accesscontrol = cfg->accesscontrol = LDAP_ON;
-#if defined(LINUX)
- /* On Linux, by default, we use TCP_CORK so we must enable nagle */
- init_nagle = cfg->nagle = LDAP_ON;
-#else
+
+ /* nagle triggers set/unset TCP_CORK setsockopt per operation
+ * as DS only sends complete PDU there is no benefit of nagle/tcp_cork
+ */
init_nagle = cfg->nagle = LDAP_OFF;
-#endif
init_security = cfg->security = LDAP_OFF;
init_ssl_check_hostname = cfg->ssl_check_hostname = LDAP_ON;
cfg->tls_check_crl = TLS_CHECK_NONE;
--
2.26.2

View File

@ -1,213 +0,0 @@
From 3b3faee01e645577ad77ff4f38429a9e0806231b Mon Sep 17 00:00:00 2001
From: Simon Pichugin <simon.pichugin@gmail.com>
Date: Tue, 16 Jun 2020 20:35:05 +0200
Subject: [PATCH] Issue 51157 - Reindex task may create abandoned index file
Bug Description: Recreating an index for the same attribute but changing
the case of for example 1 letter, results in abandoned indexfile.
Fix Description: Add a test case to a newly created 'indexes' test suite.
When we remove the index config from the backend, remove the attribute
info from LDBM instance attributes.
https://pagure.io/389-ds-base/issue/51157
Reviewed by: firstyear, mreynolds (Thanks!)
---
dirsrvtests/tests/suites/indexes/__init__.py | 3 +
.../tests/suites/indexes/regression_test.py | 125 ++++++++++++++++++
ldap/servers/slapd/back-ldbm/ldbm_attr.c | 7 +
.../slapd/back-ldbm/ldbm_index_config.c | 3 +
.../servers/slapd/back-ldbm/proto-back-ldbm.h | 1 +
5 files changed, 139 insertions(+)
create mode 100644 dirsrvtests/tests/suites/indexes/__init__.py
create mode 100644 dirsrvtests/tests/suites/indexes/regression_test.py
diff --git a/dirsrvtests/tests/suites/indexes/__init__.py b/dirsrvtests/tests/suites/indexes/__init__.py
new file mode 100644
index 000000000..04441667e
--- /dev/null
+++ b/dirsrvtests/tests/suites/indexes/__init__.py
@@ -0,0 +1,3 @@
+"""
+ :Requirement: 389-ds-base: Indexes
+"""
diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
new file mode 100644
index 000000000..1a71f16e9
--- /dev/null
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
@@ -0,0 +1,125 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import time
+import os
+import pytest
+import ldap
+from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX
+from lib389.index import Indexes
+from lib389.backend import Backends
+from lib389.idm.user import UserAccounts
+from lib389.topologies import topology_st as topo
+
+pytestmark = pytest.mark.tier1
+
+
+def test_reindex_task_creates_abandoned_index_file(topo):
+ """
+ Recreating an index for the same attribute but changing
+ the case of for example 1 letter, results in abandoned indexfile
+
+ :id: 07ae5274-481a-4fa8-8074-e0de50d89ac6
+ :setup: Standalone instance
+ :steps:
+ 1. Create a user object with additional attributes:
+ objectClass: mozillaabpersonalpha
+ mozillaCustom1: xyz
+ 2. Add an index entry mozillacustom1
+ 3. Reindex the backend
+ 4. Check the content of the index (after it has been flushed to disk) mozillacustom1.db
+ 5. Remove the index
+ 6. Notice the mozillacustom1.db is removed
+ 7. Recreate the index but now use the exact case as mentioned in the schema
+ 8. Reindex the backend
+ 9. Check the content of the index (after it has been flushed to disk) mozillaCustom1.db
+ 10. Check that an ldapsearch does not return a result (mozillacustom1=xyz)
+ 11. Check that an ldapsearch returns the results (mozillaCustom1=xyz)
+ 12. Restart the instance
+ 13. Notice that an ldapsearch does not return a result(mozillacustom1=xyz)
+ 15. Check that an ldapsearch does not return a result (mozillacustom1=xyz)
+ 16. Check that an ldapsearch returns the results (mozillaCustom1=xyz)
+ 17. Reindex the backend
+ 18. Notice the second indexfile for this attribute
+ 19. Check the content of the index (after it has been flushed to disk) no mozillacustom1.db
+ 20. Check the content of the index (after it has been flushed to disk) mozillaCustom1.db
+ :expectedresults:
+ 1. Should Success.
+ 2. Should Success.
+ 3. Should Success.
+ 4. Should Success.
+ 5. Should Success.
+ 6. Should Success.
+ 7. Should Success.
+ 8. Should Success.
+ 9. Should Success.
+ 10. Should Success.
+ 11. Should Success.
+ 12. Should Success.
+ 13. Should Success.
+ 14. Should Success.
+ 15. Should Success.
+ 16. Should Success.
+ 17. Should Success.
+ 18. Should Success.
+ 19. Should Success.
+ 20. Should Success.
+ """
+
+ inst = topo.standalone
+ attr_name = "mozillaCustom1"
+ attr_value = "xyz"
+
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ user = users.create_test_user()
+ user.add("objectClass", "mozillaabpersonalpha")
+ user.add(attr_name, attr_value)
+
+ backends = Backends(inst)
+ backend = backends.get(DEFAULT_BENAME)
+ indexes = backend.get_indexes()
+ index = indexes.create(properties={
+ 'cn': attr_name.lower(),
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': ['eq', 'pres']
+ })
+
+ backend.reindex()
+ time.sleep(3)
+ assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
+ index.delete()
+ assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
+
+ index = indexes.create(properties={
+ 'cn': attr_name,
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': ['eq', 'pres']
+ })
+
+ backend.reindex()
+ time.sleep(3)
+ assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
+ assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name}.db")
+
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f"{attr_name}={attr_value}")
+ assert len(entries) > 0
+ inst.restart()
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f"{attr_name}={attr_value}")
+ assert len(entries) > 0
+
+ backend.reindex()
+ time.sleep(3)
+ assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
+ assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name}.db")
+
+
+if __name__ == "__main__":
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
index f0d418572..688c4f137 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
@@ -98,6 +98,13 @@ ainfo_cmp(
return (strcasecmp(a->ai_type, b->ai_type));
}
+void
+attrinfo_delete_from_tree(backend *be, struct attrinfo *ai)
+{
+ ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
+ avl_delete(&inst->inst_attrs, ai, ainfo_cmp);
+}
+
/*
* Called when a duplicate "index" line is encountered.
*
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
index 720f93036..9722d0ce7 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
@@ -201,7 +201,10 @@ ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb,
*returncode = LDAP_UNWILLING_TO_PERFORM;
rc = SLAPI_DSE_CALLBACK_ERROR;
}
+ attrinfo_delete_from_tree(inst->inst_be, ainfo);
}
+ /* Free attrinfo structure */
+ attrinfo_delete(&ainfo);
bail:
return rc;
}
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index a07acee5e..4d2524fd9 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -21,6 +21,7 @@
*/
struct attrinfo *attrinfo_new(void);
void attrinfo_delete(struct attrinfo **pp);
+void attrinfo_delete_from_tree(backend *be, struct attrinfo *ai);
void ainfo_get(backend *be, char *type, struct attrinfo **at);
void attr_masks(backend *be, char *type, int *indexmask, int *syntaxmask);
void attr_masks_ex(backend *be, char *type, int *indexmask, int *syntaxmask, struct attrinfo **at);
--
2.26.2

View File

@ -0,0 +1,39 @@
From 3007700a659ede03085f5390153cce483ce987a1 Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Fri, 4 Dec 2020 10:14:33 +1000
Subject: [PATCH] Issue 4460 - BUG - add machine name to subject alt names in
SSCA (#4472)
Bug Description: During SSCA creation, the server cert did not have
the machine name, which meant that the cert would not work without
reqcert = never.
Fix Description: Add the machine name as an alt name during SSCA
creation. It is not guaranteed this value is correct, but it
is better than nothing.
relates: https://github.com/389ds/389-ds-base/issues/4460
Author: William Brown <william@blackhats.net.au>
Review by: mreynolds389, droideck
---
src/lib389/lib389/instance/setup.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 7d42ba292..e46f2d1e5 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -887,7 +887,7 @@ class SetupDs(object):
tlsdb_inst = NssSsl(dbpath=os.path.join(etc_dirsrv_path, dir))
tlsdb_inst.import_rsa_crt(ca)
- csr = tlsdb.create_rsa_key_and_csr()
+ csr = tlsdb.create_rsa_key_and_csr(alt_names=[general['full_machine_name']])
(ca, crt) = ssca.rsa_ca_sign_csr(csr)
tlsdb.import_rsa_crt(ca, crt)
if general['selinux']:
--
2.26.2

View File

@ -1,668 +0,0 @@
From 282edde7950ceb2515d74fdbcc0a188131769d74 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 23 Jun 2020 16:38:55 -0400
Subject: [PATCH] Issue 51165 - add new access log keywords for wtime and
optime
Description: In addition to the "etime" stat in the access log we can also
add the time the operation spent in the work queue, and
how long the actual operation took. We now have "wtime"
and "optime" to track these stats in the access log.
Also updated logconf for notes=F (related to a different
ticket), and stats for wtime and optime.
relates: https://pagure.io/389-ds-base/issue/51165
Reviewed by: ?
---
ldap/admin/src/logconv.pl | 187 +++++++++++++++++++++++++++---
ldap/servers/slapd/add.c | 3 +
ldap/servers/slapd/bind.c | 4 +
ldap/servers/slapd/delete.c | 3 +
ldap/servers/slapd/modify.c | 3 +
ldap/servers/slapd/modrdn.c | 3 +
ldap/servers/slapd/operation.c | 24 ++++
ldap/servers/slapd/opshared.c | 3 +
ldap/servers/slapd/result.c | 49 ++++----
ldap/servers/slapd/slap.h | 13 ++-
ldap/servers/slapd/slapi-plugin.h | 26 ++++-
11 files changed, 269 insertions(+), 49 deletions(-)
diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
index f4808a101..1ed44a888 100755
--- a/ldap/admin/src/logconv.pl
+++ b/ldap/admin/src/logconv.pl
@@ -3,7 +3,7 @@
#
# BEGIN COPYRIGHT BLOCK
# Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
-# Copyright (C) 2013 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -55,7 +55,7 @@ my $reportStats = "";
my $dataLocation = "/tmp";
my $startTLSoid = "1.3.6.1.4.1.1466.20037";
my @statnames=qw(last last_str results srch add mod modrdn moddn cmp del abandon
- conns sslconns bind anonbind unbind notesA notesU etime);
+ conns sslconns bind anonbind unbind notesA notesU notesF etime);
my $s_stats;
my $m_stats;
my $verb = "no";
@@ -211,6 +211,7 @@ my $sslClientBindCount = 0;
my $sslClientFailedCount = 0;
my $objectclassTopCount= 0;
my $pagedSearchCount = 0;
+my $invalidFilterCount = 0;
my $bindCount = 0;
my $filterCount = 0;
my $baseCount = 0;
@@ -258,7 +259,7 @@ map {$conn{$_} = $_} @conncodes;
# hash db-backed hashes
my @hashnames = qw(attr rc src rsrc excount conn_hash ip_hash conncount nentries
filter base ds6xbadpwd saslmech saslconnop bindlist etime oid
- start_time_of_connection end_time_of_connection
+ start_time_of_connection end_time_of_connection notesf_conn_op
notesa_conn_op notesu_conn_op etime_conn_op nentries_conn_op
optype_conn_op time_conn_op srch_conn_op del_conn_op mod_conn_op
mdn_conn_op cmp_conn_op bind_conn_op unbind_conn_op ext_conn_op
@@ -926,7 +927,7 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){
}
while($op > 0){
# The bind op is not the same as the search op that triggered the notes=A.
- # We have adjust the key by decrementing the op count until we find the last bind op.
+ # We have to adjust the key by decrementing the op count until we find the last bind op.
$op--;
$binddn_key = "$srvRstCnt,$conn,$op";
if (exists($bind_conn_op->{$binddn_key}) && defined($bind_conn_op->{$binddn_key})) {
@@ -1049,9 +1050,60 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){
}
}
}
-} # end of unindexed search report
+ print "\n";
+}
+
+print "Invalid Attribute Filters: $invalidFilterCount\n";
+if ($invalidFilterCount > 0 && $verb eq "yes"){
+ my $conn_hash = $hashes->{conn_hash};
+ my $notesf_conn_op = $hashes->{notesf_conn_op};
+ my $time_conn_op = $hashes->{time_conn_op};
+ my $etime_conn_op = $hashes->{etime_conn_op};
+ my $nentries_conn_op = $hashes->{nentries_conn_op};
+ my $filter_conn_op = $hashes->{filter_conn_op};
+ my $bind_conn_op = $hashes->{bind_conn_op};
+ my $notesCount = 1;
+ my $unindexedIp;
+ my $binddn_key;
+ my %uniqFilt = (); # hash of unique filters
+ my %uniqFilter = (); # hash of unique filters bind dn
+ my %uniqBindDNs = (); # hash of unique bind dn's
+ my %uniqBindFilters = (); # hash of filters for a bind DN
+
+ while (my ($srcnt_conn_op, $count) = each %{$notesf_conn_op}) {
+ my ($srvRstCnt, $conn, $op) = split(",", $srcnt_conn_op);
+ my $attrIp = getIPfromConn($conn, $srvRstCnt);
+ print "\n Invalid Attribute Filter #".$notesCount." (notes=F)\n";
+ print " - Date/Time: $time_conn_op->{$srcnt_conn_op}\n";
+ print " - Connection Number: $conn\n";
+ print " - Operation Number: $op\n";
+ print " - Etime: $etime_conn_op->{$srcnt_conn_op}\n";
+ print " - Nentries: $nentries_conn_op->{$srcnt_conn_op}\n";
+ print " - IP Address: $attrIp\n";
+ if (exists($filter_conn_op->{$srcnt_conn_op}) && defined($filter_conn_op->{$srcnt_conn_op})) {
+ print " - Search Filter: $filter_conn_op->{$srcnt_conn_op}\n";
+ $uniqFilt{$filter_conn_op->{$srcnt_conn_op}}++;
+ }
+ while($op > 0){
+ # The bind op is not the same as the search op that triggered the notes=A.
+ # We have to adjust the key by decrementing the op count until we find the last bind op.
+ $op--;
+ $binddn_key = "$srvRstCnt,$conn,$op";
+ if (exists($bind_conn_op->{$binddn_key}) && defined($bind_conn_op->{$binddn_key})) {
+ print " - Bind DN: $bind_conn_op->{$binddn_key}\n";
+ $uniqBindDNs{$bind_conn_op->{$binddn_key}}++;
+ if( $uniqFilt{$filter_conn_op->{$srcnt_conn_op}} && defined($filter_conn_op->{$srcnt_conn_op})) {
+ $uniqBindFilters{$bind_conn_op->{$binddn_key}}{$filter_conn_op->{$srcnt_conn_op}}++;
+ $uniqFilter{$filter_conn_op->{$srcnt_conn_op}}{$bind_conn_op->{$binddn_key}}++;
+ }
+ last;
+ }
+ }
+ $notesCount++;
+ }
+ print "\n";
+}
-print "\n";
print "FDs Taken: $fdTaken\n";
print "FDs Returned: $fdReturned\n";
print "Highest FD Taken: $highestFdTaken\n\n";
@@ -1386,20 +1438,20 @@ if ($usage =~ /l/ || $verb eq "yes"){
}
}
-#########################################
-# #
-# Gather and Process the unique etimes #
-# #
-#########################################
+##############################################################
+# #
+# Gather and Process the unique etimes, wtimes, and optimes #
+# #
+##############################################################
my $first;
if ($usage =~ /t/i || $verb eq "yes"){
+ # Print the elapsed times (etime)
+
my $etime = $hashes->{etime};
my @ekeys = keys %{$etime};
- #
# print most often etimes
- #
- print "\n\n----- Top $sizeCount Most Frequent etimes -----\n\n";
+ print "\n\n----- Top $sizeCount Most Frequent etimes (elapsed times) -----\n\n";
my $eloop = 0;
my $retime = 0;
foreach my $et (sort { $etime->{$b} <=> $etime->{$a} } @ekeys) {
@@ -1411,16 +1463,84 @@ if ($usage =~ /t/i || $verb eq "yes"){
printf "%-8s %-12s\n", $etime->{ $et }, "etime=$et";
$eloop++;
}
- #
+ if ($eloop == 0) {
+ print "None";
+ }
# print longest etimes
- #
- print "\n\n----- Top $sizeCount Longest etimes -----\n\n";
+ print "\n\n----- Top $sizeCount Longest etimes (elapsed times) -----\n\n";
$eloop = 0;
foreach my $et (sort { $b <=> $a } @ekeys) {
if ($eloop == $sizeCount) { last; }
printf "%-12s %-10s\n","etime=$et",$etime->{ $et };
$eloop++;
}
+ if ($eloop == 0) {
+ print "None";
+ }
+
+ # Print the wait times (wtime)
+
+ my $wtime = $hashes->{wtime};
+ my @wkeys = keys %{$wtime};
+ # print most often wtimes
+ print "\n\n----- Top $sizeCount Most Frequent wtimes (wait times) -----\n\n";
+ $eloop = 0;
+ $retime = 0;
+ foreach my $et (sort { $wtime->{$b} <=> $wtime->{$a} } @wkeys) {
+ if ($eloop == $sizeCount) { last; }
+ if ($retime ne "2"){
+ $first = $et;
+ $retime = "2";
+ }
+ printf "%-8s %-12s\n", $wtime->{ $et }, "wtime=$et";
+ $eloop++;
+ }
+ if ($eloop == 0) {
+ print "None";
+ }
+ # print longest wtimes
+ print "\n\n----- Top $sizeCount Longest wtimes (wait times) -----\n\n";
+ $eloop = 0;
+ foreach my $et (sort { $b <=> $a } @wkeys) {
+ if ($eloop == $sizeCount) { last; }
+ printf "%-12s %-10s\n","wtime=$et",$wtime->{ $et };
+ $eloop++;
+ }
+ if ($eloop == 0) {
+ print "None";
+ }
+
+ # Print the operation times (optime)
+
+ my $optime = $hashes->{optime};
+ my @opkeys = keys %{$optime};
+ # print most often optimes
+ print "\n\n----- Top $sizeCount Most Frequent optimes (actual operation times) -----\n\n";
+ $eloop = 0;
+ $retime = 0;
+ foreach my $et (sort { $optime->{$b} <=> $optime->{$a} } @opkeys) {
+ if ($eloop == $sizeCount) { last; }
+ if ($retime ne "2"){
+ $first = $et;
+ $retime = "2";
+ }
+ printf "%-8s %-12s\n", $optime->{ $et }, "optime=$et";
+ $eloop++;
+ }
+ if ($eloop == 0) {
+ print "None";
+ }
+ # print longest optimes
+ print "\n\n----- Top $sizeCount Longest optimes (actual operation times) -----\n\n";
+ $eloop = 0;
+ foreach my $et (sort { $b <=> $a } @opkeys) {
+ if ($eloop == $sizeCount) { last; }
+ printf "%-12s %-10s\n","optime=$et",$optime->{ $et };
+ $eloop++;
+ }
+ if ($eloop == 0) {
+ print "None";
+ }
}
#######################################
@@ -2152,6 +2272,26 @@ sub parseLineNormal
if (m/ RESULT err=/ && m/ notes=[A-Z,]*P/){
$pagedSearchCount++;
}
+ if (m/ RESULT err=/ && m/ notes=[A-Z,]*F/){
+ $invalidFilterCount++;
+ $con = "";
+ if ($_ =~ /conn= *([0-9A-Z]+)/i){
+ $con = $1;
+ if ($_ =~ /op= *([0-9\-]+)/i){ $op = $1;}
+ }
+
+ if($reportStats){ inc_stats('notesF',$s_stats,$m_stats); }
+ if ($usage =~ /u/ || $usage =~ /U/ || $verb eq "yes"){
+ if($_ =~ /etime= *([0-9.]+)/i ){
+ if($1 >= $minEtime){
+ $hashes->{etime_conn_op}->{"$serverRestartCount,$con,$op"} = $1;
+ $hashes->{notesf_conn_op}->{"$serverRestartCount,$con,$op"}++;
+ if ($_ =~ / *([0-9a-z:\/]+)/i){ $hashes->{time_conn_op}->{"$serverRestartCount,$con,$op"} = $1; }
+ if ($_ =~ /nentries= *([0-9]+)/i ){ $hashes->{nentries_conn_op}->{"$serverRestartCount,$con,$op"} = $1; }
+ }
+ }
+ }
+ }
if (m/ notes=[A-Z,]*A/){
$con = "";
if ($_ =~ /conn= *([0-9A-Z]+)/i){
@@ -2435,6 +2575,16 @@ sub parseLineNormal
if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{etime}->{$etime_val}++; }
if ($reportStats){ inc_stats_val('etime',$etime_val,$s_stats,$m_stats); }
}
+ if ($_ =~ /wtime= *([0-9.]+)/ ) {
+ my $wtime_val = $1;
+ if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{wtime}->{$wtime_val}++; }
+ if ($reportStats){ inc_stats_val('wtime',$wtime_val,$s_stats,$m_stats); }
+ }
+ if ($_ =~ /optime= *([0-9.]+)/ ) {
+ my $optime_val = $1;
+ if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{optime}->{$optime_val}++; }
+ if ($reportStats){ inc_stats_val('optime',$optime_val,$s_stats,$m_stats); }
+ }
if ($_ =~ / tag=101 / || $_ =~ / tag=111 / || $_ =~ / tag=100 / || $_ =~ / tag=115 /){
if ($_ =~ / nentries= *([0-9]+)/i ){
my $nents = $1;
@@ -2555,7 +2705,7 @@ sub parseLineNormal
}
}
}
- if (/ RESULT err=/ && / tag=97 nentries=0 etime=/ && $_ =~ /dn=\"(.*)\"/i){
+ if (/ RESULT err=/ && / tag=97 nentries=0 / && $_ =~ /dn=\"(.*)\"/i){
# Check if this is a sasl bind, if see we need to add the RESULT's dn as a bind dn
my $binddn = $1;
my ($conn, $op);
@@ -2680,6 +2830,7 @@ print_stats_block
$stats->{'unbind'},
$stats->{'notesA'},
$stats->{'notesU'},
+ $stats->{'notesF'},
$stats->{'etime'}),
"\n" );
} else {
diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c
index 06ca1ee79..52c64fa3c 100644
--- a/ldap/servers/slapd/add.c
+++ b/ldap/servers/slapd/add.c
@@ -441,6 +441,9 @@ op_shared_add(Slapi_PBlock *pb)
internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL);
pwpolicy = new_passwdPolicy(pb, slapi_entry_get_dn(e));
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
/* target spec is used to decide which plugins are applicable for the operation */
operation_set_target_spec(operation, slapi_entry_get_sdn(e));
diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c
index 310216e89..55f865077 100644
--- a/ldap/servers/slapd/bind.c
+++ b/ldap/servers/slapd/bind.c
@@ -87,6 +87,10 @@ do_bind(Slapi_PBlock *pb)
send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, NULL, 0, NULL);
goto free_and_return;
}
+
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(pb_op);
+
ber = pb_op->o_ber;
/*
diff --git a/ldap/servers/slapd/delete.c b/ldap/servers/slapd/delete.c
index c0e61adf1..1a7209317 100644
--- a/ldap/servers/slapd/delete.c
+++ b/ldap/servers/slapd/delete.c
@@ -236,6 +236,9 @@ op_shared_delete(Slapi_PBlock *pb)
slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL);
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
sdn = slapi_sdn_new_dn_byval(rawdn);
dn = slapi_sdn_get_dn(sdn);
slapi_pblock_set(pb, SLAPI_DELETE_TARGET_SDN, (void *)sdn);
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
index 259bedfff..a186dbde3 100644
--- a/ldap/servers/slapd/modify.c
+++ b/ldap/servers/slapd/modify.c
@@ -626,6 +626,9 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
slapi_pblock_get(pb, SLAPI_SKIP_MODIFIED_ATTRS, &skip_modified_attrs);
slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn);
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
if (sdn) {
passin_sdn = 1;
} else {
diff --git a/ldap/servers/slapd/modrdn.c b/ldap/servers/slapd/modrdn.c
index 3efe584a7..e04916b83 100644
--- a/ldap/servers/slapd/modrdn.c
+++ b/ldap/servers/slapd/modrdn.c
@@ -417,6 +417,9 @@ op_shared_rename(Slapi_PBlock *pb, int passin_args)
internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL);
slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn);
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
/*
* If ownership has not been passed to this function, we replace the
* string input fields within the pblock with strdup'd copies. Why?
diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c
index ff16cd906..4dd3481c7 100644
--- a/ldap/servers/slapd/operation.c
+++ b/ldap/servers/slapd/operation.c
@@ -651,3 +651,27 @@ slapi_operation_time_expiry(Slapi_Operation *o, time_t timeout, struct timespec
{
slapi_timespec_expire_rel(timeout, &(o->o_hr_time_rel), expiry);
}
+
+/* Set the time the operation actually started */
+void
+slapi_operation_set_time_started(Slapi_Operation *o)
+{
+ clock_gettime(CLOCK_MONOTONIC, &(o->o_hr_time_started_rel));
+}
+
+/* The time diff of how long the operation took once it actually started */
+void
+slapi_operation_op_time_elapsed(Slapi_Operation *o, struct timespec *elapsed)
+{
+ struct timespec o_hr_time_now;
+ clock_gettime(CLOCK_MONOTONIC, &o_hr_time_now);
+
+ slapi_timespec_diff(&o_hr_time_now, &(o->o_hr_time_started_rel), elapsed);
+}
+
+/* The time diff the operation waited in the work queue */
+void
+slapi_operation_workq_time_elapsed(Slapi_Operation *o, struct timespec *elapsed)
+{
+ slapi_timespec_diff(&(o->o_hr_time_started_rel), &(o->o_hr_time_rel), elapsed);
+}
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 9fe78655c..c0bc5dcd0 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -284,6 +284,9 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &sdn);
slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
if (NULL == sdn) {
sdn = slapi_sdn_new_dn_byval(base);
slapi_pblock_set(pb, SLAPI_SEARCH_TARGET_SDN, sdn);
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index 0b13c30e9..61efb6f8d 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -1975,6 +1975,8 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
CSN *operationcsn = NULL;
char csn_str[CSN_STRSIZE + 5];
char etime[ETIME_BUFSIZ] = {0};
+ char wtime[ETIME_BUFSIZ] = {0};
+ char optime[ETIME_BUFSIZ] = {0};
int pr_idx = -1;
int pr_cookie = -1;
uint32_t operation_notes;
@@ -1982,19 +1984,26 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
int32_t op_id;
int32_t op_internal_id;
int32_t op_nested_count;
+ struct timespec o_hr_time_end;
get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count);
-
slapi_pblock_get(pb, SLAPI_PAGED_RESULTS_INDEX, &pr_idx);
slapi_pblock_get(pb, SLAPI_PAGED_RESULTS_COOKIE, &pr_cookie);
-
internal_op = operation_is_flag_set(op, OP_FLAG_INTERNAL);
- struct timespec o_hr_time_end;
+ /* total elapsed time */
slapi_operation_time_elapsed(op, &o_hr_time_end);
+ snprintf(etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
+
+ /* wait time */
+ slapi_operation_workq_time_elapsed(op, &o_hr_time_end);
+ snprintf(wtime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
+
+ /* op time */
+ slapi_operation_op_time_elapsed(op, &o_hr_time_end);
+ snprintf(optime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
- snprintf(etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
operation_notes = slapi_pblock_get_operation_notes(pb);
@@ -2025,16 +2034,16 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
if (!internal_op) {
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s"
+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s"
", SASL bind in progress\n",
op->o_connid,
op->o_opid,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str);
} else {
-#define LOG_SASLMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s, SASL bind in progress\n"
+#define LOG_SASLMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s, SASL bind in progress\n"
slapi_log_access(LDAP_DEBUG_ARGS,
connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_SASLMSG_FMT :
LOG_CONN_OP_FMT_EXT_INT LOG_SASLMSG_FMT,
@@ -2043,7 +2052,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
op_internal_id,
op_nested_count,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str);
}
} else if (op->o_tag == LDAP_REQ_BIND && err == LDAP_SUCCESS) {
@@ -2057,15 +2066,15 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
if (!internal_op) {
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s"
+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s"
" dn=\"%s\"\n",
op->o_connid,
op->o_opid,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, dn ? dn : "");
} else {
-#define LOG_BINDMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s dn=\"%s\"\n"
+#define LOG_BINDMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s dn=\"%s\"\n"
slapi_log_access(LDAP_DEBUG_ARGS,
connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_BINDMSG_FMT :
LOG_CONN_OP_FMT_EXT_INT LOG_BINDMSG_FMT,
@@ -2074,7 +2083,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
op_internal_id,
op_nested_count,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, dn ? dn : "");
}
slapi_ch_free((void **)&dn);
@@ -2083,15 +2092,15 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
if (!internal_op) {
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s"
+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s"
" pr_idx=%d pr_cookie=%d\n",
op->o_connid,
op->o_opid,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, pr_idx, pr_cookie);
} else {
-#define LOG_PRMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s pr_idx=%d pr_cookie=%d \n"
+#define LOG_PRMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s pr_idx=%d pr_cookie=%d \n"
slapi_log_access(LDAP_DEBUG_ARGS,
connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_PRMSG_FMT :
LOG_CONN_OP_FMT_EXT_INT LOG_PRMSG_FMT,
@@ -2100,7 +2109,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
op_internal_id,
op_nested_count,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, pr_idx, pr_cookie);
}
} else if (!internal_op) {
@@ -2114,11 +2123,11 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
}
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s%s\n",
+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s\n",
op->o_connid,
op->o_opid,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, ext_str);
if (pbtxt) {
/* if !pbtxt ==> ext_str == "". Don't free ext_str. */
@@ -2126,7 +2135,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
}
} else {
int optype;
-#define LOG_MSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s\n"
+#define LOG_MSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s\n"
slapi_log_access(LDAP_DEBUG_ARGS,
connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_MSG_FMT :
LOG_CONN_OP_FMT_EXT_INT LOG_MSG_FMT,
@@ -2135,7 +2144,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
op_internal_id,
op_nested_count,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str);
/*
* If this is an unindexed search we should log it in the error log if
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index cef8c789c..8e76393c3 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1538,16 +1538,17 @@ typedef struct slapi_operation_results
*/
typedef struct op
{
- BerElement *o_ber; /* ber of the request */
- ber_int_t o_msgid; /* msgid of the request */
- ber_tag_t o_tag; /* tag of the request */
+ BerElement *o_ber; /* ber of the request */
+ ber_int_t o_msgid; /* msgid of the request */
+ ber_tag_t o_tag; /* tag of the request */
struct timespec o_hr_time_rel; /* internal system time op initiated */
struct timespec o_hr_time_utc; /* utc system time op initiated */
- int o_isroot; /* requestor is manager */
+ struct timespec o_hr_time_started_rel; /* internal system time op started */
+ int o_isroot; /* requestor is manager */
Slapi_DN o_sdn; /* dn bound when op was initiated */
- char *o_authtype; /* auth method used to bind dn */
+ char *o_authtype; /* auth method used to bind dn */
int o_ssf; /* ssf for this operation (highest between SASL and TLS/SSL) */
- int o_opid; /* id of this operation */
+ int o_opid; /* id of this operation */
PRUint64 o_connid; /* id of conn initiating this op; for logging only */
void *o_handler_data;
result_handler o_result_handler;
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 834a98742..8d9c3fa6a 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -8210,13 +8210,29 @@ void slapi_operation_time_elapsed(Slapi_Operation *o, struct timespec *elapsed);
*/
void slapi_operation_time_initiated(Slapi_Operation *o, struct timespec *initiated);
/**
- * Given an operation and a timeout, return a populate struct with the expiry
- * time of the operation suitable for checking with slapi_timespec_expire_check
+ * Given an operation, determine the time elapsed since the op
+ * was actually started.
*
- * \param Slapi_Operation o - the operation that is in progress
- * \param time_t timeout the seconds relative to operation initiation to expiry at.
- * \param struct timespec *expiry the timespec to popluate with the relative expiry.
+ * \param Slapi_Operation o - the operation which is inprogress
+ * \param struct timespec *elapsed - location where the time difference will be
+ * placed.
+ */
+void slapi_operation_op_time_elapsed(Slapi_Operation *o, struct timespec *elapsed);
+/**
+ * Given an operation, determine the time elapsed that the op spent
+ * in the work queue before actually being dispatched to a worker thread
+ *
+ * \param Slapi_Operation o - the operation which is inprogress
+ * \param struct timespec *elapsed - location where the time difference will be
+ * placed.
+ */
+void slapi_operation_workq_time_elapsed(Slapi_Operation *o, struct timespec *elapsed);
+/**
+ * Set the time the operation actually started
+ *
+ * \param Slapi_Operation o - the operation which is inprogress
*/
+void slapi_operation_set_time_started(Slapi_Operation *o);
#endif
/**
--
2.26.2

View File

@ -0,0 +1,50 @@
From 1386b140d8cc81d37fdea6593487fe542587ccac Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 9 Dec 2020 09:52:08 -0500
Subject: [PATCH] Issue 4483 - heap-use-after-free in slapi_be_getsuffix
Description: heap-use-after-free in slapi_be_getsuffix after disk
monitoring runs. This feature is freeing a list of
backends which it does not need to do.
Fixes: https://github.com/389ds/389-ds-base/issues/4483
Reviewed by: firstyear & tbordaz(Thanks!!)
---
ldap/servers/slapd/daemon.c | 13 +------------
1 file changed, 1 insertion(+), 12 deletions(-)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 49199e4df..691f77570 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -606,12 +606,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
now = start;
while ((now - start) < grace_period) {
if (g_get_shutdown()) {
- be_index = 0;
- if (be_list[be_index] != NULL) {
- while ((be = be_list[be_index++])) {
- slapi_be_free(&be);
- }
- }
slapi_ch_array_free(dirs);
dirs = NULL;
return;
@@ -706,12 +700,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
}
- be_index = 0;
- if (be_list[be_index] != NULL) {
- while ((be = be_list[be_index++])) {
- slapi_be_free(&be);
- }
- }
+
slapi_ch_array_free(dirs);
dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */
g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL);
--
2.26.2

View File

@ -1,31 +0,0 @@
From ec1714c81290a03ae9aa5fd10acf3e9be71596d7 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 11 Jun 2020 15:47:43 -0400
Subject: [PATCH] Issue 50912 - pwdReset can be modified by a user
Description: The attribute "pwdReset" should only be allowed to be set by the
server. Update schema definition to include NO-USER-MODIFICATION
relates: https://pagure.io/389-ds-base/issue/50912
Reviewed by: mreynolds(one line commit rule)
---
ldap/schema/02common.ldif | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif
index 966636bef..c6dc074db 100644
--- a/ldap/schema/02common.ldif
+++ b/ldap/schema/02common.ldif
@@ -76,7 +76,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2349 NAME ( 'passwordDictCheck' 'pwdDict
attributeTypes: ( 2.16.840.1.113730.3.1.2350 NAME ( 'passwordDictPath' 'pwdDictPath' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2351 NAME ( 'passwordUserAttributes' 'pwdUserAttributes' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2352 NAME ( 'passwordBadWords' 'pwdBadWords' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
-attributeTypes: ( 2.16.840.1.113730.3.1.2366 NAME 'pwdReset' DESC '389 Directory Server password policy attribute type' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE USAGE directoryOperation X-ORIGIN '389 Directory Server' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2366 NAME 'pwdReset' DESC '389 Directory Server password policy attribute type' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.198 NAME 'memberURL' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.199 NAME 'memberCertificateDescription' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.207 NAME 'vlvBase' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'Netscape Directory Server' )
--
2.26.2

View File

@ -0,0 +1,65 @@
From 6e827f6d5e64e0be316f4e17111b2884899d302c Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 16 Dec 2020 16:30:28 +0100
Subject: [PATCH] Issue 4480 - Unexpected info returned to ldap request (#4491)
Bug description:
If the bind entry does not exist, the bind result info
reports that 'No such entry'. It should not give any
information about whether the target entry exists
Fix description:
Does not return any additional information during a bind
relates: https://github.com/389ds/389-ds-base/issues/4480
Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all)
Platforms tested: F31
---
dirsrvtests/tests/suites/basic/basic_test.py | 1 -
ldap/servers/slapd/back-ldbm/ldbm_config.c | 2 +-
ldap/servers/slapd/result.c | 2 +-
3 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 120207321..1ae82dcdd 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -1400,7 +1400,6 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance):
assert not dscreate_long_instance.exists()
-
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index 3fe86d567..10cef250f 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -1234,7 +1234,7 @@ ldbm_config_search_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
if (attrs) {
for (size_t i = 0; attrs[i]; i++) {
if (ldbm_config_moved_attr(attrs[i])) {
- slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry");
+ slapi_pblock_set(pb, SLAPI_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry");
break;
}
}
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index 9daf3b151..ab0d79454 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -355,7 +355,7 @@ send_ldap_result_ext(
if (text) {
pbtext = text;
} else {
- slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &pbtext);
+ slapi_pblock_get(pb, SLAPI_RESULT_TEXT, &pbtext);
}
if (operation == NULL) {
--
2.26.2

View File

@ -1,202 +0,0 @@
From a6a52365df26edd4f6b0028056395d943344d787 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 11 Jun 2020 15:30:28 -0400
Subject: [PATCH] Issue 50791 - Healthcheck should look for notes=A/F in access
log
Description: Add checks for notes=A (fully unindexed search) and
notes=F (Unknown attribute in search filter) in the
current access log.
relates: https://pagure.io/389-ds-base/issue/50791
Reviewed by: firstyear(Thanks!)
---
src/lib389/lib389/cli_ctl/health.py | 4 +-
src/lib389/lib389/dirsrv_log.py | 72 +++++++++++++++++++++++++++--
src/lib389/lib389/lint.py | 26 ++++++++++-
3 files changed, 96 insertions(+), 6 deletions(-)
diff --git a/src/lib389/lib389/cli_ctl/health.py b/src/lib389/lib389/cli_ctl/health.py
index 6333a753a..89484a11b 100644
--- a/src/lib389/lib389/cli_ctl/health.py
+++ b/src/lib389/lib389/cli_ctl/health.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -18,6 +18,7 @@ from lib389.monitor import MonitorDiskSpace
from lib389.replica import Replica, Changelog5
from lib389.nss_ssl import NssSsl
from lib389.dseldif import FSChecks, DSEldif
+from lib389.dirsrv_log import DirsrvAccessLog
from lib389 import lint
from lib389 import plugins
from lib389._constants import DSRC_HOME
@@ -37,6 +38,7 @@ CHECK_OBJECTS = [
Changelog5,
DSEldif,
NssSsl,
+ DirsrvAccessLog,
]
diff --git a/src/lib389/lib389/dirsrv_log.py b/src/lib389/lib389/dirsrv_log.py
index baac2a3c9..7bed4bb17 100644
--- a/src/lib389/lib389/dirsrv_log.py
+++ b/src/lib389/lib389/dirsrv_log.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -9,12 +9,17 @@
"""Helpers for managing the directory server internal logs.
"""
+import copy
import re
import gzip
from dateutil.parser import parse as dt_parse
from glob import glob
from lib389.utils import ensure_bytes
-
+from lib389._mapped_object_lint import DSLint
+from lib389.lint import (
+ DSLOGNOTES0001, # Unindexed search
+ DSLOGNOTES0002, # Unknown attr in search filter
+)
# Because many of these settings can change live, we need to check for certain
# attributes all the time.
@@ -35,7 +40,7 @@ MONTH_LOOKUP = {
}
-class DirsrvLog(object):
+class DirsrvLog(DSLint):
"""Class of functions to working with the various DIrectory Server logs
"""
def __init__(self, dirsrv):
@@ -189,6 +194,67 @@ class DirsrvAccessLog(DirsrvLog):
self.full_regexs = [self.prog_m1, self.prog_con, self.prog_discon]
self.result_regexs = [self.prog_notes, self.prog_repl,
self.prog_result]
+ @classmethod
+ def lint_uid(cls):
+ return 'logs'
+
+ def _log_get_search_stats(self, conn, op):
+ lines = self.match(f".* conn={conn} op={op} SRCH base=.*")
+ if len(lines) != 1:
+ return None
+
+ quoted_vals = re.findall('"([^"]*)"', lines[0])
+ return {
+ 'base': quoted_vals[0],
+ 'filter': quoted_vals[1],
+ 'timestamp': re.findall('\[(.*)\]', lines[0])[0],
+ 'scope': lines[0].split(' scope=', 1)[1].split(' ',1)[0]
+ }
+
+ def _lint_notes(self):
+ """
+ Check for notes=A (fully unindexed searches), and
+ notes=F (unknown attribute in filter)
+ """
+ for pattern, lint_report in [(".* notes=A", DSLOGNOTES0001), (".* notes=F", DSLOGNOTES0002)]:
+ lines = self.match(pattern)
+ if len(lines) > 0:
+ count = 0
+ searches = []
+ for line in lines:
+ if ' RESULT err=' in line:
+ # Looks like a valid notes=A/F
+ conn = line.split(' conn=', 1)[1].split(' ',1)[0]
+ op = line.split(' op=', 1)[1].split(' ',1)[0]
+ etime = line.split(' etime=', 1)[1].split(' ',1)[0]
+ stats = self._log_get_search_stats(conn, op)
+ if stats is not None:
+ timestamp = stats['timestamp']
+ base = stats['base']
+ scope = stats['scope']
+ srch_filter = stats['filter']
+ count += 1
+ if lint_report == DSLOGNOTES0001:
+ searches.append(f'\n [{count}] Unindexed Search\n'
+ f' - date: {timestamp}\n'
+ f' - conn/op: {conn}/{op}\n'
+ f' - base: {base}\n'
+ f' - scope: {scope}\n'
+ f' - filter: {srch_filter}\n'
+ f' - etime: {etime}\n')
+ else:
+ searches.append(f'\n [{count}] Invalid Attribute in Filter\n'
+ f' - date: {timestamp}\n'
+ f' - conn/op: {conn}/{op}\n'
+ f' - filter: {srch_filter}\n')
+ if len(searches) > 0:
+ report = copy.deepcopy(lint_report)
+ report['items'].append(self._get_log_path())
+ report['detail'] = report['detail'].replace('NUMBER', str(count))
+ for srch in searches:
+ report['detail'] += srch
+ yield report
+
def _get_log_path(self):
"""Return the current log file location"""
diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
index a103feec7..4b1700b92 100644
--- a/src/lib389/lib389/lint.py
+++ b/src/lib389/lib389/lint.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -253,7 +253,7 @@ can use the CLI tool "dsconf" to resolve the conflict. Here is an example:
Remove conflict entry and keep only the original/counterpart entry:
- # dsconf slapd-YOUR_INSTANCE repl-conflict remove <DN of conflict entry>
+ # dsconf slapd-YOUR_INSTANCE repl-conflict delete <DN of conflict entry>
Replace the original/counterpart entry with the conflict entry:
@@ -418,3 +418,25 @@ until the time issues have been resolved:
Also look at https://access.redhat.com/documentation/en-us/red_hat_directory_server/11/html/administration_guide/managing_replication-troubleshooting_replication_related_problems
and find the paragraph "Too much time skew"."""
}
+
+DSLOGNOTES0001 = {
+ 'dsle': 'DSLOGNOTES0001',
+ 'severity': 'Medium',
+ 'description': 'Unindexed Search',
+ 'items': ['Performance'],
+ 'detail': """Found NUMBER fully unindexed searches in the current access log.
+Unindexed searches can cause high CPU and slow down the entire server's performance.\n""",
+ 'fix': """Examine the searches that are unindexed, and either properly index the attributes
+in the filter, increase the nsslapd-idlistscanlimit, or stop using that filter."""
+}
+
+DSLOGNOTES0002 = {
+ 'dsle': 'DSLOGNOTES0002',
+ 'severity': 'Medium',
+ 'description': 'Unknown Attribute In Filter',
+ 'items': ['Possible Performance Impact'],
+ 'detail': """Found NUMBER searches in the current access log that are using an
+unknown attribute in the search filter.\n""",
+ 'fix': """Stop using this these unknown attributes in the filter, or add the schema
+to the server and make sure it's properly indexed."""
+}
--
2.26.2

View File

@ -0,0 +1,108 @@
From 1fef5649ce05a17a741789cafb65269c099b396b Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Wed, 16 Dec 2020 16:21:35 +0100
Subject: [PATCH 2/3] Issue #4504 - Fix pytest test_dsconf_replication_monitor
(#4505)
(cherry picked from commit 0b08e6f35b000d1383580be59f902ac813e940f2)
---
.../tests/suites/clu/repl_monitor_test.py | 50 +++++++++++++------
1 file changed, 36 insertions(+), 14 deletions(-)
diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
index b03d170c8..eb18d2da2 100644
--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
@@ -9,6 +9,7 @@
import time
import subprocess
import pytest
+import re
from lib389.cli_conf.replication import get_repl_monitor_info
from lib389.tasks import *
@@ -67,6 +68,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No
log.info('Reset log file')
f.truncate(0)
+def get_hostnames_from_log(port1, port2):
+ # Get the supplier host names as displayed in replication monitor output
+ with open(LOG_FILE, 'r') as logfile:
+ logtext = logfile.read()
+ # search for Supplier :hostname:port
+ # and use \D to insure there is no more number is after
+ # the matched port (i.e that 10 is not matching 101)
+ regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)'
+ match=re.search(regexp, logtext)
+ host_m1 = 'localhost.localdomain'
+ if (match is not None):
+ host_m1 = match.group(2)
+ # Same for master 2
+ regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)'
+ match=re.search(regexp, logtext)
+ host_m2 = 'localhost.localdomain'
+ if (match is not None):
+ host_m2 = match.group(2)
+ return (host_m1, host_m2)
@pytest.mark.ds50545
@pytest.mark.bz1739718
@@ -95,9 +115,6 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
m1 = topology_m2.ms["master1"]
m2 = topology_m2.ms["master2"]
- alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')',
- 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')']
-
connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port)
content_list = ['Replica Root: dc=example,dc=com',
'Replica ID: 1',
@@ -160,20 +177,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
'001',
m1.host + ':' + str(m1.port)]
- dsrc_content = '[repl-monitor-connections]\n' \
- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- '\n' \
- '[repl-monitor-aliases]\n' \
- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
- 'M2 = ' + m2.host + ':' + str(m2.port)
-
connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]
- aliases = ['M1=' + m1.host + ':' + str(m1.port),
- 'M2=' + m2.host + ':' + str(m2.port)]
-
args = FakeArgs()
args.connections = connections
args.aliases = None
@@ -181,8 +187,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
log.info('Run replication monitor with connections option')
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port)
check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
+ # Prepare the data for next tests
+ aliases = ['M1=' + host_m1 + ':' + str(m1.port),
+ 'M2=' + host_m2 + ':' + str(m2.port)]
+
+ alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')',
+ 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')']
+
+ dsrc_content = '[repl-monitor-connections]\n' \
+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ '\n' \
+ '[repl-monitor-aliases]\n' \
+ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \
+ 'M2 = ' + host_m2 + ':' + str(m2.port)
+
log.info('Run replication monitor with aliases option')
args.aliases = aliases
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
--
2.26.2

View File

@ -1,51 +0,0 @@
From 2844d4ad90cbbd23ae75309e50ae4d7145586bb7 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 10 Jun 2020 14:07:24 -0400
Subject: [PATCH] Issue 51144 - dsctl fails with instance names that contain
slapd-
Bug Description: If an instance name contains 'slapd-' the CLI breaks:
slapd-test-slapd
Fix Description: Only strip off "slapd-" from the front of the instance
name.
relates: https://pagure.io/389-ds-base/issue/51144
Reviewed by: firstyear(Thanks!)
---
src/lib389/lib389/__init__.py | 2 +-
src/lib389/lib389/dseldif.py | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 0ff1ab173..63d44b60a 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -710,7 +710,7 @@ class DirSrv(SimpleLDAPObject, object):
# Don't need a default value now since it's set in init.
if serverid is None and hasattr(self, 'serverid'):
serverid = self.serverid
- elif serverid is not None:
+ elif serverid is not None and serverid.startswith('slapd-'):
serverid = serverid.replace('slapd-', '', 1)
if self.serverid is None:
diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py
index 96c9af9d1..f2725add9 100644
--- a/src/lib389/lib389/dseldif.py
+++ b/src/lib389/lib389/dseldif.py
@@ -40,7 +40,8 @@ class DSEldif(DSLint):
if serverid:
# Get the dse.ldif from the instance name
prefix = os.environ.get('PREFIX', ""),
- serverid = serverid.replace("slapd-", "")
+ if serverid.startswith("slapd-"):
+ serverid = serverid.replace("slapd-", "", 1)
self.path = "{}/etc/dirsrv/slapd-{}/dse.ldif".format(prefix[0], serverid)
else:
ds_paths = Paths(self._instance.serverid, self._instance)
--
2.26.2

View File

@ -0,0 +1,374 @@
From d7b49259ff2f9e0295bbfeaf128369ed33421974 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Mon, 30 Nov 2020 15:28:05 +0000
Subject: [PATCH 1/6] Issue 4418 - ldif2db - offline. Warn the user of skipped
entries
Bug Description: During an ldif2db import entries that do not
conform to various constraints will be skipped and not imported.
On completion of an import with skipped entries, the server
returns a success exit code and logs the skipped entry detail to
the error logs. The success exit code could lead the user to
believe that all entries were successfully imported.
Fix Description: If a skipped entry occurs during import, the
import will continue and a warning will be returned to the user.
CLI tools for offline import updated to handle warning code.
Test added to generate an incorrect ldif entry and perform an
import.
Fixes: #4418
Reviewed by: Firstyear, droideck (Thanks)
(cherry picked from commit a98fe54292e9b183a2163efbc7bdfe208d4abfb0)
---
.../tests/suites/import/import_test.py | 54 ++++++++++++++++++-
.../slapd/back-ldbm/db-bdb/bdb_import.c | 22 ++++++--
ldap/servers/slapd/main.c | 8 +++
ldap/servers/slapd/pblock.c | 24 +++++++++
ldap/servers/slapd/pblock_v3.h | 1 +
ldap/servers/slapd/slapi-private.h | 14 +++++
src/lib389/lib389/__init__.py | 18 +++----
src/lib389/lib389/_constants.py | 7 +++
src/lib389/lib389/cli_ctl/dbtasks.py | 8 ++-
9 files changed, 140 insertions(+), 16 deletions(-)
diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py
index 3803ecf43..b47db96ed 100644
--- a/dirsrvtests/tests/suites/import/import_test.py
+++ b/dirsrvtests/tests/suites/import/import_test.py
@@ -15,7 +15,7 @@ import pytest
import time
import glob
from lib389.topologies import topology_st as topo
-from lib389._constants import DEFAULT_SUFFIX
+from lib389._constants import DEFAULT_SUFFIX, TaskWarning
from lib389.dbgen import dbgen_users
from lib389.tasks import ImportTask
from lib389.index import Indexes
@@ -139,6 +139,38 @@ def _create_bogus_ldif(topo):
return import_ldif1
+def _create_syntax_err_ldif(topo):
+ """
+ Create an incorrect ldif entry that violates syntax check
+ """
+ ldif_dir = topo.standalone.get_ldif_dir()
+ line1 = """dn: dc=example,dc=com
+objectClass: top
+objectClass: domain
+dc: example
+dn: ou=groups,dc=example,dc=com
+objectClass: top
+objectClass: organizationalUnit
+ou: groups
+dn: uid=JHunt,ou=groups,dc=example,dc=com
+objectClass: top
+objectClass: person
+objectClass: organizationalPerson
+objectClass: inetOrgPerson
+objectclass: inetUser
+cn: James Hunt
+sn: Hunt
+uid: JHunt
+givenName:
+"""
+ with open(f'{ldif_dir}/syntax_err.ldif', 'w') as out:
+ out.write(f'{line1}')
+ os.chmod(out.name, 0o777)
+ out.close()
+ import_ldif1 = ldif_dir + '/syntax_err.ldif'
+ return import_ldif1
+
+
def test_import_with_index(topo, _import_clean):
"""
Add an index, then import via cn=tasks
@@ -214,6 +246,26 @@ def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_cl
topo.standalone.start()
+def test_ldif2db_syntax_check(topo):
+ """ldif2db should return a warning when a skipped entry has occured.
+ :id: 85e75670-42c5-4062-9edc-7f117c97a06f
+ :setup:
+ 1. Standalone Instance
+ 2. Ldif entry that violates syntax check rule (empty givenname)
+ :steps:
+ 1. Create an ldif file which violates the syntax checking rule
+ 2. Stop the server and import ldif file with ldif2db
+ :expected results:
+ 1. ldif2db import returns a warning to signify skipped entries
+ """
+ import_ldif1 = _create_syntax_err_ldif(topo)
+ # Import the offending LDIF data - offline
+ topo.standalone.stop()
+ ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1)
+ assert ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY
+ topo.standalone.start()
+
+
def test_issue_a_warning_if_the_cache_size_is_smaller(topo, _import_clean):
"""Report during startup if nsslapd-cachememsize is too small
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
index e7da0517f..1e4830e99 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
@@ -2563,7 +2563,7 @@ error:
slapi_task_dec_refcount(job->task);
}
import_all_done(job, ret);
- ret = 1;
+ ret |= WARN_UPGARDE_DN_FORMAT_ALL;
} else if (NEED_DN_NORM == ret) {
import_log_notice(job, SLAPI_LOG_NOTICE, "bdb_import_main",
"%s complete. %s needs upgradednformat.",
@@ -2572,7 +2572,7 @@ error:
slapi_task_dec_refcount(job->task);
}
import_all_done(job, ret);
- ret = 2;
+ ret |= WARN_UPGRADE_DN_FORMAT;
} else if (NEED_DN_NORM_SP == ret) {
import_log_notice(job, SLAPI_LOG_NOTICE, "bdb_import_main",
"%s complete. %s needs upgradednformat spaces.",
@@ -2581,7 +2581,7 @@ error:
slapi_task_dec_refcount(job->task);
}
import_all_done(job, ret);
- ret = 3;
+ ret |= WARN_UPGRADE_DN_FORMAT_SPACE;
} else {
ret = -1;
if (job->task != NULL) {
@@ -2600,6 +2600,11 @@ error:
import_all_done(job, ret);
}
+ /* set task warning if there are no errors */
+ if((!ret) && (job->skipped)) {
+ ret |= WARN_SKIPPED_IMPORT_ENTRY;
+ }
+
/* This instance isn't busy anymore */
instance_set_not_busy(job->inst);
@@ -2637,6 +2642,7 @@ bdb_back_ldif2db(Slapi_PBlock *pb)
int total_files, i;
int up_flags = 0;
PRThread *thread = NULL;
+ int ret = 0;
slapi_pblock_get(pb, SLAPI_BACKEND, &be);
if (be == NULL) {
@@ -2764,7 +2770,15 @@ bdb_back_ldif2db(Slapi_PBlock *pb)
}
/* old style -- do it all synchronously (THIS IS GOING AWAY SOON) */
- return import_main_offline((void *)job);
+ ret = import_main_offline((void *)job);
+
+ /* no error just warning, reset ret */
+ if(ret &= WARN_SKIPPED_IMPORT_ENTRY) {
+ slapi_pblock_set_task_warning(pb, WARN_SKIPPED_IMPORT_ENTRY);
+ ret = 0;
+ }
+
+ return ret;
}
struct _import_merge_thang
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 694375b22..104f6826c 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -2069,6 +2069,14 @@ slapd_exemode_ldif2db(struct main_config *mcfg)
plugin->plg_name);
return_value = -1;
}
+
+ /* check for task warnings */
+ if(!return_value) {
+ if((return_value = slapi_pblock_get_task_warning(pb))) {
+ slapi_log_err(SLAPI_LOG_INFO, "slapd_exemode_ldif2db","returning task warning: %d\n", return_value);
+ }
+ }
+
slapi_pblock_destroy(pb);
charray_free(instances);
charray_free(mcfg->cmd_line_instance_names);
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index 454ea9cc3..1ad9d0399 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -28,12 +28,14 @@
#define SLAPI_LDIF_DUMP_REPLICA 2003
#define SLAPI_PWDPOLICY 2004
#define SLAPI_PW_ENTRY 2005
+#define SLAPI_TASK_WARNING 2006
/* Used for checking assertions about pblocks in some cases. */
#define SLAPI_HINT 9999
static PRLock *pblock_analytics_lock = NULL;
+
static PLHashNumber
hash_int_func(const void *key)
{
@@ -4315,6 +4317,28 @@ slapi_pblock_set_ldif_dump_replica(Slapi_PBlock *pb, int32_t dump_replica)
pb->pb_task->ldif_dump_replica = dump_replica;
}
+int32_t
+slapi_pblock_get_task_warning(Slapi_PBlock *pb)
+{
+#ifdef PBLOCK_ANALYTICS
+ pblock_analytics_record(pb, SLAPI_TASK_WARNING);
+#endif
+ if (pb->pb_task != NULL) {
+ return pb->pb_task->task_warning;
+ }
+ return 0;
+}
+
+void
+slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warning)
+{
+#ifdef PBLOCK_ANALYTICS
+ pblock_analytics_record(pb, SLAPI_TASK_WARNING);
+#endif
+ _pblock_assert_pb_task(pb);
+ pb->pb_task->task_warning = warning;
+}
+
void *
slapi_pblock_get_vattr_context(Slapi_PBlock *pb)
{
diff --git a/ldap/servers/slapd/pblock_v3.h b/ldap/servers/slapd/pblock_v3.h
index 90498c0b0..b35d78565 100644
--- a/ldap/servers/slapd/pblock_v3.h
+++ b/ldap/servers/slapd/pblock_v3.h
@@ -67,6 +67,7 @@ typedef struct _slapi_pblock_task
int ldif2db_noattrindexes;
int ldif_printkey;
int task_flags;
+ int32_t task_warning;
int import_state;
int server_running; /* indicate that server is running */
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index c98c1947c..31cb33472 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1465,6 +1465,20 @@ void slapi_pblock_set_operation_notes(Slapi_PBlock *pb, uint32_t opnotes);
void slapi_pblock_set_flag_operation_notes(Slapi_PBlock *pb, uint32_t opflag);
void slapi_pblock_set_result_text_if_empty(Slapi_PBlock *pb, char *text);
+/* task warnings */
+typedef enum task_warning_t{
+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
+ WARN_UPGRADE_DN_FORMAT = (1 << 1),
+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
+} task_warning;
+
+int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
+void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);
+
+
+int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 4e6a1905a..5b36a79e1 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -2683,7 +2683,7 @@ class DirSrv(SimpleLDAPObject, object):
# server is stopped)
#
def ldif2db(self, bename, suffixes, excludeSuffixes, encrypt,
- import_file):
+ import_file, import_cl):
"""
@param bename - The backend name of the database to import
@param suffixes - List/tuple of suffixes to import
@@ -2731,14 +2731,14 @@ class DirSrv(SimpleLDAPObject, object):
try:
result = subprocess.check_output(cmd, encoding='utf-8')
except subprocess.CalledProcessError as e:
- self.log.debug("Command: %s failed with the return code %s and the error %s",
- format_cmd_list(cmd), e.returncode, e.output)
- return False
-
- self.log.debug("ldif2db output: BEGIN")
- for line in result.split("\n"):
- self.log.debug(line)
- self.log.debug("ldif2db output: END")
+ if e.returncode == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY:
+ self.log.debug("Command: %s skipped import entry warning %s",
+ format_cmd_list(cmd), e.returncode)
+ return e.returncode
+ else:
+ self.log.debug("Command: %s failed with the return code %s and the error %s",
+ format_cmd_list(cmd), e.returncode, e.output)
+ return False
return True
diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py
index e28c602a3..38ba04565 100644
--- a/src/lib389/lib389/_constants.py
+++ b/src/lib389/lib389/_constants.py
@@ -162,6 +162,13 @@ DB2BAK = 'db2bak'
DB2INDEX = 'db2index'
DBSCAN = 'dbscan'
+# Task warnings
+class TaskWarning(IntEnum):
+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0)
+ WARN_UPGRADE_DN_FORMAT = (1 << 1)
+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2)
+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
+
RDN_REPLICA = "cn=replica"
RETROCL_SUFFIX = "cn=changelog"
diff --git a/src/lib389/lib389/cli_ctl/dbtasks.py b/src/lib389/lib389/cli_ctl/dbtasks.py
index 590a1ea0e..02830239c 100644
--- a/src/lib389/lib389/cli_ctl/dbtasks.py
+++ b/src/lib389/lib389/cli_ctl/dbtasks.py
@@ -7,6 +7,7 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
+from lib389._constants import TaskWarning
def dbtasks_db2index(inst, log, args):
if not inst.db2index(bename=args.backend):
@@ -44,10 +45,13 @@ def dbtasks_db2ldif(inst, log, args):
def dbtasks_ldif2db(inst, log, args):
- if not inst.ldif2db(bename=args.backend, encrypt=args.encrypted, import_file=args.ldif,
- suffixes=None, excludeSuffixes=None):
+ ret = inst.ldif2db(bename=args.backend, encrypt=args.encrypted, import_file=args.ldif,
+ suffixes=None, excludeSuffixes=None, import_cl=False)
+ if not ret:
log.fatal("ldif2db failed")
return False
+ elif ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY:
+ log.warn("ldif2db successful with skipped entries")
else:
log.info("ldif2db successful")
--
2.26.2

View File

@ -1,520 +0,0 @@
From 6cd4b1c60dbd3d7b74adb19a2434585d50553f39 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Fri, 5 Jun 2020 12:14:51 +0200
Subject: [PATCH] Ticket 49859 - A distinguished value can be missing in an
entry
Bug description:
According to RFC 4511 (see ticket), the values of the RDN attributes
should be present in an entry.
With a set of replicated operations, it is possible that those values
would be missing
Fix description:
MOD and MODRDN update checks that the RDN values are presents.
If they are missing they are added to the resulting entry. In addition
the set of modifications to add those values are also indexed.
The specific case of single-valued attributes, where the final and unique value
can not be the RDN value, the attribute nsds5ReplConflict is added.
https://pagure.io/389-ds-base/issue/49859
Reviewed by: Mark Reynolds, William Brown
Platforms tested: F31
---
.../replication/conflict_resolve_test.py | 174 +++++++++++++++++-
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 136 ++++++++++++++
ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 37 +++-
.../servers/slapd/back-ldbm/proto-back-ldbm.h | 1 +
4 files changed, 343 insertions(+), 5 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
index 99a072935..48d0067db 100644
--- a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
+++ b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
@@ -10,10 +10,11 @@ import time
import logging
import ldap
import pytest
+import re
from itertools import permutations
from lib389._constants import *
from lib389.idm.nscontainer import nsContainers
-from lib389.idm.user import UserAccounts
+from lib389.idm.user import UserAccounts, UserAccount
from lib389.idm.group import Groups
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.replica import ReplicationManager
@@ -763,6 +764,177 @@ class TestTwoMasters:
user_dns_m2 = [user.dn for user in test_users_m2.list()]
assert set(user_dns_m1) == set(user_dns_m2)
+ def test_conflict_attribute_multi_valued(self, topology_m2, base_m2):
+ """A RDN attribute being multi-valued, checks that after several operations
+ MODRDN and MOD_REPL its RDN values are the same on both servers
+
+ :id: 225b3522-8ed7-4256-96f9-5fab9b7044a5
+ :setup: Two master replication,
+ audit log, error log for replica and access log for internal
+ :steps:
+ 1. Create a test entry uid=user_test_1000,...
+ 2. Pause all replication agreements
+ 3. On M1 rename it into uid=foo1,...
+ 4. On M2 rename it into uid=foo2,...
+ 5. On M1 MOD_REPL uid:foo1
+ 6. Resume all replication agreements
+ 7. Check that entry on M1 has uid=foo1, foo2
+ 8. Check that entry on M2 has uid=foo1, foo2
+ 9. Check that entry on M1 and M2 has the same uid values
+ :expectedresults:
+ 1. It should pass
+ 2. It should pass
+ 3. It should pass
+ 4. It should pass
+ 5. It should pass
+ 6. It should pass
+ 7. It should pass
+ 8. It should pass
+ 9. It should pass
+ """
+
+ M1 = topology_m2.ms["master1"]
+ M2 = topology_m2.ms["master2"]
+
+ # add a test user
+ test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
+ user_1 = test_users_m1.create_test_user(uid=1000)
+ test_users_m2 = UserAccount(M2, user_1.dn)
+ # Waiting fo the user to be replicated
+ for i in range(0,4):
+ time.sleep(1)
+ if test_users_m2.exists():
+ break
+ assert(test_users_m2.exists())
+
+ # Stop replication agreements
+ topology_m2.pause_all_replicas()
+
+ # On M1 rename test entry in uid=foo1
+ original_dn = user_1.dn
+ user_1.rename('uid=foo1')
+ time.sleep(1)
+
+ # On M2 rename test entry in uid=foo2
+ M2.rename_s(original_dn, 'uid=foo2')
+ time.sleep(2)
+
+ # on M1 MOD_REPL uid into foo1
+ user_1.replace('uid', 'foo1')
+
+ # resume replication agreements
+ topology_m2.resume_all_replicas()
+ time.sleep(5)
+
+ # check that on M1, the entry 'uid' has two values 'foo1' and 'foo2'
+ final_dn = re.sub('^.*1000,', 'uid=foo2,', original_dn)
+ final_user_m1 = UserAccount(M1, final_dn)
+ for val in final_user_m1.get_attr_vals_utf8('uid'):
+ log.info("Check %s is on M1" % val)
+ assert(val in ['foo1', 'foo2'])
+
+ # check that on M2, the entry 'uid' has two values 'foo1' and 'foo2'
+ final_user_m2 = UserAccount(M2, final_dn)
+ for val in final_user_m2.get_attr_vals_utf8('uid'):
+ log.info("Check %s is on M1" % val)
+ assert(val in ['foo1', 'foo2'])
+
+ # check that the entry have the same uid values
+ for val in final_user_m1.get_attr_vals_utf8('uid'):
+ log.info("Check M1.uid %s is also on M2" % val)
+ assert(val in final_user_m2.get_attr_vals_utf8('uid'))
+
+ for val in final_user_m2.get_attr_vals_utf8('uid'):
+ log.info("Check M2.uid %s is also on M1" % val)
+ assert(val in final_user_m1.get_attr_vals_utf8('uid'))
+
+ def test_conflict_attribute_single_valued(self, topology_m2, base_m2):
+ """A RDN attribute being signle-valued, checks that after several operations
+ MODRDN and MOD_REPL its RDN values are the same on both servers
+
+ :id: c38ae613-5d1e-47cf-b051-c7284e64b817
+ :setup: Two master replication, test container for entries, enable plugin logging,
+ audit log, error log for replica and access log for internal
+ :steps:
+ 1. Create a test entry uid=user_test_1000,...
+ 2. Pause all replication agreements
+ 3. On M1 rename it into employeenumber=foo1,...
+ 4. On M2 rename it into employeenumber=foo2,...
+ 5. On M1 MOD_REPL employeenumber:foo1
+ 6. Resume all replication agreements
+ 7. Check that entry on M1 has employeenumber=foo1
+ 8. Check that entry on M2 has employeenumber=foo1
+ 9. Check that entry on M1 and M2 has the same employeenumber values
+ :expectedresults:
+ 1. It should pass
+ 2. It should pass
+ 3. It should pass
+ 4. It should pass
+ 5. It should pass
+ 6. It should pass
+ 7. It should pass
+ 8. It should pass
+ 9. It should pass
+ """
+
+ M1 = topology_m2.ms["master1"]
+ M2 = topology_m2.ms["master2"]
+
+ # add a test user with a dummy 'uid' extra value because modrdn removes
+ # uid that conflict with 'account' objectclass
+ test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
+ user_1 = test_users_m1.create_test_user(uid=1000)
+ user_1.add('objectclass', 'extensibleobject')
+ user_1.add('uid', 'dummy')
+ test_users_m2 = UserAccount(M2, user_1.dn)
+
+ # Waiting fo the user to be replicated
+ for i in range(0,4):
+ time.sleep(1)
+ if test_users_m2.exists():
+ break
+ assert(test_users_m2.exists())
+
+ # Stop replication agreements
+ topology_m2.pause_all_replicas()
+
+ # On M1 rename test entry in employeenumber=foo1
+ original_dn = user_1.dn
+ user_1.rename('employeenumber=foo1')
+ time.sleep(1)
+
+ # On M2 rename test entry in employeenumber=foo2
+ M2.rename_s(original_dn, 'employeenumber=foo2')
+ time.sleep(2)
+
+ # on M1 MOD_REPL uid into foo1
+ user_1.replace('employeenumber', 'foo1')
+
+ # resume replication agreements
+ topology_m2.resume_all_replicas()
+ time.sleep(5)
+
+ # check that on M1, the entry 'employeenumber' has value 'foo1'
+ final_dn = re.sub('^.*1000,', 'employeenumber=foo2,', original_dn)
+ final_user_m1 = UserAccount(M1, final_dn)
+ for val in final_user_m1.get_attr_vals_utf8('employeenumber'):
+ log.info("Check %s is on M1" % val)
+ assert(val in ['foo1'])
+
+ # check that on M2, the entry 'employeenumber' has values 'foo1'
+ final_user_m2 = UserAccount(M2, final_dn)
+ for val in final_user_m2.get_attr_vals_utf8('employeenumber'):
+ log.info("Check %s is on M2" % val)
+ assert(val in ['foo1'])
+
+ # check that the entry have the same uid values
+ for val in final_user_m1.get_attr_vals_utf8('employeenumber'):
+ log.info("Check M1.uid %s is also on M2" % val)
+ assert(val in final_user_m2.get_attr_vals_utf8('employeenumber'))
+
+ for val in final_user_m2.get_attr_vals_utf8('employeenumber'):
+ log.info("Check M2.uid %s is also on M1" % val)
+ assert(val in final_user_m1.get_attr_vals_utf8('employeenumber'))
class TestThreeMasters:
def test_nested_entries(self, topology_m3, base_m3):
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index e9d7e87e3..a507f3c31 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -213,6 +213,112 @@ error:
return retval;
}
+int32_t
+entry_get_rdn_mods(Slapi_PBlock *pb, Slapi_Entry *entry, CSN *csn, int repl_op, Slapi_Mods **smods_ret)
+{
+ unsigned long op_type = SLAPI_OPERATION_NONE;
+ char *new_rdn = NULL;
+ char **dns = NULL;
+ char **rdns = NULL;
+ Slapi_Mods *smods = NULL;
+ char *type = NULL;
+ struct berval *bvp[2] = {0};
+ struct berval bv;
+ Slapi_Attr *attr = NULL;
+ const char *entry_dn = NULL;
+
+ *smods_ret = NULL;
+ entry_dn = slapi_entry_get_dn_const(entry);
+ /* Do not bother to check that RDN is present, no one rename RUV or change its nsuniqueid */
+ if (strcasestr(entry_dn, RUV_STORAGE_ENTRY_UNIQUEID)) {
+ return 0;
+ }
+
+ /* First get the RDNs of the operation */
+ slapi_pblock_get(pb, SLAPI_OPERATION_TYPE, &op_type);
+ switch (op_type) {
+ case SLAPI_OPERATION_MODIFY:
+ dns = slapi_ldap_explode_dn(entry_dn, 0);
+ if (dns == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods",
+ "Fails to split DN \"%s\" into components\n", entry_dn);
+ return -1;
+ }
+ rdns = slapi_ldap_explode_rdn(dns[0], 0);
+ slapi_ldap_value_free(dns);
+
+ break;
+ case SLAPI_OPERATION_MODRDN:
+ slapi_pblock_get(pb, SLAPI_MODRDN_NEWRDN, &new_rdn);
+ rdns = slapi_ldap_explode_rdn(new_rdn, 0);
+ break;
+ default:
+ break;
+ }
+ if (rdns == NULL || rdns[0] == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods",
+ "Fails to split RDN \"%s\" into components\n", slapi_entry_get_dn_const(entry));
+ return -1;
+ }
+
+ /* Update the entry to add RDNs values if they are missing */
+ smods = slapi_mods_new();
+
+ bvp[0] = &bv;
+ bvp[1] = NULL;
+ for (size_t rdns_count = 0; rdns[rdns_count]; rdns_count++) {
+ Slapi_Value *value;
+ attr = NULL;
+ slapi_rdn2typeval(rdns[rdns_count], &type, &bv);
+
+ /* Check if the RDN value exists */
+ if ((slapi_entry_attr_find(entry, type, &attr) != 0) ||
+ (slapi_attr_value_find(attr, &bv))) {
+ const CSN *csn_rdn_add;
+ const CSN *adcsn = attr_get_deletion_csn(attr);
+
+ /* It is missing => adds it */
+ if (slapi_attr_flag_is_set(attr, SLAPI_ATTR_FLAG_SINGLE)) {
+ if (csn_compare(adcsn, csn) >= 0) {
+ /* this is a single valued attribute and the current value
+ * (that is different from RDN value) is more recent than
+ * the RDN value we want to apply.
+ * Keep the current value and add a conflict flag
+ */
+
+ type = ATTR_NSDS5_REPLCONFLICT;
+ bv.bv_val = "RDN value may be missing because it is single-valued";
+ bv.bv_len = strlen(bv.bv_val);
+ slapi_entry_add_string(entry, type, bv.bv_val);
+ slapi_mods_add_modbvps(smods, LDAP_MOD_ADD, type, bvp);
+ continue;
+ }
+ }
+ /* if a RDN value needs to be forced, make sure it csn is ahead */
+ slapi_mods_add_modbvps(smods, LDAP_MOD_ADD, type, bvp);
+ csn_rdn_add = csn_max(adcsn, csn);
+
+ if (entry_apply_mods_wsi(entry, smods, csn_rdn_add, repl_op)) {
+ slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods",
+ "Fails to set \"%s\" in \"%s\"\n", type, slapi_entry_get_dn_const(entry));
+ slapi_ldap_value_free(rdns);
+ slapi_mods_free(&smods);
+ return -1;
+ }
+ /* Make the RDN value a distinguished value */
+ attr_value_find_wsi(attr, &bv, &value);
+ value_update_csn(value, CSN_TYPE_VALUE_DISTINGUISHED, csn_rdn_add);
+ }
+ }
+ slapi_ldap_value_free(rdns);
+ if (smods->num_mods == 0) {
+ /* smods_ret already NULL, just free the useless smods */
+ slapi_mods_free(&smods);
+ } else {
+ *smods_ret = smods;
+ }
+ return 0;
+}
/**
Apply the mods to the ec entry. Check for syntax, schema problems.
Check for abandon.
@@ -269,6 +375,8 @@ modify_apply_check_expand(
goto done;
}
+
+
/*
* If the objectClass attribute type was modified in any way, expand
* the objectClass values to reflect the inheritance hierarchy.
@@ -414,6 +522,7 @@ ldbm_back_modify(Slapi_PBlock *pb)
int result_sent = 0;
int32_t parent_op = 0;
struct timespec parent_time;
+ Slapi_Mods *smods_add_rdn = NULL;
slapi_pblock_get(pb, SLAPI_BACKEND, &be);
slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
@@ -731,6 +840,15 @@ ldbm_back_modify(Slapi_PBlock *pb)
}
} /* else if new_mod_count == mod_count then betxnpremod plugin did nothing */
+ /* time to check if applying a replicated operation removed
+ * the RDN value from the entry. Assuming that only replicated update
+ * can lead to that bad result
+ */
+ if (entry_get_rdn_mods(pb, ec->ep_entry, opcsn, repl_op, &smods_add_rdn)) {
+ goto error_return;
+ }
+
+
/*
* Update the ID to Entry index.
* Note that id2entry_add replaces the entry, so the Entry ID
@@ -764,6 +882,23 @@ ldbm_back_modify(Slapi_PBlock *pb)
MOD_SET_ERROR(ldap_result_code, LDAP_OPERATIONS_ERROR, retry_count);
goto error_return;
}
+
+ if (smods_add_rdn && slapi_mods_get_num_mods(smods_add_rdn) > 0) {
+ retval = index_add_mods(be, (LDAPMod **) slapi_mods_get_ldapmods_byref(smods_add_rdn), e, ec, &txn);
+ if (DB_LOCK_DEADLOCK == retval) {
+ /* Abort and re-try */
+ slapi_mods_free(&smods_add_rdn);
+ continue;
+ }
+ if (retval != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modify",
+ "index_add_mods (rdn) failed, err=%d %s\n",
+ retval, (msg = dblayer_strerror(retval)) ? msg : "");
+ MOD_SET_ERROR(ldap_result_code, LDAP_OPERATIONS_ERROR, retry_count);
+ slapi_mods_free(&smods_add_rdn);
+ goto error_return;
+ }
+ }
/*
* Remove the old entry from the Virtual List View indexes.
* Add the new entry to the Virtual List View indexes.
@@ -978,6 +1113,7 @@ error_return:
common_return:
slapi_mods_done(&smods);
+ slapi_mods_free(&smods_add_rdn);
if (inst) {
if (ec_locked || cache_is_in_cache(&inst->inst_cache, ec)) {
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
index fde83c99f..e97b7a5f6 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
@@ -21,7 +21,7 @@ static void moddn_unlock_and_return_entry(backend *be, struct backentry **target
static int moddn_newrdn_mods(Slapi_PBlock *pb, const char *olddn, struct backentry *ec, Slapi_Mods *smods_wsi, int is_repl_op);
static IDList *moddn_get_children(back_txn *ptxn, Slapi_PBlock *pb, backend *be, struct backentry *parententry, Slapi_DN *parentdn, struct backentry ***child_entries, struct backdn ***child_dns, int is_resurect_operation);
static int moddn_rename_children(back_txn *ptxn, Slapi_PBlock *pb, backend *be, IDList *children, Slapi_DN *dn_parentdn, Slapi_DN *dn_newsuperiordn, struct backentry *child_entries[]);
-static int modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li, struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3);
+static int modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li, struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3, Slapi_Mods *smods4);
static void mods_remove_nsuniqueid(Slapi_Mods *smods);
#define MOD_SET_ERROR(rc, error, count) \
@@ -100,6 +100,7 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
Connection *pb_conn = NULL;
int32_t parent_op = 0;
struct timespec parent_time;
+ Slapi_Mods *smods_add_rdn = NULL;
if (slapi_pblock_get(pb, SLAPI_CONN_ID, &conn_id) < 0) {
conn_id = 0; /* connection is NULL */
@@ -842,6 +843,15 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
goto error_return;
}
}
+
+ /* time to check if applying a replicated operation removed
+ * the RDN value from the entry. Assuming that only replicated update
+ * can lead to that bad result
+ */
+ if (entry_get_rdn_mods(pb, ec->ep_entry, opcsn, is_replicated_operation, &smods_add_rdn)) {
+ goto error_return;
+ }
+
/* check that the entry still obeys the schema */
if (slapi_entry_schema_check(pb, ec->ep_entry) != 0) {
ldap_result_code = LDAP_OBJECT_CLASS_VIOLATION;
@@ -1003,7 +1013,7 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
/*
* Update the indexes for the entry.
*/
- retval = modrdn_rename_entry_update_indexes(&txn, pb, li, e, &ec, &smods_generated, &smods_generated_wsi, &smods_operation_wsi);
+ retval = modrdn_rename_entry_update_indexes(&txn, pb, li, e, &ec, &smods_generated, &smods_generated_wsi, &smods_operation_wsi, smods_add_rdn);
if (DB_LOCK_DEADLOCK == retval) {
/* Retry txn */
continue;
@@ -1497,6 +1507,7 @@ common_return:
slapi_mods_done(&smods_operation_wsi);
slapi_mods_done(&smods_generated);
slapi_mods_done(&smods_generated_wsi);
+ slapi_mods_free(&smods_add_rdn);
slapi_ch_free((void **)&child_entries);
slapi_ch_free((void **)&child_dns);
if (ldap_result_matcheddn && 0 != strcmp(ldap_result_matcheddn, "NULL"))
@@ -1778,7 +1789,7 @@ mods_remove_nsuniqueid(Slapi_Mods *smods)
* mods contains the list of attribute change made.
*/
static int
-modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li __attribute__((unused)), struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3)
+modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li __attribute__((unused)), struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3, Slapi_Mods *smods4)
{
backend *be;
ldbm_instance *inst;
@@ -1874,6 +1885,24 @@ modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbm
goto error_return;
}
}
+ if (smods4 != NULL && slapi_mods_get_num_mods(smods4) > 0) {
+ /*
+ * update the indexes: lastmod, rdn, etc.
+ */
+ retval = index_add_mods(be, slapi_mods_get_ldapmods_byref(smods4), e, *ec, ptxn);
+ if (DB_LOCK_DEADLOCK == retval) {
+ /* Retry txn */
+ slapi_log_err(SLAPI_LOG_BACKLDBM, "modrdn_rename_entry_update_indexes",
+ "index_add_mods4 deadlock\n");
+ goto error_return;
+ }
+ if (retval != 0) {
+ slapi_log_err(SLAPI_LOG_TRACE, "modrdn_rename_entry_update_indexes",
+ "index_add_mods 4 failed, err=%d %s\n",
+ retval, (msg = dblayer_strerror(retval)) ? msg : "");
+ goto error_return;
+ }
+ }
/*
* Remove the old entry from the Virtual List View indexes.
* Add the new entry to the Virtual List View indexes.
@@ -1991,7 +2020,7 @@ moddn_rename_child_entry(
* Update all the indexes.
*/
retval = modrdn_rename_entry_update_indexes(ptxn, pb, li, e, ec,
- smodsp, NULL, NULL);
+ smodsp, NULL, NULL, NULL);
/* JCMREPL - Should the children get updated modifiersname and lastmodifiedtime? */
slapi_mods_done(&smods);
}
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index 4d2524fd9..e2f1100ed 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -324,6 +324,7 @@ int get_parent_rdn(DB *db, ID parentid, Slapi_RDN *srdn);
/*
* modify.c
*/
+int32_t entry_get_rdn_mods(Slapi_PBlock *pb, Slapi_Entry *entry, CSN *csn, int repl_op, Slapi_Mods **smods_ret);
int modify_update_all(backend *be, Slapi_PBlock *pb, modify_context *mc, back_txn *txn);
void modify_init(modify_context *mc, struct backentry *old_entry);
int modify_apply_mods(modify_context *mc, Slapi_Mods *smods);
--
2.26.2

View File

@ -0,0 +1,52 @@
From 97bdef2d562e447d521202beb485c3948b0e7214 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Mon, 30 Nov 2020 15:28:05 +0000
Subject: [PATCH 2/6] Issue 4418 - ldif2db - offline. Warn the user of skipped
entries
Bug Description: During an ldif2db import entries that do not
conform to various constraints will be skipped and not imported.
On completition of an import with skipped entries, the server
returns a success exit code and logs the skipped entry detail to
the error logs. The success exit code could lead the user to
believe that all entries were successfully imported.
Fix Description: If a skipped entry occurs during import, the
import will continue and a warning will be returned to the user.
CLI tools for offline import updated to handle warning code.
Test added to generate an incorrect ldif entry and perform an
import.
Fixes: #4418
Reviewed by: Firstyear, droideck (Thanks)
---
ldap/servers/slapd/slapi-private.h | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 31cb33472..e0092d571 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1476,6 +1476,16 @@ typedef enum task_warning_t{
int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);
+/* task warnings */
+typedef enum task_warning_t{
+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
+ WARN_UPGRADE_DN_FORMAT = (1 << 1),
+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
+} task_warning;
+
+int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
+void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);
int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name);
--
2.26.2

View File

@ -1,128 +0,0 @@
From 2be9d1b4332d3b9b55a2d285e9610813100e235f Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 22 Jun 2020 17:49:10 -0400
Subject: [PATCH] Issue 49256 - log warning when thread number is very
different from autotuned value
Description: To help prevent customers from setting incorrect values for
the thread number it would be useful to warn them that the
configured value is either way too low or way too high.
relates: https://pagure.io/389-ds-base/issue/49256
Reviewed by: firstyear(Thanks!)
---
.../tests/suites/config/autotuning_test.py | 28 +++++++++++++++
ldap/servers/slapd/libglobs.c | 34 ++++++++++++++++++-
ldap/servers/slapd/slap.h | 3 ++
3 files changed, 64 insertions(+), 1 deletion(-)
diff --git a/dirsrvtests/tests/suites/config/autotuning_test.py b/dirsrvtests/tests/suites/config/autotuning_test.py
index d1c751444..540761250 100644
--- a/dirsrvtests/tests/suites/config/autotuning_test.py
+++ b/dirsrvtests/tests/suites/config/autotuning_test.py
@@ -43,6 +43,34 @@ def test_threads_basic(topo):
assert topo.standalone.config.get_attr_val_int("nsslapd-threadnumber") > 0
+def test_threads_warning(topo):
+ """Check that we log a warning if the thread number is too high or low
+
+ :id: db92412b-2812-49de-84b0-00f452cd254f
+ :setup: Standalone Instance
+ :steps:
+ 1. Get autotuned thread number
+ 2. Set threads way higher than hw threads, and find a warning in the log
+ 3. Set threads way lower than hw threads, and find a warning in the log
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+ topo.standalone.config.set("nsslapd-threadnumber", "-1")
+ autotuned_value = topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber")
+
+ topo.standalone.config.set("nsslapd-threadnumber", str(int(autotuned_value) * 4))
+ time.sleep(.5)
+ assert topo.standalone.ds_error_log.match('.*higher.*hurt server performance.*')
+
+ if int(autotuned_value) > 1:
+ # If autotuned is 1, there isn't anything to test here
+ topo.standalone.config.set("nsslapd-threadnumber", "1")
+ time.sleep(.5)
+ assert topo.standalone.ds_error_log.match('.*lower.*hurt server performance.*')
+
+
@pytest.mark.parametrize("invalid_value", ('-2', '0', 'invalid'))
def test_threads_invalid_value(topo, invalid_value):
"""Check nsslapd-threadnumber for an invalid values
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index fbf90d92d..88676a303 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -4374,6 +4374,7 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a
{
int retVal = LDAP_SUCCESS;
int32_t threadnum = 0;
+ int32_t hw_threadnum = 0;
char *endp = NULL;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
@@ -4386,8 +4387,39 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a
threadnum = strtol(value, &endp, 10);
/* Means we want to re-run the hardware detection. */
+ hw_threadnum = util_get_hardware_threads();
if (threadnum == -1) {
- threadnum = util_get_hardware_threads();
+ threadnum = hw_threadnum;
+ } else {
+ /*
+ * Log a message if the user defined thread number is very different
+ * from the hardware threads as this is probably not the optimal
+ * value.
+ */
+ if (threadnum >= hw_threadnum) {
+ if (threadnum > MIN_THREADS && threadnum / hw_threadnum >= 4) {
+ /* We're over the default minimum and way higher than the hw
+ * threads. */
+ slapi_log_err(SLAPI_LOG_NOTICE, "config_set_threadnumber",
+ "The configured thread number (%d) is significantly "
+ "higher than the number of hardware threads (%d). "
+ "This can potentially hurt server performance. If "
+ "you are unsure how to tune \"nsslapd-threadnumber\" "
+ "then set it to \"-1\" and the server will tune it "
+ "according to the system hardware\n",
+ threadnum, hw_threadnum);
+ }
+ } else if (threadnum < MIN_THREADS) {
+ /* The thread number should never be less than the minimum and
+ * hardware threads. */
+ slapi_log_err(SLAPI_LOG_WARNING, "config_set_threadnumber",
+ "The configured thread number (%d) is lower than the number "
+ "of hardware threads (%d). This will hurt server performance. "
+ "If you are unsure how to tune \"nsslapd-threadnumber\" then "
+ "set it to \"-1\" and the server will tune it according to the "
+ "system hardware\n",
+ threadnum, hw_threadnum);
+ }
}
if (*endp != '\0' || errno == ERANGE || threadnum < 1 || threadnum > 65535) {
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 8e76393c3..894efd29c 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -403,6 +403,9 @@ typedef void (*VFPV)(); /* takes undefined arguments */
#define SLAPD_DEFAULT_PW_MAX_CLASS_CHARS_ATTRIBUTE 0
#define SLAPD_DEFAULT_PW_MAX_CLASS_CHARS_ATTRIBUTE_STR "0"
+#define MIN_THREADS 16
+#define MAX_THREADS 512
+
/* Default password values. */
--
2.26.2

View File

@ -0,0 +1,34 @@
From 22fb8b2690a5fa364d252846f06b77b5fec8c602 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 7 Jan 2021 10:27:43 -0500
Subject: [PATCH 3/6] Fix cherry-pick error
---
ldap/servers/slapd/slapi-private.h | 11 -----------
1 file changed, 11 deletions(-)
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index e0092d571..d5abe8ac1 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1476,17 +1476,6 @@ typedef enum task_warning_t{
int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);
-/* task warnings */
-typedef enum task_warning_t{
- WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
- WARN_UPGRADE_DN_FORMAT = (1 << 1),
- WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
- WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
-} task_warning;
-
-int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
-void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);
-
int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name);
#ifdef __cplusplus
--
2.26.2

View File

@ -1,34 +0,0 @@
From d24381488a997dda0006b603fb2b452b726757c0 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <simon.pichugin@gmail.com>
Date: Thu, 25 Jun 2020 10:45:16 +0200
Subject: [PATCH] Issue 51188 - db2ldif crashes when LDIF file can't be
accessed
Bug Description: db2ldif crashes when we set '-a LDIF_PATH' to a place that
can't be accessed by the user (dirsrv by default)
Fix Description: Don't attempt to close DB if we bail after a failed
attempt to open LDIF file.
https://pagure.io/389-ds-base/issue/51188
Reviewed by: mreynolds (Thanks!)
---
ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
index 542147c3d..9ffd877cb 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
@@ -871,6 +871,7 @@ bdb_db2ldif(Slapi_PBlock *pb)
slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif",
"db2ldif: %s: can't open %s: %d (%s) while running as user \"%s\"\n",
inst->inst_name, fname, errno, dblayer_strerror(errno), slapdFrontendConfig->localuserinfo->pw_name);
+ we_start_the_backends = 0;
return_value = -1;
goto bye;
}
--
2.26.2

View File

@ -0,0 +1,393 @@
From 43f8a317bcd9040874b27cad905347a9e6bc8a6f Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 9 Dec 2020 22:42:59 +0000
Subject: [PATCH 4/6] Issue 4419 - Warn users of skipped entries during ldif2db
online import (#4476)
Bug Description: During an online ldif2db import entries that do not
conform to various constraints will be skipped and
not imported. On completion of an import with skipped
entries, the server responds with a success message
and logs the skipped entry detail to the error logs.
The success message could lead the user to believe
that all entries were successfully imported.
Fix Description: If a skipped entry occurs during import, the import
will continue and a warning message will be displayed.
The schema is extended with a nsTaskWarning attribute
which is used to capture and retrieve any task
warnings.
CLI tools for online import updated.
Test added to generate an incorrect ldif entry and perform an
online import.
Fixes: https://github.com/389ds/389-ds-base/issues/4419
Reviewed by: tbordaz, mreynolds389, droideck, Firstyear (Thanks)
---
.../tests/suites/import/import_test.py | 39 +++++++++++++++++--
ldap/schema/02common.ldif | 3 +-
.../back-ldbm/db-bdb/bdb_import_threads.c | 5 +++
ldap/servers/slapd/slap.h | 1 +
ldap/servers/slapd/slapi-plugin.h | 11 ++++++
ldap/servers/slapd/slapi-private.h | 8 ----
ldap/servers/slapd/task.c | 29 +++++++++++++-
src/lib389/lib389/cli_conf/backend.py | 6 ++-
src/lib389/lib389/tasks.py | 23 +++++++++--
9 files changed, 108 insertions(+), 17 deletions(-)
diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py
index b47db96ed..77c915026 100644
--- a/dirsrvtests/tests/suites/import/import_test.py
+++ b/dirsrvtests/tests/suites/import/import_test.py
@@ -65,6 +65,9 @@ def _import_clean(request, topo):
import_ldif = ldif_dir + '/basic_import.ldif'
if os.path.exists(import_ldif):
os.remove(import_ldif)
+ syntax_err_ldif = ldif_dir + '/syntax_err.dif'
+ if os.path.exists(syntax_err_ldif):
+ os.remove(syntax_err_ldif)
request.addfinalizer(finofaci)
@@ -141,17 +144,19 @@ def _create_bogus_ldif(topo):
def _create_syntax_err_ldif(topo):
"""
- Create an incorrect ldif entry that violates syntax check
+ Create an ldif file, which contains an entry that violates syntax check
"""
ldif_dir = topo.standalone.get_ldif_dir()
line1 = """dn: dc=example,dc=com
objectClass: top
objectClass: domain
dc: example
+
dn: ou=groups,dc=example,dc=com
objectClass: top
objectClass: organizationalUnit
ou: groups
+
dn: uid=JHunt,ou=groups,dc=example,dc=com
objectClass: top
objectClass: person
@@ -201,6 +206,34 @@ def test_import_with_index(topo, _import_clean):
assert f'{place}/userRoot/roomNumber.db' in glob.glob(f'{place}/userRoot/*.db', recursive=True)
+def test_online_import_with_warning(topo, _import_clean):
+ """
+ Import an ldif file with syntax errors, verify skipped entry warning code
+
+ :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb8
+ :setup: Standalone Instance
+ :steps:
+ 1. Create standalone Instance
+ 2. Create an ldif file with an entry that violates syntax check (empty givenname)
+ 3. Online import of troublesome ldif file
+ :expected results:
+ 1. Successful import with skipped entry warning
+ """
+ topo.standalone.restart()
+
+ import_task = ImportTask(topo.standalone)
+ import_ldif1 = _create_syntax_err_ldif(topo)
+
+ # Importing the offending ldif file - online
+ import_task.import_suffix_from_ldif(ldiffile=import_ldif1, suffix=DEFAULT_SUFFIX)
+
+ # There is just a single entry in this ldif
+ import_task.wait(5)
+
+ # Check for the task nsTaskWarning attr, make sure its set to skipped entry code
+ assert import_task.present('nstaskwarning')
+ assert TaskWarning.WARN_SKIPPED_IMPORT_ENTRY == import_task.get_task_warn()
+
def test_crash_on_ldif2db(topo, _import_clean):
"""
Delete the cn=monitor entry for an LDBM backend instance. Doing this will
@@ -246,7 +279,7 @@ def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_cl
topo.standalone.start()
-def test_ldif2db_syntax_check(topo):
+def test_ldif2db_syntax_check(topo, _import_clean):
"""ldif2db should return a warning when a skipped entry has occured.
:id: 85e75670-42c5-4062-9edc-7f117c97a06f
:setup:
@@ -261,7 +294,7 @@ def test_ldif2db_syntax_check(topo):
import_ldif1 = _create_syntax_err_ldif(topo)
# Import the offending LDIF data - offline
topo.standalone.stop()
- ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1)
+ ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1, None)
assert ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY
topo.standalone.start()
diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif
index c6dc074db..821640d03 100644
--- a/ldap/schema/02common.ldif
+++ b/ldap/schema/02common.ldif
@@ -145,6 +145,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2356 NAME 'nsTaskExitCode' DESC 'Slapi T
attributeTypes: ( 2.16.840.1.113730.3.1.2357 NAME 'nsTaskCurrentItem' DESC 'Slapi Task item' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2358 NAME 'nsTaskTotalItems' DESC 'Slapi Task total items' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2359 NAME 'nsTaskCreated' DESC 'Slapi Task creation date' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2375 NAME 'nsTaskWarning' DESC 'Slapi Task warning code' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
#
# objectclasses:
#
@@ -177,5 +178,5 @@ objectClasses: ( 2.16.840.1.113730.3.2.503 NAME 'nsDSWindowsReplicationAgreement
objectClasses: ( 2.16.840.1.113730.3.2.128 NAME 'costemplate' DESC 'Netscape defined objectclass' SUP top MAY ( cn $ cospriority ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.304 NAME 'nsView' DESC 'Netscape defined objectclass' SUP top AUXILIARY MAY ( nsViewFilter $ description ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.316 NAME 'nsAttributeEncryption' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsEncryptionAlgorithm ) X-ORIGIN 'Netscape Directory Server' )
-objectClasses: ( 2.16.840.1.113730.3.2.335 NAME 'nsSlapiTask' DESC 'Slapi_Task objectclass' SUP top MUST ( cn ) MAY ( ttl $ nsTaskLog $ nsTaskStatus $ nsTaskExitCode $ nsTaskCurrentItem $ nsTaskTotalItems $ nsTaskCreated ) X-ORIGIN '389 Directory Server' )
+objectClasses: ( 2.16.840.1.113730.3.2.335 NAME 'nsSlapiTask' DESC 'Slapi_Task objectclass' SUP top MUST ( cn ) MAY ( ttl $ nsTaskLog $ nsTaskStatus $ nsTaskExitCode $ nsTaskCurrentItem $ nsTaskTotalItems $ nsTaskCreated $ nsTaskWarning ) X-ORIGIN '389 Directory Server' )
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
index 310893884..5c7d9c8f7 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
@@ -747,6 +747,11 @@ import_producer(void *param)
}
}
+ /* capture skipped entry warnings for this task */
+ if((job) && (job->skipped)) {
+ slapi_task_set_warning(job->task, WARN_SKIPPED_IMPORT_ENTRY);
+ }
+
slapi_value_free(&(job->usn_value));
import_free_ldif(&c);
info->state = FINISHED;
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 53c9161d1..be4d38739 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1753,6 +1753,7 @@ typedef struct slapi_task
int task_progress; /* number between 0 and task_work */
int task_work; /* "units" of work to be done */
int task_flags; /* (see above) */
+ task_warning task_warn; /* task warning */
char *task_status; /* transient status info */
char *task_log; /* appended warnings, etc */
char task_date[SLAPI_TIMESTAMP_BUFSIZE]; /* Date/time when task was created */
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 96313ef2c..ddb11bc7c 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -6638,6 +6638,15 @@ int slapi_config_remove_callback(int operation, int flags, const char *base, int
/* task flags (set by the task-control code) */
#define SLAPI_TASK_DESTROYING 0x01 /* queued event for destruction */
+/* task warnings */
+typedef enum task_warning_t{
+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
+ WARN_UPGRADE_DN_FORMAT = (1 << 1),
+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
+} task_warning;
+
+
int slapi_task_register_handler(const char *name, dseCallbackFn func);
int slapi_plugin_task_register_handler(const char *name, dseCallbackFn func, Slapi_PBlock *plugin_pb);
int slapi_plugin_task_unregister_handler(const char *name, dseCallbackFn func);
@@ -6654,6 +6663,8 @@ int slapi_task_get_refcount(Slapi_Task *task);
void slapi_task_set_destructor_fn(Slapi_Task *task, TaskCallbackFn func);
void slapi_task_set_cancel_fn(Slapi_Task *task, TaskCallbackFn func);
void slapi_task_status_changed(Slapi_Task *task);
+void slapi_task_set_warning(Slapi_Task *task, task_warning warn);
+int slapi_task_get_warning(Slapi_Task *task);
void slapi_task_log_status(Slapi_Task *task, char *format, ...)
#ifdef __GNUC__
__attribute__((format(printf, 2, 3)));
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index d5abe8ac1..b956ebe63 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1465,14 +1465,6 @@ void slapi_pblock_set_operation_notes(Slapi_PBlock *pb, uint32_t opnotes);
void slapi_pblock_set_flag_operation_notes(Slapi_PBlock *pb, uint32_t opflag);
void slapi_pblock_set_result_text_if_empty(Slapi_PBlock *pb, char *text);
-/* task warnings */
-typedef enum task_warning_t{
- WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
- WARN_UPGRADE_DN_FORMAT = (1 << 1),
- WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
- WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
-} task_warning;
-
int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);
diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c
index 936c64920..806077a16 100644
--- a/ldap/servers/slapd/task.c
+++ b/ldap/servers/slapd/task.c
@@ -46,6 +46,7 @@ static uint64_t shutting_down = 0;
#define TASK_PROGRESS_NAME "nsTaskCurrentItem"
#define TASK_WORK_NAME "nsTaskTotalItems"
#define TASK_DATE_NAME "nsTaskCreated"
+#define TASK_WARNING_NAME "nsTaskWarning"
#define DEFAULT_TTL "3600" /* seconds */
#define TASK_SYSCONFIG_FILE_ATTR "sysconfigfile" /* sysconfig reload task file attr */
@@ -332,7 +333,7 @@ slapi_task_status_changed(Slapi_Task *task)
LDAPMod modlist[20];
LDAPMod *mod[20];
int cur = 0, i;
- char s1[20], s2[20], s3[20];
+ char s1[20], s2[20], s3[20], s4[20];
if (shutting_down) {
/* don't care about task status updates anymore */
@@ -346,9 +347,11 @@ slapi_task_status_changed(Slapi_Task *task)
sprintf(s1, "%d", task->task_exitcode);
sprintf(s2, "%d", task->task_progress);
sprintf(s3, "%d", task->task_work);
+ sprintf(s4, "%d", task->task_warn);
NEXTMOD(TASK_PROGRESS_NAME, s2);
NEXTMOD(TASK_WORK_NAME, s3);
NEXTMOD(TASK_DATE_NAME, task->task_date);
+ NEXTMOD(TASK_WARNING_NAME, s4);
/* only add the exit code when the job is done */
if ((task->task_state == SLAPI_TASK_FINISHED) ||
(task->task_state == SLAPI_TASK_CANCELLED)) {
@@ -452,6 +455,30 @@ slapi_task_get_refcount(Slapi_Task *task)
return 0; /* return value not currently used */
}
+/*
+ * Return task warning
+ */
+int
+slapi_task_get_warning(Slapi_Task *task)
+{
+ if (task) {
+ return task->task_warn;
+ }
+
+ return 0; /* return value not currently used */
+}
+
+/*
+ * Set task warning
+ */
+void
+slapi_task_set_warning(Slapi_Task *task, task_warning warn)
+{
+ if (task) {
+ return task->task_warn |= warn;
+ }
+}
+
int
slapi_plugin_task_unregister_handler(const char *name, dseCallbackFn func)
{
diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py
index d7a6e670c..6bfbcb036 100644
--- a/src/lib389/lib389/cli_conf/backend.py
+++ b/src/lib389/lib389/cli_conf/backend.py
@@ -243,9 +243,13 @@ def backend_import(inst, basedn, log, args):
exclude_suffixes=args.exclude_suffixes)
task.wait(timeout=None)
result = task.get_exit_code()
+ warning = task.get_task_warn()
if task.is_complete() and result == 0:
- log.info("The import task has finished successfully")
+ if warning is None or (warning == 0):
+ log.info("The import task has finished successfully")
+ else:
+ log.info("The import task has finished successfully, with warning code {}, check the logs for more detail".format(warning))
else:
raise ValueError("Import task failed\n-------------------------\n{}".format(ensure_str(task.get_task_log())))
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
index dc7bb9206..bf20d1e61 100644
--- a/src/lib389/lib389/tasks.py
+++ b/src/lib389/lib389/tasks.py
@@ -38,6 +38,7 @@ class Task(DSLdapObject):
self._protected = False
self._exit_code = None
self._task_log = ""
+ self._task_warn = None
def status(self):
"""Return the decoded status of the task
@@ -49,6 +50,7 @@ class Task(DSLdapObject):
self._exit_code = self.get_attr_val_utf8("nsTaskExitCode")
self._task_log = self.get_attr_val_utf8("nsTaskLog")
+ self._task_warn = self.get_attr_val_utf8("nsTaskWarning")
if not self.exists():
self._log.debug("complete: task has self cleaned ...")
# The task cleaned it self up.
@@ -77,6 +79,15 @@ class Task(DSLdapObject):
return None
return None
+ def get_task_warn(self):
+ """Return task's warning code if task is complete, else None."""
+ if self.is_complete():
+ try:
+ return int(self._task_warn)
+ except TypeError:
+ return None
+ return None
+
def wait(self, timeout=120):
"""Wait until task is complete."""
@@ -390,14 +401,17 @@ class Tasks(object):
running, true if done - if true, second is the exit code - if dowait
is True, this function will block until the task is complete'''
attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode',
- 'nsTaskCurrentItem', 'nsTaskTotalItems']
+ 'nsTaskCurrentItem', 'nsTaskTotalItems', 'nsTaskWarning']
done = False
exitCode = 0
+ warningCode = 0
dn = entry.dn
while not done:
entry = self.conn.getEntry(dn, attrlist=attrlist)
self.log.debug("task entry %r", entry)
+ if entry.nsTaskWarning:
+ warningCode = int(entry.nsTaskWarning)
if entry.nsTaskExitCode:
exitCode = int(entry.nsTaskExitCode)
done = True
@@ -405,7 +419,7 @@ class Tasks(object):
time.sleep(1)
else:
break
- return (done, exitCode)
+ return (done, exitCode, warningCode)
def importLDIF(self, suffix=None, benamebase=None, input_file=None,
args=None):
@@ -461,8 +475,9 @@ class Tasks(object):
self.conn.add_s(entry)
exitCode = 0
+ warningCode = 0
if args and args.get(TASK_WAIT, False):
- (done, exitCode) = self.conn.tasks.checkTask(entry, True)
+ (done, exitCode, warningCode) = self.conn.tasks.checkTask(entry, True)
if exitCode:
self.log.error("Error: import task %s for file %s exited with %d",
@@ -470,6 +485,8 @@ class Tasks(object):
else:
self.log.info("Import task %s for file %s completed successfully",
cn, input_file)
+ if warningCode:
+ self.log.info("with warning code %d", warningCode)
self.dn = dn
self.entry = entry
return exitCode
--
2.26.2

View File

@ -0,0 +1,149 @@
From 61d82ef842e0e4e013937bf05d7f640be2d2fc09 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 16 Dec 2020 16:30:28 +0100
Subject: [PATCH 5/6] Issue 4480 - Unexpected info returned to ldap request
(#4491)
Bug description:
If the bind entry does not exist, the bind result info
reports that 'No such entry'. It should not give any
information if the target entry exists or not
Fix description:
Does not return any additional information during a bind
relates: https://github.com/389ds/389-ds-base/issues/4480
Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all)
Platforms tested: F31
---
dirsrvtests/tests/suites/basic/basic_test.py | 112 +++++++++++++++++++
1 file changed, 112 insertions(+)
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 1ae82dcdd..02b73ee85 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -1400,6 +1400,118 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance):
assert not dscreate_long_instance.exists()
+@pytest.fixture(scope="module", params=('c=uk', 'cn=test_user', 'dc=example,dc=com', 'o=south', 'ou=sales', 'wrong=some_value'))
+def dscreate_test_rdn_value(request):
+ template_file = "/tmp/dssetup.inf"
+ template_text = f"""[general]
+config_version = 2
+# This invalid hostname ...
+full_machine_name = localhost.localdomain
+# Means we absolutely require this.
+strict_host_checking = False
+# In tests, we can be run in containers, NEVER trust
+# that systemd is there, or functional in any capacity
+systemd = False
+
+[slapd]
+instance_name = test_different_rdn
+root_dn = cn=directory manager
+root_password = someLongPassword_123
+# We do not have access to high ports in containers,
+# so default to something higher.
+port = 38999
+secure_port = 63699
+
+[backend-userroot]
+create_suffix_entry = True
+suffix = {request.param}
+"""
+
+ with open(template_file, "w") as template_fd:
+ template_fd.write(template_text)
+
+ # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389
+ tmp_env = os.environ
+ if "PYTHONPATH" in tmp_env:
+ del tmp_env["PYTHONPATH"]
+
+ def fin():
+ os.remove(template_file)
+ if request.param != "wrong=some_value":
+ try:
+ subprocess.check_call(['dsctl', 'test_different_rdn', 'remove', '--do-it'])
+ except subprocess.CalledProcessError as e:
+ log.fatal(f"Failed to remove test instance Error ({e.returncode}) {e.output}")
+ else:
+ log.info("Wrong RDN is passed, instance not created")
+ request.addfinalizer(fin)
+ return template_file, tmp_env, request.param,
+
+
+@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.0.0'),
+ reason="This test is only required with new admin cli, and requires root.")
+@pytest.mark.bz1807419
+@pytest.mark.ds50928
+def test_dscreate_with_different_rdn(dscreate_test_rdn_value):
+ """Test that dscreate works with different RDN attributes as suffix
+
+ :id: 77ed6300-6a2f-4e79-a862-1f1105f1e3ef
+ :parametrized: yes
+ :setup: None
+ :steps:
+ 1. Create template file for dscreate with different RDN attributes as suffix
+ 2. Create instance using template file
+ 3. Create instance with 'wrong=some_value' as suffix's RDN attribute
+ :expectedresults:
+ 1. Should succeeds
+ 2. Should succeeds
+ 3. Should fail
+ """
+ try:
+ subprocess.check_call([
+ 'dscreate',
+ 'from-file',
+ dscreate_test_rdn_value[0]
+ ], env=dscreate_test_rdn_value[1])
+ except subprocess.CalledProcessError as e:
+ log.fatal(f"dscreate failed! Error ({e.returncode}) {e.output}")
+ if dscreate_test_rdn_value[2] != "wrong=some_value":
+ assert False
+ else:
+ assert True
+
+def test_bind_invalid_entry(topology_st):
+ """Test the failing bind does not return information about the entry
+
+ :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f
+
+ :setup: Standalone instance
+
+ :steps:
+ 1: bind as non existing entry
+ 2: check that bind info does not report 'No such entry'
+
+ :expectedresults:
+ 1: pass
+ 2: pass
+ """
+
+ topology_st.standalone.restart()
+ INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX
+ try:
+ topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD)
+ except ldap.LDAPError as e:
+ log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY)
+ log.info('exception description: ' + e.args[0]['desc'])
+ if 'info' in e.args[0]:
+ log.info('exception info: ' + e.args[0]['info'])
+ assert e.args[0]['desc'] == 'Invalid credentials'
+ assert 'info' not in e.args[0]
+ pass
+
+ log.info('test_bind_invalid_entry: PASSED')
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
--
2.26.2

View File

@ -0,0 +1,99 @@
From 3c74f736c657d007770fe866842b08d0a74772ca Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 9 Dec 2020 15:21:11 -0500
Subject: [PATCH 6/6] Issue 4414 - disk monitoring - prevent division by zero
crash
Bug Description: If a disk mount has zero total space or zero used
space then a division by zero can occur and the
server will crash.
It has also been observed that sometimes a system
can return the wrong disk entirely, and when that
happens the incorrect disk also has zero available
space which triggers the disk monitoring thread to
immediately shut the server down.
Fix Description: Check the total and used space for zero and do not
divide, just ignore it. As a preemptive measure
ignore disks from /dev, /proc, /sys (except /dev/shm).
Yes it's a bit hacky, but the true underlying cause
is not known yet. So better to be safe than sorry.
Relates: https://github.com/389ds/389-ds-base/issues/4414
Reviewed by: firstyear(Thanks!)
---
ldap/servers/slapd/daemon.c | 22 +++++++++++++++++++++-
ldap/servers/slapd/monitor.c | 13 +++++--------
2 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 691f77570..bfd965263 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -221,7 +221,27 @@ disk_mon_get_mount_point(char *dir)
}
if (s.st_dev == dev_id) {
endmntent(fp);
- return (slapi_ch_strdup(mnt->mnt_dir));
+
+ if ((strncmp(mnt->mnt_dir, "/dev", 4) == 0 && strncmp(mnt->mnt_dir, "/dev/shm", 8) != 0) ||
+ strncmp(mnt->mnt_dir, "/proc", 4) == 0 ||
+ strncmp(mnt->mnt_dir, "/sys", 4) == 0)
+ {
+ /*
+ * Ignore "mount directories" starting with /dev (except
+ * /dev/shm), /proc, /sys For some reason these mounts are
+ * occasionally/incorrectly returned. Only seen this at a
+ * customer site once. When it happens it causes disk
+ * monitoring to think the server has 0 disk space left, and
+ * it abruptly/unexpectedly shuts the server down. At this
+ * point it looks like a bug in stat(), setmntent(), or
+ * getmntent(), but there is no way to prove that since there
+ * is no way to reproduce the original issue. For now just
+ * return NULL to be safe.
+ */
+ return NULL;
+ } else {
+ return (slapi_ch_strdup(mnt->mnt_dir));
+ }
}
}
endmntent(fp);
diff --git a/ldap/servers/slapd/monitor.c b/ldap/servers/slapd/monitor.c
index 562721bed..65f082986 100644
--- a/ldap/servers/slapd/monitor.c
+++ b/ldap/servers/slapd/monitor.c
@@ -131,7 +131,6 @@ monitor_disk_info (Slapi_PBlock *pb __attribute__((unused)),
{
int32_t rc = LDAP_SUCCESS;
char **dirs = NULL;
- char buf[BUFSIZ];
struct berval val;
struct berval *vals[2];
uint64_t total_space;
@@ -143,15 +142,13 @@ monitor_disk_info (Slapi_PBlock *pb __attribute__((unused)),
disk_mon_get_dirs(&dirs);
- for (uint16_t i = 0; dirs && dirs[i]; i++) {
+ for (size_t i = 0; dirs && dirs[i]; i++) {
+ char buf[BUFSIZ] = {0};
rc = disk_get_info(dirs[i], &total_space, &avail_space, &used_space);
- if (rc) {
- slapi_log_err(SLAPI_LOG_WARNING, "monitor_disk_info",
- "Unable to get 'cn=disk space,cn=monitor' stats for %s\n", dirs[i]);
- } else {
+ if (rc == 0 && total_space > 0 && used_space > 0) {
val.bv_len = snprintf(buf, sizeof(buf),
- "partition=\"%s\" size=\"%" PRIu64 "\" used=\"%" PRIu64 "\" available=\"%" PRIu64 "\" use%%=\"%" PRIu64 "\"",
- dirs[i], total_space, used_space, avail_space, used_space * 100 / total_space);
+ "partition=\"%s\" size=\"%" PRIu64 "\" used=\"%" PRIu64 "\" available=\"%" PRIu64 "\" use%%=\"%" PRIu64 "\"",
+ dirs[i], total_space, used_space, avail_space, used_space * 100 / total_space);
val.bv_val = buf;
attrlist_merge(&e->e_attrs, "dsDisk", vals);
}
--
2.26.2

View File

@ -0,0 +1,132 @@
From 48b30739f33d1eb526dbdd45c820129c4a4c4bcb Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Tue, 12 Jan 2021 11:06:24 +0100
Subject: [PATCH] Issue 4504 - Insure ldapi is enabled in repl_monitor_test.py
(Needed on RHEL) (#4527)
(cherry picked from commit 279556bc78ed743d7a053069621d999ec045866f)
---
.../tests/suites/clu/repl_monitor_test.py | 67 +++++++++----------
1 file changed, 31 insertions(+), 36 deletions(-)
diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
index eb18d2da2..b2cb840b3 100644
--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
@@ -9,7 +9,6 @@
import time
import subprocess
import pytest
-import re
from lib389.cli_conf.replication import get_repl_monitor_info
from lib389.tasks import *
@@ -18,6 +17,8 @@ from lib389.topologies import topology_m2
from lib389.cli_base import FakeArgs
from lib389.cli_base.dsrc import dsrc_arg_concat
from lib389.cli_base import connect_instance
+from lib389.replica import Replicas
+
pytestmark = pytest.mark.tier0
@@ -68,25 +69,6 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No
log.info('Reset log file')
f.truncate(0)
-def get_hostnames_from_log(port1, port2):
- # Get the supplier host names as displayed in replication monitor output
- with open(LOG_FILE, 'r') as logfile:
- logtext = logfile.read()
- # search for Supplier :hostname:port
- # and use \D to insure there is no more number is after
- # the matched port (i.e that 10 is not matching 101)
- regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)'
- match=re.search(regexp, logtext)
- host_m1 = 'localhost.localdomain'
- if (match is not None):
- host_m1 = match.group(2)
- # Same for master 2
- regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)'
- match=re.search(regexp, logtext)
- host_m2 = 'localhost.localdomain'
- if (match is not None):
- host_m2 = match.group(2)
- return (host_m1, host_m2)
@pytest.mark.ds50545
@pytest.mark.bz1739718
@@ -115,6 +97,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
m1 = topology_m2.ms["master1"]
m2 = topology_m2.ms["master2"]
+ # Enable ldapi if not already done.
+ for inst in [topology_m2.ms["master1"], topology_m2.ms["master2"]]:
+ if not inst.can_autobind():
+ # Update ns-slapd instance
+ inst.config.set('nsslapd-ldapilisten', 'on')
+ inst.config.set('nsslapd-ldapiautobind', 'on')
+ inst.restart()
+ # Ensure that updates have been sent both ways.
+ replicas = Replicas(m1)
+ replica = replicas.get(DEFAULT_SUFFIX)
+ replica.test_replication([m2])
+ replicas = Replicas(m2)
+ replica = replicas.get(DEFAULT_SUFFIX)
+ replica.test_replication([m1])
+
+ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')',
+ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')']
+
connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port)
content_list = ['Replica Root: dc=example,dc=com',
'Replica ID: 1',
@@ -177,9 +177,20 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
'001',
m1.host + ':' + str(m1.port)]
+ dsrc_content = '[repl-monitor-connections]\n' \
+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ '\n' \
+ '[repl-monitor-aliases]\n' \
+ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
+ 'M2 = ' + m2.host + ':' + str(m2.port)
+
connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]
+ aliases = ['M1=' + m1.host + ':' + str(m1.port),
+ 'M2=' + m2.host + ':' + str(m2.port)]
+
args = FakeArgs()
args.connections = connections
args.aliases = None
@@ -187,24 +198,8 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
log.info('Run replication monitor with connections option')
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
- (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port)
check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
- # Prepare the data for next tests
- aliases = ['M1=' + host_m1 + ':' + str(m1.port),
- 'M2=' + host_m2 + ':' + str(m2.port)]
-
- alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')',
- 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')']
-
- dsrc_content = '[repl-monitor-connections]\n' \
- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- '\n' \
- '[repl-monitor-aliases]\n' \
- 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \
- 'M2 = ' + host_m2 + ':' + str(m2.port)
-
log.info('Run replication monitor with aliases option')
args.aliases = aliases
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
--
2.26.2

View File

@ -0,0 +1,51 @@
From f84e75de9176218d3b47a447d07fe8fb7ca3d72f Mon Sep 17 00:00:00 2001
From: Barbora Simonova <bsmejkal@redhat.com>
Date: Mon, 11 Jan 2021 15:51:24 +0100
Subject: [PATCH] Issue 4315 - performance search rate: nagle triggers high
rate of setsocketopt
Description:
The config value of nsslapd-nagle is now set to 'off' by default.
Added a test case, that checks the value.
Relates: https://github.com/389ds/389-ds-base/issues/4315
Reviewed by: droideck (Thanks!)
---
.../tests/suites/config/config_test.py | 20 +++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index 38d1ed9ac..fda16a530 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -41,6 +41,26 @@ def big_file():
return TEMP_BIG_FILE
+@pytest.mark.bz1897248
+@pytest.mark.ds4315
+@pytest.mark.skipif(ds_is_older('1.4.3.16'), reason="This config setting exists in 1.4.3.16 and higher")
+def test_nagle_default_value(topo):
+ """Test that nsslapd-nagle attribute is off by default
+
+ :id: 00361f5d-d638-4d39-8231-66fa52637203
+ :setup: Standalone instance
+ :steps:
+ 1. Create instance
+ 2. Check the value of nsslapd-nagle
+ :expectedresults:
+ 1. Success
+ 2. The value of nsslapd-nagle should be off
+ """
+
+ log.info('Check the value of nsslapd-nagle attribute is off by default')
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-nagle') == 'off'
+
+
def test_maxbersize_repl(topology_m2, big_file):
"""maxbersize is ignored in the replicated operations.
--
2.26.2

View File

@ -0,0 +1,98 @@
From 00ccec335792e3fa44712427463c64eb1ff9c5be Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 12 Jan 2021 17:45:41 +0100
Subject: [PATCH] Issue 4504 - insure that repl_monitor_test use ldapi (for
RHEL) - fix merge issue (#4533)
(cherry picked from commit a880fddc192414d6283ea6832491b7349e5471dc)
---
.../tests/suites/clu/repl_monitor_test.py | 47 ++++++++++++++-----
1 file changed, 36 insertions(+), 11 deletions(-)
diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
index b2cb840b3..caf6a9099 100644
--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
@@ -9,6 +9,7 @@
import time
import subprocess
import pytest
+import re
from lib389.cli_conf.replication import get_repl_monitor_info
from lib389.tasks import *
@@ -69,6 +70,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No
log.info('Reset log file')
f.truncate(0)
+def get_hostnames_from_log(port1, port2):
+ # Get the supplier host names as displayed in replication monitor output
+ with open(LOG_FILE, 'r') as logfile:
+ logtext = logfile.read()
+ # search for Supplier :hostname:port
+ # and use \D to insure there is no more number is after
+ # the matched port (i.e that 10 is not matching 101)
+ regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)'
+ match=re.search(regexp, logtext)
+ host_m1 = 'localhost.localdomain'
+ if (match is not None):
+ host_m1 = match.group(2)
+ # Same for master 2
+ regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)'
+ match=re.search(regexp, logtext)
+ host_m2 = 'localhost.localdomain'
+ if (match is not None):
+ host_m2 = match.group(2)
+ return (host_m1, host_m2)
@pytest.mark.ds50545
@pytest.mark.bz1739718
@@ -177,20 +197,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
'001',
m1.host + ':' + str(m1.port)]
- dsrc_content = '[repl-monitor-connections]\n' \
- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- '\n' \
- '[repl-monitor-aliases]\n' \
- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
- 'M2 = ' + m2.host + ':' + str(m2.port)
-
connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]
- aliases = ['M1=' + m1.host + ':' + str(m1.port),
- 'M2=' + m2.host + ':' + str(m2.port)]
-
args = FakeArgs()
args.connections = connections
args.aliases = None
@@ -198,8 +207,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
log.info('Run replication monitor with connections option')
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port)
check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
+ # Prepare the data for next tests
+ aliases = ['M1=' + host_m1 + ':' + str(m1.port),
+ 'M2=' + host_m2 + ':' + str(m2.port)]
+
+ alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')',
+ 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')']
+
+ dsrc_content = '[repl-monitor-connections]\n' \
+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ '\n' \
+ '[repl-monitor-aliases]\n' \
+ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \
+ 'M2 = ' + host_m2 + ':' + str(m2.port)
+
log.info('Run replication monitor with aliases option')
args.aliases = aliases
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
--
2.26.2

View File

@ -0,0 +1,70 @@
From 2afc65fd1750afcb1667545da5625f5a932aacdd Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Wed, 13 Jan 2021 15:16:08 +0100
Subject: [PATCH] Issue 4528 - Fix cn=monitor SCOPE_ONE search (#4529)
Bug Description: While doing a ldapsearch on "cn=monitor" is
throwing err=32 with -s one.
Fix Description: 'cn=monitor' is not a real entry so we should not
trying to check if the searched suffix (cm=monitor or its children)
belongs to the searched backend.
Fixes: #4528
Reviewed by: @mreynolds389 @Firstyear @tbordaz (Thanks!)
---
ldap/servers/slapd/opshared.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index c0bc5dcd0..f5ed71144 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -240,6 +240,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
int rc = 0;
int internal_op;
Slapi_DN *basesdn = NULL;
+ Slapi_DN monitorsdn = {0};
Slapi_DN *sdn = NULL;
Slapi_Operation *operation = NULL;
Slapi_Entry *referral = NULL;
@@ -765,9 +766,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
}
} else {
/* be_suffix null means that we are searching the default backend
- * -> don't change the search parameters in pblock
- */
- if (be_suffix != NULL) {
+ * -> don't change the search parameters in pblock
+ * Also, we skip this block for 'cn=monitor' search and its subsearches
+ * as they are done by callbacks from monitor.c */
+ slapi_sdn_init_dn_byref(&monitorsdn, "cn=monitor");
+ if (!((be_suffix == NULL) || slapi_sdn_issuffix(basesdn, &monitorsdn))) {
if ((be_name == NULL) && (scope == LDAP_SCOPE_ONELEVEL)) {
/* one level searches
* - depending on the suffix of the backend we might have to
@@ -789,8 +792,10 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
} else if (slapi_sdn_issuffix(basesdn, be_suffix)) {
int tmp_scope = LDAP_SCOPE_ONELEVEL;
slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope);
- } else
+ } else {
+ slapi_sdn_done(&monitorsdn);
goto next_be;
+ }
}
/* subtree searches :
@@ -811,7 +816,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
}
}
}
-
+ slapi_sdn_done(&monitorsdn);
slapi_pblock_set(pb, SLAPI_BACKEND, be);
slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database);
slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, NULL);
--
2.26.2

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,782 @@
From 788d7c69a446d1ae324b2c58daaa5d4fd5528748 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 20 Jan 2021 16:42:15 -0500
Subject: [PATCH 1/3] Issue 5442 - Search results are different between RHDS10
and RHDS11
Bug Description: In 1.4.x we introduced a change that was overly strict about
how a search on a non-existent subtree returned its error code.
It was changed from returning an error 32 to an error 0 with
zero entries returned.
Fix Description: When finding the entry and processing acl's make sure to
gather the aci's that match the resource even if the resource
does not exist. This requires some extra checks when processing
the target attribute.
relates: https://github.com/389ds/389-ds-base/issues/4542
Reviewed by: firstyear, elkris, and tbordaz (Thanks!)
Apply Thierry's changes
round 2
Apply more suggestions from Thierry
---
dirsrvtests/tests/suites/acl/misc_test.py | 108 +++++++-
ldap/servers/plugins/acl/acl.c | 296 ++++++++++------------
ldap/servers/slapd/back-ldbm/findentry.c | 6 +-
src/lib389/lib389/_mapped_object.py | 4 +-
4 files changed, 239 insertions(+), 175 deletions(-)
diff --git a/dirsrvtests/tests/suites/acl/misc_test.py b/dirsrvtests/tests/suites/acl/misc_test.py
index 5f0e3eb72..c640e60ad 100644
--- a/dirsrvtests/tests/suites/acl/misc_test.py
+++ b/dirsrvtests/tests/suites/acl/misc_test.py
@@ -12,7 +12,7 @@ import ldap
import os
import pytest
-from lib389._constants import DEFAULT_SUFFIX, PW_DM
+from lib389._constants import DEFAULT_SUFFIX, PW_DM, DN_DM
from lib389.idm.user import UserAccount, UserAccounts
from lib389._mapped_object import DSLdapObject
from lib389.idm.account import Accounts, Anonymous
@@ -408,14 +408,112 @@ def test_do_bind_as_201_distinct_users(topo, clean, aci_of_user):
user = uas.create_test_user(uid=i, gid=i)
user.set('userPassword', PW_DM)
- for i in range(len(uas.list())):
- uas.list()[i].bind(PW_DM)
+ users = uas.list()
+ for user in users:
+ user.bind(PW_DM)
ACLPlugin(topo.standalone).replace("nsslapd-aclpb-max-selected-acls", '220')
topo.standalone.restart()
- for i in range(len(uas.list())):
- uas.list()[i].bind(PW_DM)
+ users = uas.list()
+ for user in users:
+ user.bind(PW_DM)
+
+
+def test_info_disclosure(request, topo):
+ """Test that a search returns 32 when base entry does not exist
+
+ :id: f6dec4c2-65a3-41e4-a4c0-146196863333
+ :setup: Standalone Instance
+ :steps:
+ 1. Add aci
+ 2. Add test user
+ 3. Bind as user and search for non-existent entry
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Error 32 is returned
+ """
+
+ ACI_TARGET = "(targetattr = \"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX)
+ ACI_ALLOW = "(version 3.0; acl \"Read/Search permission for all users\"; allow (read,search)"
+ ACI_SUBJECT = "(userdn=\"ldap:///all\");)"
+ ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
+
+ # Get current ACi's so we can restore them when we are done
+ suffix = Domain(topo.standalone, DEFAULT_SUFFIX)
+ preserved_acis = suffix.get_attr_vals_utf8('aci')
+
+ def finofaci():
+ domain = Domain(topo.standalone, DEFAULT_SUFFIX)
+ try:
+ domain.remove_all('aci')
+ domain.replace_values('aci', preserved_acis)
+ except:
+ pass
+ request.addfinalizer(finofaci)
+
+ # Remove aci's
+ suffix.remove_all('aci')
+
+ # Add test user
+ USER_DN = "uid=test,ou=people," + DEFAULT_SUFFIX
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
+ users.create(properties={
+ 'uid': 'test',
+ 'cn': 'test',
+ 'sn': 'test',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/test',
+ 'userPassword': PW_DM
+ })
+
+ # bind as user
+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM)
+
+ # Search fo existing base DN
+ test = Domain(conn, DEFAULT_SUFFIX)
+ try:
+ test.get_attr_vals_utf8_l('dc')
+ assert False
+ except IndexError:
+ pass
+
+ # Search for a non existent bases
+ subtree = Domain(conn, "ou=does_not_exist," + DEFAULT_SUFFIX)
+ try:
+ subtree.get_attr_vals_utf8_l('objectclass')
+ except IndexError:
+ pass
+ subtree = Domain(conn, "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX)
+ try:
+ subtree.get_attr_vals_utf8_l('objectclass')
+ except IndexError:
+ pass
+ # Try ONE level search instead of BASE
+ try:
+ Accounts(conn, "ou=does_not_exist," + DEFAULT_SUFFIX).filter("(objectclass=top)", ldap.SCOPE_ONELEVEL)
+ except IndexError:
+ pass
+
+ # add aci
+ suffix.add('aci', ACI)
+
+ # Search for a non existent entry which should raise an exception
+ with pytest.raises(ldap.NO_SUCH_OBJECT):
+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM)
+ subtree = Domain(conn, "ou=does_not_exist," + DEFAULT_SUFFIX)
+ subtree.get_attr_vals_utf8_l('objectclass')
+ with pytest.raises(ldap.NO_SUCH_OBJECT):
+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM)
+ subtree = Domain(conn, "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX)
+ subtree.get_attr_vals_utf8_l('objectclass')
+ with pytest.raises(ldap.NO_SUCH_OBJECT):
+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM)
+ DN = "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX
+ Accounts(conn, DN).filter("(objectclass=top)", ldap.SCOPE_ONELEVEL, strict=True)
+
if __name__ == "__main__":
CURRENT_FILE = os.path.realpath(__file__)
diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c
index 41a909a18..4e811f73a 100644
--- a/ldap/servers/plugins/acl/acl.c
+++ b/ldap/servers/plugins/acl/acl.c
@@ -2111,10 +2111,11 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
aci_right = aci->aci_access;
res_right = aclpb->aclpb_access;
if (!(aci_right & res_right)) {
- /* If we are looking for read/search and the acl has read/search
- ** then go further because if targets match we may keep that
- ** acl in the entry cache list.
- */
+ /*
+ * If we are looking for read/search and the acl has read/search
+ * then go further because if targets match we may keep that
+ * acl in the entry cache list.
+ */
if (!((res_right & (SLAPI_ACL_SEARCH | SLAPI_ACL_READ)) &&
(aci_right & (SLAPI_ACL_SEARCH | SLAPI_ACL_READ)))) {
matches = ACL_FALSE;
@@ -2122,30 +2123,29 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
}
}
-
- /* first Let's see if the entry is under the subtree where the
- ** ACL resides. We can't let somebody affect a target beyond the
- ** scope of where the ACL resides
- ** Example: ACL is located in "ou=engineering, o=ace industry, c=us
- ** but if the target is "o=ace industry, c=us", then we are in trouble.
- **
- ** If the aci is in the rootdse and the entry is not, then we do not
- ** match--ie. acis in the rootdse do NOT apply below...for the moment.
- **
- */
+ /*
+ * First Let's see if the entry is under the subtree where the
+ * ACL resides. We can't let somebody affect a target beyond the
+ * scope of where the ACL resides
+ * Example: ACL is located in "ou=engineering, o=ace industry, c=us
+ * but if the target is "o=ace industry, c=us", then we are in trouble.
+ *
+ * If the aci is in the rootdse and the entry is not, then we do not
+ * match--ie. acis in the rootdse do NOT apply below...for the moment.
+ */
res_ndn = slapi_sdn_get_ndn(aclpb->aclpb_curr_entry_sdn);
aci_ndn = slapi_sdn_get_ndn(aci->aci_sdn);
- if (!slapi_sdn_issuffix(aclpb->aclpb_curr_entry_sdn, aci->aci_sdn) || (!slapi_is_rootdse(res_ndn) && slapi_is_rootdse(aci_ndn))) {
-
- /* cant' poke around */
+ if (!slapi_sdn_issuffix(aclpb->aclpb_curr_entry_sdn, aci->aci_sdn) ||
+ (!slapi_is_rootdse(res_ndn) && slapi_is_rootdse(aci_ndn)))
+ {
+ /* can't poke around */
matches = ACL_FALSE;
goto acl__resource_match_aci_EXIT;
}
/*
- ** We have a single ACI which we need to find if it applies to
- ** the resource or not.
- */
+ * We have a single ACI which we need to find if it applies to the resource or not.
+ */
if ((aci->aci_type & ACI_TARGET_DN) && (aclpb->aclpb_curr_entry_sdn)) {
char *avaType;
struct berval *avaValue;
@@ -2173,25 +2173,23 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
char *avaType;
struct berval *avaValue;
char logbuf[1024];
-
- /* We are evaluating the moddn permission.
- * The aci contains target_to and target_from
- *
- * target_to filter must be checked against the resource ndn that was stored in
- * aclpb->aclpb_curr_entry_sdn
- *
- * target_from filter must be check against the entry ndn that is in aclpb->aclpb_moddn_source_sdn
- * (sdn was stored in the pblock)
- */
+ /*
+ * We are evaluating the moddn permission.
+ * The aci contains target_to and target_from
+ *
+ * target_to filter must be checked against the resource ndn that was stored in
+ * aclpb->aclpb_curr_entry_sdn
+ *
+ * target_from filter must be check against the entry ndn that is in aclpb->aclpb_moddn_source_sdn
+ * (sdn was stored in the pblock)
+ */
if (aci->target_to) {
f = aci->target_to;
dn_matched = ACL_TRUE;
/* Now check if the filter is a simple or substring filter */
if (aci->aci_type & ACI_TARGET_MODDN_TO_PATTERN) {
- /* This is a filter with substring
- * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com
- */
+ /* This is a filter with substring e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com */
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_to substring: %s\n",
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
if ((rv = acl_match_substring(f, (char *)res_ndn, 0 /* match suffix */)) != ACL_TRUE) {
@@ -2204,9 +2202,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
}
}
} else {
- /* This is a filter without substring
- * e.g. ldap:///cn=accounts,dc=example,dc=com
- */
+ /* This is a filter without substring e.g. ldap:///cn=accounts,dc=example,dc=com */
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_to: %s\n",
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
slapi_filter_get_ava(f, &avaType, &avaValue);
@@ -2230,8 +2226,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
/* Now check if the filter is a simple or substring filter */
if (aci->aci_type & ACI_TARGET_MODDN_FROM_PATTERN) {
/* This is a filter with substring
- * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com
- */
+ * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com
+ */
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_from substring: %s\n",
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
if ((rv = acl_match_substring(f, (char *)slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn), 0 /* match suffix */)) != ACL_TRUE) {
@@ -2243,11 +2239,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
goto acl__resource_match_aci_EXIT;
}
}
-
} else {
- /* This is a filter without substring
- * e.g. ldap:///cn=accounts,dc=example,dc=com
- */
+ /* This is a filter without substring e.g. ldap:///cn=accounts,dc=example,dc=com */
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_from: %s\n",
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
if (!slapi_dn_issuffix(slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn), avaValue->bv_val)) {
@@ -2269,10 +2262,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
}
if (aci->aci_type & ACI_TARGET_PATTERN) {
-
f = aci->target;
dn_matched = ACL_TRUE;
-
if ((rv = acl_match_substring(f, (char *)res_ndn, 0 /* match suffux */)) != ACL_TRUE) {
dn_matched = ACL_FALSE;
if (rv == ACL_ERR) {
@@ -2296,7 +2287,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
/*
* Is it a (target="ldap://cn=*,($dn),o=sun.com") kind of thing.
- */
+ */
if (aci->aci_type & ACI_TARGET_MACRO_DN) {
/*
* See if the ($dn) component matches the string and
@@ -2306,8 +2297,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
* entry is the same one don't recalculate it--
* this flag only works for search right now, could
* also optimise for mods by making it work for mods.
- */
-
+ */
if ((aclpb->aclpb_res_type & ACLPB_NEW_ENTRY) == 0) {
/*
* Here same entry so just look up the matched value,
@@ -2356,8 +2346,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
* If there is already an entry for this aci in this
* aclpb then remove it--it's an old value for a
* different entry.
- */
-
+ */
acl_ht_add_and_freeOld(aclpb->aclpb_macro_ht,
(PLHashNumber)aci->aci_index,
matched_val);
@@ -2381,30 +2370,27 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
}
/*
- ** Here, if there's a targetfilter field, see if it matches.
- **
- ** The commented out code below was an erroneous attempt to skip
- ** this test. It is wrong because: 1. you need to store
- ** whether the last test matched or not (you cannot just assume it did)
- ** and 2. It may not be the same aci, so the previous matched
- ** value is a function of the aci.
- ** May be interesting to build such a cache...but no evidence for
- ** for that right now. See Bug 383424.
- **
- **
- ** && ((aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_LIST) ||
- ** (aclpb->aclpb_res_type & ACLPB_NEW_ENTRY))
- */
+ * Here, if there's a targetfilter field, see if it matches.
+ *
+ * The commented out code below was an erroneous attempt to skip
+ * this test. It is wrong because: 1. you need to store
+ * whether the last test matched or not (you cannot just assume it did)
+ * and 2. It may not be the same aci, so the previous matched
+ * value is a function of the aci.
+ * May be interesting to build such a cache...but no evidence for
+ * for that right now. See Bug 383424.
+ *
+ *
+ * && ((aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_LIST) ||
+ * (aclpb->aclpb_res_type & ACLPB_NEW_ENTRY))
+ */
if (aci->aci_type & ACI_TARGET_FILTER) {
int filter_matched = ACL_TRUE;
-
/*
* Check for macros.
* For targetfilter we need to fake the lasinfo structure--it's
* created "naturally" for subjects but not targets.
- */
-
-
+ */
if (aci->aci_type & ACI_TARGET_FILTER_MACRO_DN) {
lasInfo *lasinfo = NULL;
@@ -2419,11 +2405,9 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
ACL_EVAL_TARGET_FILTER);
slapi_ch_free((void **)&lasinfo);
} else {
-
-
if (slapi_vattr_filter_test(NULL, aclpb->aclpb_curr_entry,
aci->targetFilter,
- 0 /*don't do acess chk*/) != 0) {
+ 0 /*don't do access check*/) != 0) {
filter_matched = ACL_FALSE;
}
}
@@ -2450,7 +2434,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
* Check to see if we need to evaluate any targetattrfilters.
* They look as follows:
* (targetattrfilters="add=sn:(sn=rob) && gn:(gn!=byrne),
- * del=sn:(sn=rob) && gn:(gn=byrne)")
+ * del=sn:(sn=rob) && gn:(gn=byrne)")
*
* For ADD/DELETE:
* If theres's a targetattrfilter then each add/del filter
@@ -2458,29 +2442,25 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
* by each value of the attribute in the entry.
*
* For MODIFY:
- * If there's a targetattrfilter then the add/del filter
+ * If there's a targetattrfilter then the add/del filter
* must be satisfied by the attribute to be added/deleted.
* (MODIFY acl is evaluated one value at a time).
*
*
- */
-
+ */
if (((aclpb->aclpb_access & SLAPI_ACL_ADD) &&
(aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) ||
((aclpb->aclpb_access & SLAPI_ACL_DELETE) &&
- (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS))) {
-
+ (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS)))
+ {
Targetattrfilter **attrFilterArray = NULL;
-
Targetattrfilter *attrFilter = NULL;
-
Slapi_Attr *attr_ptr = NULL;
Slapi_Value *sval;
const struct berval *attrVal;
int k;
int done;
-
if ((aclpb->aclpb_access & SLAPI_ACL_ADD) &&
(aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) {
@@ -2497,28 +2477,20 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
while (attrFilterArray && attrFilterArray[num_attrs] && attr_matched) {
attrFilter = attrFilterArray[num_attrs];
-
/*
- * If this filter applies to an attribute in the entry,
- * apply it to the entry.
- * Otherwise just ignore it.
- *
- */
-
- if (slapi_entry_attr_find(aclpb->aclpb_curr_entry,
- attrFilter->attr_str,
- &attr_ptr) == 0) {
-
+ * If this filter applies to an attribute in the entry,
+ * apply it to the entry.
+ * Otherwise just ignore it.
+ *
+ */
+ if (slapi_entry_attr_find(aclpb->aclpb_curr_entry, attrFilter->attr_str, &attr_ptr) == 0) {
/*
- * This is an applicable filter.
- * The filter is to be appplied to the entry being added
- * or deleted.
- * The filter needs to be satisfied by _each_ occurence
- * of the attribute in the entry--otherwise you
- * could satisfy the filter and then put loads of other
- * values in on the back of it.
- */
-
+ * This is an applicable filter.
+ * The filter is to be applied to the entry being added or deleted.
+ * The filter needs to be satisfied by _each_ occurrence of the
+ * attribute in the entry--otherwise you could satisfy the filter
+ * and then put loads of other values in on the back of it.
+ */
sval = NULL;
attrVal = NULL;
k = slapi_attr_first_value(attr_ptr, &sval);
@@ -2528,12 +2500,11 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
if (acl__make_filter_test_entry(&aclpb->aclpb_filter_test_entry,
attrFilter->attr_str,
- (struct berval *)attrVal) == LDAP_SUCCESS) {
-
+ (struct berval *)attrVal) == LDAP_SUCCESS)
+ {
attr_matched = acl__test_filter(aclpb->aclpb_filter_test_entry,
attrFilter->filter,
- 1 /* Do filter sense evaluation below */
- );
+ 1 /* Do filter sense evaluation below */);
done = !attr_matched;
slapi_entry_free(aclpb->aclpb_filter_test_entry);
}
@@ -2542,19 +2513,19 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
} /* while */
/*
- * Here, we applied an applicable filter to the entry.
- * So if attr_matched is ACL_TRUE then every value
- * of the attribute in the entry satisfied the filter.
- * Otherwise, attr_matched is ACL_FALSE and not every
- * value satisfied the filter, so we will teminate the
- * scan of the filter list.
- */
+ * Here, we applied an applicable filter to the entry.
+ * So if attr_matched is ACL_TRUE then every value
+ * of the attribute in the entry satisfied the filter.
+ * Otherwise, attr_matched is ACL_FALSE and not every
+ * value satisfied the filter, so we will terminate the
+ * scan of the filter list.
+ */
}
num_attrs++;
} /* while */
-/*
+ /*
* Here, we've applied all the applicable filters to the entry.
* Each one must have been satisfied by all the values of the attribute.
* The result of this is stored in attr_matched.
@@ -2585,7 +2556,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
} else if (((aclpb->aclpb_access & ACLPB_SLAPI_ACL_WRITE_ADD) &&
(aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) ||
((aclpb->aclpb_access & ACLPB_SLAPI_ACL_WRITE_DEL) &&
- (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS))) {
+ (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS)))
+ {
/*
* Here, it's a modify add/del and we have attr filters.
* So, we need to scan the add/del filter list to find the filter
@@ -2629,11 +2601,10 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
* Otherwise, ignore the targetattrfilters.
*/
if (found) {
-
if (acl__make_filter_test_entry(&aclpb->aclpb_filter_test_entry,
aclpb->aclpb_curr_attrEval->attrEval_name,
- aclpb->aclpb_curr_attrVal) == LDAP_SUCCESS) {
-
+ aclpb->aclpb_curr_attrVal) == LDAP_SUCCESS)
+ {
attr_matched = acl__test_filter(aclpb->aclpb_filter_test_entry,
attrFilter->filter,
1 /* Do filter sense evaluation below */
@@ -2651,20 +2622,21 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
* Here this attribute appeared and was matched in a
* targetattrfilters list, so record this fact so we do
* not have to scan the targetattr list for the attribute.
- */
+ */
attr_matched_in_targetattrfilters = 1;
}
} /* targetvaluefilters */
- /* There are 3 cases by which acis are selected.
- ** 1) By scanning the whole list and picking based on the resource.
- ** 2) By picking a subset of the list which will be used for the whole
- ** acl evaluation.
- ** 3) A finer granularity, i.e, a selected list of acls which will be
- ** used for only that entry's evaluation.
- */
+ /*
+ * There are 3 cases by which acis are selected.
+ * 1) By scanning the whole list and picking based on the resource.
+ * 2) By picking a subset of the list which will be used for the whole
+ * acl evaluation.
+ * 3) A finer granularity, i.e, a selected list of acls which will be
+ * used for only that entry's evaluation.
+ */
if (!(skip_attrEval) && (aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_ENTRY_LIST) &&
(res_right & SLAPI_ACL_SEARCH) &&
((aci->aci_access & SLAPI_ACL_READ) || (aci->aci_access & SLAPI_ACL_SEARCH))) {
@@ -2680,7 +2652,6 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
}
}
-
/* If we are suppose to skip attr eval, then let's skip it */
if ((aclpb->aclpb_access & SLAPI_ACL_SEARCH) && (!skip_attrEval) &&
(aclpb->aclpb_res_type & ACLPB_NEW_ENTRY)) {
@@ -2697,9 +2668,10 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
goto acl__resource_match_aci_EXIT;
}
- /* We need to check again because we don't want to select this handle
- ** if the right doesn't match for now.
- */
+ /*
+ * We need to check again because we don't want to select this handle
+ * if the right doesn't match for now.
+ */
if (!(aci_right & res_right)) {
matches = ACL_FALSE;
goto acl__resource_match_aci_EXIT;
@@ -2718,20 +2690,16 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
* rbyrneXXX if we had a proper permission for modrdn eg SLAPI_ACL_MODRDN
* then we would not need this crappy way of telling it was a MODRDN
* request ie. SLAPI_ACL_WRITE && !(c_attrEval).
- */
-
+ */
c_attrEval = aclpb->aclpb_curr_attrEval;
/*
* If we've already matched on targattrfilter then do not
* bother to look at the attrlist.
- */
-
+ */
if (!attr_matched_in_targetattrfilters) {
-
/* match target attr */
- if ((c_attrEval) &&
- (aci->aci_type & ACI_TARGET_ATTR)) {
+ if ((c_attrEval) && (aci->aci_type & ACI_TARGET_ATTR)) {
/* there is a target ATTR */
Targetattr **attrArray = aci->targetAttr;
Targetattr *attr = NULL;
@@ -2773,46 +2741,43 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
matches = (attr_matched ? ACL_TRUE : ACL_FALSE);
}
-
aclpb->aclpb_state &= ~ACLPB_ATTR_STAR_MATCHED;
/* figure out how it matched, i.e star matched */
- if (matches && star_matched && num_attrs == 1 &&
- !(aclpb->aclpb_state & ACLPB_FOUND_ATTR_RULE))
+ if (matches && star_matched && num_attrs == 1 && !(aclpb->aclpb_state & ACLPB_FOUND_ATTR_RULE)) {
aclpb->aclpb_state |= ACLPB_ATTR_STAR_MATCHED;
- else {
+ } else {
/* we are here means that there is a specific
- ** attr in the rule for this resource.
- ** We need to avoid this case
- ** Rule 1: (targetattr = "uid")
- ** Rule 2: (targetattr = "*")
- ** we cannot use STAR optimization
- */
+ * attr in the rule for this resource.
+ * We need to avoid this case
+ * Rule 1: (targetattr = "uid")
+ * Rule 2: (targetattr = "*")
+ * we cannot use STAR optimization
+ */
aclpb->aclpb_state |= ACLPB_FOUND_ATTR_RULE;
aclpb->aclpb_state &= ~ACLPB_ATTR_STAR_MATCHED;
}
- } else if ((c_attrEval) ||
- (aci->aci_type & ACI_TARGET_ATTR)) {
+ } else if ((c_attrEval) || (aci->aci_type & ACI_TARGET_ATTR)) {
if ((aci_right & ACL_RIGHTS_TARGETATTR_NOT_NEEDED) &&
(aclpb->aclpb_access & ACL_RIGHTS_TARGETATTR_NOT_NEEDED)) {
/*
- ** Targetattr rule doesn't make any sense
- ** in this case. So select this rule
- ** default: matches = ACL_TRUE;
- */
+ * Targetattr rule doesn't make any sense
+ * in this case. So select this rule
+ * default: matches = ACL_TRUE;
+ */
;
- } else if (aci_right & SLAPI_ACL_WRITE &&
+ } else if ((aci_right & SLAPI_ACL_WRITE) &&
(aci->aci_type & ACI_TARGET_ATTR) &&
!(c_attrEval) &&
(aci->aci_type & ACI_HAS_ALLOW_RULE)) {
/* We need to handle modrdn operation. Modrdn doesn't
- ** change any attrs but changes the RDN and so (attr=NULL).
- ** Here we found an acl which has a targetattr but
- ** the resource doesn't need one. In that case, we should
- ** consider this acl.
- ** the opposite is true if it is a deny rule, only a deny without
- ** any targetattr should deny modrdn
- ** default: matches = ACL_TRUE;
- */
+ * change any attrs but changes the RDN and so (attr=NULL).
+ * Here we found an acl which has a targetattr but
+ * the resource doesn't need one. In that case, we should
+ * consider this acl.
+ * the opposite is true if it is a deny rule, only a deny without
+ * any targetattr should deny modrdn
+ * default: matches = ACL_TRUE;
+ */
;
} else {
matches = ACL_FALSE;
@@ -2821,16 +2786,16 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
} /* !attr_matched_in_targetattrfilters */
/*
- ** Here we are testing if we find a entry test rule (which should
- ** be rare). In that case, just remember it. An entry test rule
- ** doesn't have "(targetattr)".
- */
+ * Here we are testing if we find a entry test rule (which should
+ * be rare). In that case, just remember it. An entry test rule
+ * doesn't have "(targetattr)".
+ */
if ((aclpb->aclpb_state & ACLPB_EVALUATING_FIRST_ATTR) &&
(!(aci->aci_type & ACI_TARGET_ATTR))) {
aclpb->aclpb_state |= ACLPB_FOUND_A_ENTRY_TEST_RULE;
}
-/*
+ /*
* Generic exit point for this routine:
* matches is ACL_TRUE if the aci matches the target of the resource,
* ACL_FALSE othrewise.
@@ -2853,6 +2818,7 @@ acl__resource_match_aci_EXIT:
return (matches);
}
+
/* Macro to determine if the cached result is valid or not. */
#define ACL_CACHED_RESULT_VALID(result) \
(((result & ACLPB_CACHE_READ_RES_ALLOW) && \
diff --git a/ldap/servers/slapd/back-ldbm/findentry.c b/ldap/servers/slapd/back-ldbm/findentry.c
index 6e53a0aea..bff751c88 100644
--- a/ldap/servers/slapd/back-ldbm/findentry.c
+++ b/ldap/servers/slapd/back-ldbm/findentry.c
@@ -93,7 +93,6 @@ find_entry_internal_dn(
size_t tries = 0;
int isroot = 0;
int op_type;
- char *errbuf = NULL;
/* get the managedsait ldap message control */
slapi_pblock_get(pb, SLAPI_MANAGEDSAIT, &managedsait);
@@ -207,8 +206,8 @@ find_entry_internal_dn(
break;
}
if (acl_type > 0) {
- err = plugin_call_acl_plugin(pb, me->ep_entry, NULL, NULL, acl_type,
- ACLPLUGIN_ACCESS_DEFAULT, &errbuf);
+ char *dummy_attr = "1.1";
+ err = slapi_access_allowed(pb, me->ep_entry, dummy_attr, NULL, acl_type);
}
if (((acl_type > 0) && err) || (op_type == SLAPI_OPERATION_BIND)) {
/*
@@ -237,7 +236,6 @@ find_entry_internal_dn(
CACHE_RETURN(&inst->inst_cache, &me);
}
- slapi_ch_free_string(&errbuf);
slapi_log_err(SLAPI_LOG_TRACE, "find_entry_internal_dn", "<= Not found (%s)\n",
slapi_sdn_get_dn(sdn));
return (NULL);
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
index c60837601..ca6ea6ef8 100644
--- a/src/lib389/lib389/_mapped_object.py
+++ b/src/lib389/lib389/_mapped_object.py
@@ -1190,7 +1190,7 @@ class DSLdapObjects(DSLogging, DSLints):
# Now actually commit the creation req
return co.ensure_state(rdn, properties, self._basedn)
- def filter(self, search, scope=None):
+ def filter(self, search, scope=None, strict=False):
# This will yield and & filter for objectClass with as many terms as needed.
if search:
search_filter = _gen_and([self._get_objectclass_filter(), search])
@@ -1211,5 +1211,7 @@ class DSLdapObjects(DSLogging, DSLints):
insts = [self._entry_to_instance(dn=r.dn, entry=r) for r in results]
except ldap.NO_SUCH_OBJECT:
# There are no objects to select from, se we return an empty array
+ if strict:
+ raise ldap.NO_SUCH_OBJECT
insts = []
return insts
--
2.26.2

View File

@ -0,0 +1,452 @@
From 5bca57b52069508a55b36fafe3729b7d1243743b Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 27 Jan 2021 11:58:38 +0100
Subject: [PATCH 2/3] Issue 4526 - sync_repl: when completing an operation in
the pending list, it can select the wrong operation (#4553)
Bug description:
When an operation completes, it was retrieved in the pending list by
the address of the Operation structure. In case of POST OP nested operations
the same address can be reused. So when completing an operation there could be
confusion about which operation actually completed.
A second problem is that if an update hits DB_DEADLOCK, the BETXN_PREOP can
be called several times. During retry, the operation is already in the pending
list.
Fix description:
The fix defines a new operation extension (sync_persist_extension_type).
This operation extension contains an index (idx_pl) of the op_pl in
the pending list.
An additional safety fix is to dump the pending list in case it becomes large (>10).
The pending list is dumped with SLAPI_LOG_PLUGIN.
When there is a retry (operation extension exists) the call to sync_update_persist_betxn_pre_op
becomes a NOOP: the operation is not added again in the pending list.
relates: https://github.com/389ds/389-ds-base/issues/4526
Reviewed by: William Brown (Thanks !!)
---
ldap/servers/plugins/sync/sync.h | 9 ++
ldap/servers/plugins/sync/sync_init.c | 64 +++++++-
ldap/servers/plugins/sync/sync_persist.c | 194 ++++++++++++++++-------
3 files changed, 208 insertions(+), 59 deletions(-)
diff --git a/ldap/servers/plugins/sync/sync.h b/ldap/servers/plugins/sync/sync.h
index 7241fddbf..2fdf24476 100644
--- a/ldap/servers/plugins/sync/sync.h
+++ b/ldap/servers/plugins/sync/sync.h
@@ -82,6 +82,12 @@ typedef enum _pl_flags {
OPERATION_PL_IGNORED = 5
} pl_flags_t;
+typedef struct op_ext_ident
+{
+ uint32_t idx_pl; /* To uniquely identify an operation in PL, the operation extension
+ * contains the index of that operation in the pending list
+ */
+} op_ext_ident_t;
/* Pending list operations.
* it contains a list ('next') of nested operations. The
* order the same order that the server applied the operation
@@ -90,6 +96,7 @@ typedef enum _pl_flags {
typedef struct OPERATION_PL_CTX
{
Operation *op; /* Pending operation, should not be freed as it belongs to the pblock */
+ uint32_t idx_pl; /* index of the operation in the pending list */
pl_flags_t flags; /* operation is completed (set to TRUE in POST) */
Slapi_Entry *entry; /* entry to be store in the enqueued node. 1st arg sync_queue_change */
Slapi_Entry *eprev; /* pre-entry to be stored in the enqueued node. 2nd arg sync_queue_change */
@@ -99,6 +106,8 @@ typedef struct OPERATION_PL_CTX
OPERATION_PL_CTX_T * get_thread_primary_op(void);
void set_thread_primary_op(OPERATION_PL_CTX_T *op);
+const op_ext_ident_t * sync_persist_get_operation_extension(Slapi_PBlock *pb);
+void sync_persist_set_operation_extension(Slapi_PBlock *pb, op_ext_ident_t *op_ident);
int sync_register_operation_extension(void);
int sync_unregister_operation_entension(void);
diff --git a/ldap/servers/plugins/sync/sync_init.c b/ldap/servers/plugins/sync/sync_init.c
index 74af14512..9e6a12000 100644
--- a/ldap/servers/plugins/sync/sync_init.c
+++ b/ldap/servers/plugins/sync/sync_init.c
@@ -16,6 +16,7 @@ static int sync_preop_init(Slapi_PBlock *pb);
static int sync_postop_init(Slapi_PBlock *pb);
static int sync_be_postop_init(Slapi_PBlock *pb);
static int sync_betxn_preop_init(Slapi_PBlock *pb);
+static int sync_persist_register_operation_extension(void);
static PRUintn thread_primary_op;
@@ -43,7 +44,8 @@ sync_init(Slapi_PBlock *pb)
slapi_pblock_set(pb, SLAPI_PLUGIN_CLOSE_FN,
(void *)sync_close) != 0 ||
slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION,
- (void *)&pdesc) != 0) {
+ (void *)&pdesc) != 0 ||
+ sync_persist_register_operation_extension()) {
slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM,
"sync_init - Failed to register plugin\n");
rc = 1;
@@ -242,4 +244,64 @@ set_thread_primary_op(OPERATION_PL_CTX_T *op)
PR_SetThreadPrivate(thread_primary_op, (void *) head);
}
head->next = op;
+}
+
+/* The following definitions are used for the operation pending list
+ * (used by sync_repl). To retrieve a specific operation in the pending
+ * list, the operation extension contains the index of the operation in
+ * the pending list
+ */
+static int sync_persist_extension_type; /* initialized in sync_persist_register_operation_extension */
+static int sync_persist_extension_handle; /* initialized in sync_persist_register_operation_extension */
+
+const op_ext_ident_t *
+sync_persist_get_operation_extension(Slapi_PBlock *pb)
+{
+ Slapi_Operation *op;
+ op_ext_ident_t *ident;
+
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ ident = slapi_get_object_extension(sync_persist_extension_type, op,
+ sync_persist_extension_handle);
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_get_operation_extension operation (op=0x%lx) -> %d\n",
+ (ulong) op, ident ? ident->idx_pl : -1);
+ return (const op_ext_ident_t *) ident;
+
+}
+
+void
+sync_persist_set_operation_extension(Slapi_PBlock *pb, op_ext_ident_t *op_ident)
+{
+ Slapi_Operation *op;
+
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_set_operation_extension operation (op=0x%lx) -> %d\n",
+ (ulong) op, op_ident ? op_ident->idx_pl : -1);
+ slapi_set_object_extension(sync_persist_extension_type, op,
+ sync_persist_extension_handle, (void *)op_ident);
+}
+/* operation extension constructor */
+static void *
+sync_persist_operation_extension_constructor(void *object __attribute__((unused)), void *parent __attribute__((unused)))
+{
+ /* we only set the extension value explicitly in sync_update_persist_betxn_pre_op */
+ return NULL; /* we don't set anything in the ctor */
+}
+
+/* consumer operation extension destructor */
+static void
+sync_persist_operation_extension_destructor(void *ext, void *object __attribute__((unused)), void *parent __attribute__((unused)))
+{
+ op_ext_ident_t *op_ident = (op_ext_ident_t *)ext;
+ slapi_ch_free((void **)&op_ident);
+}
+static int
+sync_persist_register_operation_extension(void)
+{
+ return slapi_register_object_extension(SYNC_PLUGIN_SUBSYSTEM,
+ SLAPI_EXT_OPERATION,
+ sync_persist_operation_extension_constructor,
+ sync_persist_operation_extension_destructor,
+ &sync_persist_extension_type,
+ &sync_persist_extension_handle);
}
\ No newline at end of file
diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
index d13f142b0..e93a8fa83 100644
--- a/ldap/servers/plugins/sync/sync_persist.c
+++ b/ldap/servers/plugins/sync/sync_persist.c
@@ -47,6 +47,9 @@ static int sync_release_connection(Slapi_PBlock *pb, Slapi_Connection *conn, Sla
* per thread pending list of nested operation..
* being a betxn_preop the pending list has the same order
* that the server received the operation
+ *
+ * In case of DB_RETRY, this callback can be called several times
+ * The detection of the DB_RETRY is done via the operation extension
*/
int
sync_update_persist_betxn_pre_op(Slapi_PBlock *pb)
@@ -54,64 +57,128 @@ sync_update_persist_betxn_pre_op(Slapi_PBlock *pb)
OPERATION_PL_CTX_T *prim_op;
OPERATION_PL_CTX_T *new_op;
Slapi_DN *sdn;
+ uint32_t idx_pl = 0;
+ op_ext_ident_t *op_ident;
+ Operation *op;
if (!SYNC_IS_INITIALIZED()) {
/* not initialized if sync plugin is not started */
return 0;
}
+ prim_op = get_thread_primary_op();
+ op_ident = sync_persist_get_operation_extension(pb);
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn);
+
+ /* Check if we are in a DB retry case */
+ if (op_ident && prim_op) {
+ OPERATION_PL_CTX_T *current_op;
+
+ /* This callback is called (with the same operation) because of a DB_RETRY */
+
+ /* It already existed (in the operation extension) an index of the operation in the pending list */
+ for (idx_pl = 0, current_op = prim_op; current_op->next; idx_pl++, current_op = current_op->next) {
+ if (op_ident->idx_pl == idx_pl) {
+ break;
+ }
+ }
+
+ /* The retrieved operation in the pending list is at the right
+ * index and state. Just return making this callback a noop
+ */
+ PR_ASSERT(current_op);
+ PR_ASSERT(current_op->op == op);
+ PR_ASSERT(current_op->flags == OPERATION_PL_PENDING);
+ slapi_log_err(SLAPI_LOG_WARNING, SYNC_PLUGIN_SUBSYSTEM, "sync_update_persist_betxn_pre_op - DB retried operation targets "
+ "\"%s\" (op=0x%lx idx_pl=%d) => op not changed in PL\n",
+ slapi_sdn_get_dn(sdn), (ulong) op, idx_pl);
+ return 0;
+ }
+
/* Create a new pending operation node */
new_op = (OPERATION_PL_CTX_T *)slapi_ch_calloc(1, sizeof(OPERATION_PL_CTX_T));
new_op->flags = OPERATION_PL_PENDING;
- slapi_pblock_get(pb, SLAPI_OPERATION, &new_op->op);
- slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn);
+ new_op->op = op;
- prim_op = get_thread_primary_op();
if (prim_op) {
/* It already exists a primary operation, so the current
* operation is a nested one that we need to register at the end
* of the pending nested operations
+ * Also computes the idx_pl that will be the identifier (index) of the operation
+ * in the pending list
*/
OPERATION_PL_CTX_T *current_op;
- for (current_op = prim_op; current_op->next; current_op = current_op->next);
+ for (idx_pl = 0, current_op = prim_op; current_op->next; idx_pl++, current_op = current_op->next);
current_op->next = new_op;
+ idx_pl++; /* idx_pl is currently the index of the last op
+ * as we are adding a new op we need to increase that index
+ */
slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_update_persist_betxn_pre_op - nested operation targets "
- "\"%s\" (0x%lx)\n",
- slapi_sdn_get_dn(sdn), (ulong) new_op->op);
+ "\"%s\" (op=0x%lx idx_pl=%d)\n",
+ slapi_sdn_get_dn(sdn), (ulong) new_op->op, idx_pl);
} else {
/* The current operation is the first/primary one in the txn
* registers it directly in the thread private data (head)
*/
set_thread_primary_op(new_op);
+ idx_pl = 0; /* as primary operation, its index in the pending list is 0 */
slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_update_persist_betxn_pre_op - primary operation targets "
"\"%s\" (0x%lx)\n",
slapi_sdn_get_dn(sdn), (ulong) new_op->op);
}
+
+ /* records, in the operation extension AND in the pending list, the identifier (index) of
+ * this operation into the pending list
+ */
+ op_ident = (op_ext_ident_t *) slapi_ch_calloc(1, sizeof (op_ext_ident_t));
+ op_ident->idx_pl = idx_pl;
+ new_op->idx_pl = idx_pl;
+ sync_persist_set_operation_extension(pb, op_ident);
return 0;
}
-/* This operation can not be proceed by sync_repl listener because
- * of internal problem. For example, POST entry does not exist
+/* This operation failed or skipped (e.g. no MODs).
+ * In such case POST entry does not exist
*/
static void
-ignore_op_pl(Operation *op)
+ignore_op_pl(Slapi_PBlock *pb)
{
OPERATION_PL_CTX_T *prim_op, *curr_op;
+ op_ext_ident_t *ident;
+ Operation *op;
+
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+
+ /* prim_op is set if betxn was called
+ * In case of invalid update (schema violation) the
+ * operation skip betxn and prim_op is not set.
+ * This is the same for ident
+ */
prim_op = get_thread_primary_op();
+ ident = sync_persist_get_operation_extension(pb);
- for (curr_op = prim_op; curr_op; curr_op = curr_op->next) {
- if ((curr_op->op == op) &&
- (curr_op->flags == OPERATION_PL_PENDING)) { /* If by any "chance" a same operation structure was reused in consecutive updates
- * we can not only rely on 'op' value
- */
- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl operation (0x%lx) from the pending list\n",
- (ulong) op);
- curr_op->flags = OPERATION_PL_IGNORED;
- return;
+ if (ident) {
+ /* The TXN_BEPROP was called, so the operation is
+ * registered in the pending list
+ */
+ for (curr_op = prim_op; curr_op; curr_op = curr_op->next) {
+ if (curr_op->idx_pl == ident->idx_pl) {
+ /* The operation extension (ident) refers this operation (currop in the pending list).
+ * This is called during sync_repl postop. At this moment
+ * the operation in the pending list (identified by idx_pl in the operation extension)
+ * should be pending
+ */
+ PR_ASSERT(curr_op->flags == OPERATION_PL_PENDING);
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl operation (op=0x%lx, idx_pl=%d) from the pending list\n",
+ (ulong) op, ident->idx_pl);
+ curr_op->flags = OPERATION_PL_IGNORED;
+ return;
+ }
}
}
- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl can not retrieve an operation (0x%lx) in pending list\n",
- (ulong) op);
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl failing operation (op=0x%lx, idx_pl=%d) was not in the pending list\n",
+ (ulong) op, ident ? ident->idx_pl : -1);
}
/* This is a generic function that is called by betxn_post of this plugin.
@@ -126,7 +193,9 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber
{
OPERATION_PL_CTX_T *prim_op = NULL, *curr_op;
Operation *pb_op;
+ op_ext_ident_t *ident;
Slapi_DN *sdn;
+ uint32_t count; /* used for diagnostics of the length of the pending list */
int32_t rc;
if (!SYNC_IS_INITIALIZED()) {
@@ -138,7 +207,7 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber
if (NULL == e) {
/* Ignore this operation (for example case of failure of the operation) */
- ignore_op_pl(pb_op);
+ ignore_op_pl(pb);
return;
}
@@ -161,16 +230,21 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber
prim_op = get_thread_primary_op();
+ ident = sync_persist_get_operation_extension(pb);
PR_ASSERT(prim_op);
+ PR_ASSERT(ident);
/* First mark the operation as completed/failed
* the param to be used once the operation will be pushed
* on the listeners queue
*/
for (curr_op = prim_op; curr_op; curr_op = curr_op->next) {
- if ((curr_op->op == pb_op) &&
- (curr_op->flags == OPERATION_PL_PENDING)) { /* If by any "chance" a same operation structure was reused in consecutive updates
- * we can not only rely on 'op' value
- */
+ if (curr_op->idx_pl == ident->idx_pl) {
+ /* The operation extension (ident) refers this operation (currop in the pending list)
+ * This is called during sync_repl postop. At this moment
+ * the operation in the pending list (identified by idx_pl in the operation extension)
+ * should be pending
+ */
+ PR_ASSERT(curr_op->flags == OPERATION_PL_PENDING);
if (rc == LDAP_SUCCESS) {
curr_op->flags = OPERATION_PL_SUCCEEDED;
curr_op->entry = e ? slapi_entry_dup(e) : NULL;
@@ -183,46 +257,50 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber
}
}
if (!curr_op) {
- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "%s - operation not found on the pendling list\n", label);
+ slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "%s - operation (op=0x%lx, idx_pl=%d) not found on the pendling list\n",
+ label, (ulong) pb_op, ident->idx_pl);
PR_ASSERT(curr_op);
}
-#if DEBUG
- /* dump the pending queue */
- for (curr_op = prim_op; curr_op; curr_op = curr_op->next) {
- char *flags_str;
- char * entry_str;
+ /* for diagnostic of the pending list, dump its content if it is too long */
+ for (count = 0, curr_op = prim_op; curr_op; count++, curr_op = curr_op->next);
+ if (loglevel_is_set(SLAPI_LOG_PLUGIN) && (count > 10)) {
- if (curr_op->entry) {
- entry_str = slapi_entry_get_dn(curr_op->entry);
- } else if (curr_op->eprev){
- entry_str = slapi_entry_get_dn(curr_op->eprev);
- } else {
- entry_str = "unknown";
- }
- switch (curr_op->flags) {
- case OPERATION_PL_SUCCEEDED:
- flags_str = "succeeded";
- break;
- case OPERATION_PL_FAILED:
- flags_str = "failed";
- break;
- case OPERATION_PL_IGNORED:
- flags_str = "ignored";
- break;
- case OPERATION_PL_PENDING:
- flags_str = "pending";
- break;
- default:
- flags_str = "unknown";
- break;
-
+ /* if pending list looks abnormally too long, dump the pending list */
+ for (curr_op = prim_op; curr_op; curr_op = curr_op->next) {
+ char *flags_str;
+ char * entry_str;
- }
- slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "dump pending list(0x%lx) %s %s\n",
+ if (curr_op->entry) {
+ entry_str = slapi_entry_get_dn(curr_op->entry);
+ } else if (curr_op->eprev) {
+ entry_str = slapi_entry_get_dn(curr_op->eprev);
+ } else {
+ entry_str = "unknown";
+ }
+ switch (curr_op->flags) {
+ case OPERATION_PL_SUCCEEDED:
+ flags_str = "succeeded";
+ break;
+ case OPERATION_PL_FAILED:
+ flags_str = "failed";
+ break;
+ case OPERATION_PL_IGNORED:
+ flags_str = "ignored";
+ break;
+ case OPERATION_PL_PENDING:
+ flags_str = "pending";
+ break;
+ default:
+ flags_str = "unknown";
+ break;
+
+
+ }
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "dump pending list(0x%lx) %s %s\n",
(ulong) curr_op->op, entry_str, flags_str);
+ }
}
-#endif
/* Second check if it remains a pending operation in the pending list */
for (curr_op = prim_op; curr_op; curr_op = curr_op->next) {
--
2.26.2

View File

@ -0,0 +1,145 @@
From e6536aa27bfdc27cad07f6c5cd3312f0f0710c96 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 1 Feb 2021 09:28:25 +0100
Subject: [PATCH 3/3] Issue 4581 - A failed re-indexing leaves the database in
broken state (#4582)
Bug description:
During reindex the numsubordinates attribute is not updated in parent entries.
The consequence is that the internal counter job->numsubordinates==0.
Later when indexing the ancestorid, the server can show the progression of this
indexing with a ratio using job->numsubordinates==0.
Division with 0 -> SIGFPE
Fix description:
if numsubordinates is 0, log a message without performing the division.
relates: https://github.com/389ds/389-ds-base/issues/4581
Reviewed by: Pierre Rogier, Mark Reynolds, Simon Pichugin, Teko Mihinto (thanks !!)
Platforms tested: F31
---
.../slapd/back-ldbm/db-bdb/bdb_import.c | 72 ++++++++++++++-----
1 file changed, 54 insertions(+), 18 deletions(-)
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
index ba783ee59..7f484934f 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
@@ -468,18 +468,30 @@ bdb_get_nonleaf_ids(backend *be, DB_TXN *txn, IDList **idl, ImportJob *job)
}
key_count++;
if (!(key_count % PROGRESS_INTERVAL)) {
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
- "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+ "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+ "Gathering ancestorid non-leaf IDs: processed %d ancestors...",
+ key_count);
+ }
started_progress_logging = 1;
}
} while (ret == 0 && !(job->flags & FLAG_ABORT));
if (started_progress_logging) {
/* finish what we started logging */
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
- "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+ "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+ "Gathering ancestorid non-leaf IDs: processed %d ancestors",
+ key_count);
+ }
}
import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
"Finished gathering ancestorid non-leaf IDs.");
@@ -660,9 +672,15 @@ bdb_ancestorid_default_create_index(backend *be, ImportJob *job)
key_count++;
if (!(key_count % PROGRESS_INTERVAL)) {
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
- "Creating ancestorid index: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+ "Creating ancestorid index: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+ "Creating ancestorid index: processed %d ancestors...",
+ key_count);
+ }
started_progress_logging = 1;
}
@@ -743,9 +761,15 @@ out:
if (ret == 0) {
if (started_progress_logging) {
/* finish what we started logging */
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
- "Creating ancestorid index: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+ "Creating ancestorid index: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+ "Creating ancestorid index: processed %d ancestors",
+ key_count);
+ }
}
import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
"Created ancestorid index (old idl).");
@@ -869,9 +893,15 @@ bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job)
key_count++;
if (!(key_count % PROGRESS_INTERVAL)) {
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
- "Creating ancestorid index: progress %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+ "Creating ancestorid index: progress %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+ "Creating ancestorid index: progress %d ancestors...",
+ key_count);
+ }
started_progress_logging = 1;
}
@@ -932,9 +962,15 @@ out:
if (ret == 0) {
if (started_progress_logging) {
/* finish what we started logging */
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
- "Creating ancestorid index: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+ "Creating ancestorid index: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+ "Creating ancestorid index: processed %d ancestors",
+ key_count);
+ }
}
import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
"Created ancestorid index (new idl).");
--
2.26.2

View File

@ -0,0 +1,190 @@
From 4839898dbe69d6445f3571beec1bf3f1557d6cc6 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 12 Jan 2021 10:09:23 -0500
Subject: [PATCH] Issue 4513 - CI Tests - fix test failures
Description:
Fixed tests in these suites: basic, entryuuid, filter, lib389, and schema
relates: https://github.com/389ds/389-ds-base/issues/4513
Reviewed by: progier(Thanks!)
---
dirsrvtests/tests/suites/basic/basic_test.py | 65 ++++++++++---------
.../filter/rfc3673_all_oper_attrs_test.py | 4 +-
.../suites/lib389/config_compare_test.py | 5 +-
.../suites/lib389/idm/user_compare_i2_test.py | 3 +
.../tests/suites/schema/schema_reload_test.py | 3 +
5 files changed, 47 insertions(+), 33 deletions(-)
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 97908c31c..fc9af46e4 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -1059,6 +1059,41 @@ def test_search_ou(topology_st):
assert len(entries) == 0
+def test_bind_invalid_entry(topology_st):
+ """Test the failing bind does not return information about the entry
+
+ :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f
+
+ :setup: Standalone instance
+
+ :steps:
+ 1: bind as non existing entry
+ 2: check that bind info does not report 'No such entry'
+
+ :expectedresults:
+ 1: pass
+ 2: pass
+ """
+
+ topology_st.standalone.restart()
+ INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX
+ try:
+ topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD)
+ except ldap.LDAPError as e:
+ log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY)
+ log.info('exception description: ' + e.args[0]['desc'])
+ if 'info' in e.args[0]:
+ log.info('exception info: ' + e.args[0]['info'])
+ assert e.args[0]['desc'] == 'Invalid credentials'
+ assert 'info' not in e.args[0]
+ pass
+
+ log.info('test_bind_invalid_entry: PASSED')
+
+ # reset credentials
+ topology_st.standalone.simple_bind_s(DN_DM, PW_DM)
+
+
@pytest.mark.bz1044135
@pytest.mark.ds47319
def test_connection_buffer_size(topology_st):
@@ -1477,36 +1512,6 @@ def test_dscreate_with_different_rdn(dscreate_test_rdn_value):
else:
assert True
-def test_bind_invalid_entry(topology_st):
- """Test the failing bind does not return information about the entry
-
- :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f
-
- :setup: Standalone instance
-
- :steps:
- 1: bind as non existing entry
- 2: check that bind info does not report 'No such entry'
-
- :expectedresults:
- 1: pass
- 2: pass
- """
-
- topology_st.standalone.restart()
- INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX
- try:
- topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD)
- except ldap.LDAPError as e:
- log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY)
- log.info('exception description: ' + e.args[0]['desc'])
- if 'info' in e.args[0]:
- log.info('exception info: ' + e.args[0]['info'])
- assert e.args[0]['desc'] == 'Invalid credentials'
- assert 'info' not in e.args[0]
- pass
-
- log.info('test_bind_invalid_entry: PASSED')
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py
index c882bea5f..0477acda7 100644
--- a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py
+++ b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py
@@ -53,11 +53,11 @@ TEST_PARAMS = [(DN_ROOT, False, [
(TEST_USER_DN, False, [
'createTimestamp', 'creatorsName', 'entrydn',
'entryid', 'modifiersName', 'modifyTimestamp',
- 'nsUniqueId', 'parentid'
+ 'nsUniqueId', 'parentid', 'entryUUID'
]),
(TEST_USER_DN, True, [
'createTimestamp', 'creatorsName', 'entrydn',
- 'entryid', 'modifyTimestamp', 'nsUniqueId', 'parentid'
+ 'entryid', 'modifyTimestamp', 'nsUniqueId', 'parentid', 'entryUUID'
]),
(DN_CONFIG, False, [
'numSubordinates', 'passwordHistory'
diff --git a/dirsrvtests/tests/suites/lib389/config_compare_test.py b/dirsrvtests/tests/suites/lib389/config_compare_test.py
index 709bae8cb..84f55acfa 100644
--- a/dirsrvtests/tests/suites/lib389/config_compare_test.py
+++ b/dirsrvtests/tests/suites/lib389/config_compare_test.py
@@ -22,15 +22,18 @@ def test_config_compare(topology_i2):
st2_config = topology_i2.ins.get('standalone2').config
# 'nsslapd-port' attribute is expected to be same in cn=config comparison,
# but they are different in our testing environment
- # as we are using 2 DS instances running, both running simultaneuosly.
+ # as we are using 2 DS instances running, both running simultaneously.
# Hence explicitly adding 'nsslapd-port' to compare_exclude.
st1_config._compare_exclude.append('nsslapd-port')
st2_config._compare_exclude.append('nsslapd-port')
st1_config._compare_exclude.append('nsslapd-secureport')
st2_config._compare_exclude.append('nsslapd-secureport')
+ st1_config._compare_exclude.append('nsslapd-ldapssotoken-secret')
+ st2_config._compare_exclude.append('nsslapd-ldapssotoken-secret')
assert Config.compare(st1_config, st2_config)
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py b/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py
index c7540e4ce..ccde0f6b0 100644
--- a/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py
+++ b/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py
@@ -39,6 +39,9 @@ def test_user_compare_i2(topology_i2):
st2_users.create(properties=user_properties)
st2_testuser = st2_users.get('testuser')
+ st1_testuser._compare_exclude.append('entryuuid')
+ st2_testuser._compare_exclude.append('entryuuid')
+
assert UserAccount.compare(st1_testuser, st2_testuser)
diff --git a/dirsrvtests/tests/suites/schema/schema_reload_test.py b/dirsrvtests/tests/suites/schema/schema_reload_test.py
index 2ece5dda5..e7e7d833d 100644
--- a/dirsrvtests/tests/suites/schema/schema_reload_test.py
+++ b/dirsrvtests/tests/suites/schema/schema_reload_test.py
@@ -54,6 +54,7 @@ def test_valid_schema(topo):
schema_file.write("objectclasses: ( 1.2.3.4.5.6.7.8 NAME 'TestObject' " +
"SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " +
"sn $ ValidAttribute ) X-ORIGIN 'user defined' )')\n")
+ os.chmod(schema_filename, 0o777)
except OSError as e:
log.fatal("Failed to create schema file: " +
"{} Error: {}".format(schema_filename, str(e)))
@@ -106,6 +107,7 @@ def test_invalid_schema(topo):
schema_file.write("objectclasses: ( 1.2.3.4.5.6.7 NAME 'MoZiLLaOBJeCT' " +
"SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " +
"sn $ MozillaAttribute ) X-ORIGIN 'user defined' )')\n")
+ os.chmod(schema_filename, 0o777)
except OSError as e:
log.fatal("Failed to create schema file: " +
"{} Error: {}".format(schema_filename, str(e)))
@@ -122,6 +124,7 @@ def test_invalid_schema(topo):
schema_file.write("objectclasses: ( 1.2.3.4.5.6.70 NAME 'MoZiLLaOBJeCT' " +
"SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " +
"cn $ MoZiLLaATTRiBuTe ) X-ORIGIN 'user defined' )')\n")
+ os.chmod(schema_filename, 0o777)
except OSError as e:
log.fatal("Failed to create schema file: " +
"{} Error: {}".format(schema_filename, str(e)))
--
2.26.2

View File

@ -44,8 +44,8 @@ ExcludeArch: i686
Summary: 389 Directory Server (base)
Name: 389-ds-base
Version: 1.4.3.8
Release: %{?relprefix}4%{?prerel}%{?dist}
Version: 1.4.3.16
Release: %{?relprefix}11%{?prerel}%{?dist}
License: GPLv3+
URL: https://www.port389.org
Group: System Environment/Daemons
@ -174,28 +174,44 @@ Source2: %{name}-devel.README
%if %{bundle_jemalloc}
Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2
%endif
Patch01: 0001-Issue-51076-prevent-unnecessarily-duplication-of-the.patch
Patch02: 0002-Ticket-51082-abort-when-a-empty-valueset-is-freed.patch
Patch03: 0003-Issue-51091-healthcheck-json-report-fails-when-mappi.patch
Patch04: 0004-Issue-51076-remove-unnecessary-slapi-entry-dups.patch
Patch05: 0005-Issue-51086-Improve-dscreate-instance-name-validatio.patch
Patch06: 0006-Issue-51102-RFE-ds-replcheck-make-online-timeout-con.patch
Patch07: 0007-Issue-51110-Fix-ASAN-ODR-warnings.patch
Patch08: 0008-Issue-51095-abort-operation-if-CSN-can-not-be-genera.patch
Patch09: 0009-Issue-51113-Allow-using-uid-for-replication-manager-.patch
Patch10: 0010-Issue-50931-RFE-AD-filter-rewriter-for-ObjectCategor.patch
Patch11: 0011-Issue-50746-Add-option-to-healthcheck-to-list-all-th.patch
Patch12: 0012-Issue-50984-Memory-leaks-in-disk-monitoring.patch
Patch13: 0013-Issue-50984-Memory-leaks-in-disk-monitoring.patch
Patch14: 0014-Issue-50201-nsIndexIDListScanLimit-accepts-any-value.patch
Patch15: 0015-Issue-51157-Reindex-task-may-create-abandoned-index-.patch
Patch16: 0016-Issue-51165-add-new-access-log-keywords-for-wtime-an.patch
Patch17: 0017-Issue-50912-pwdReset-can-be-modified-by-a-user.patch
Patch18: 0018-Issue-50791-Healthcheck-should-look-for-notes-A-F-in.patch
Patch19: 0019-Issue-51144-dsctl-fails-with-instance-names-that-con.patch
Patch20: 0020-Ticket-49859-A-distinguished-value-can-be-missing-in.patch
Patch21: 0021-Issue-49256-log-warning-when-thread-number-is-very-d.patch
Patch22: 0022-Issue-51188-db2ldif-crashes-when-LDIF-file-can-t-be-.patch
Patch01: 0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch
Patch02: 0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch
Patch03: 0003-do-not-add-referrals-for-masters-with-different-data.patch
Patch04: 0004-Ticket-50933-Update-2307compat.ldif.patch
Patch05: 0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch
Patch06: 0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch
Patch07: 0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch
Patch08: 0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch
Patch09: 0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch
Patch10: 0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch
Patch11: 0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch
Patch12: 0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch
Patch13: 0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch
Patch14: 0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch
Patch15: 0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch
Patch16: 0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch
Patch17: 0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch
Patch18: 0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch
Patch19: 0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch
Patch20: 0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch
Patch21: 0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch
Patch22: 0022-Fix-cherry-pick-erorr.patch
Patch23: 0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch
Patch24: 0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch
Patch25: 0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch
Patch26: 0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch
Patch27: 0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch
Patch28: 0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch
Patch29: 0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch
Patch30: 0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch
Patch31: 0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch
Patch32: 0032-Backport-tests-from-master-branch-fix-failing-tests-.patch
Patch33: 0033-Issue-5442-Search-results-are-different-between-RHDS.patch
Patch34: 0034-Issue-4526-sync_repl-when-completing-an-operation-in.patch
Patch35: 0035-Issue-4581-A-failed-re-indexing-leaves-the-database-.patch
Patch36: 0036-Issue-4513-CI-Tests-fix-test-failures.patch
# Patch37: 0037-Issue-4609-CVE-info-disclosure-when-authenticating.patch
%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
@ -813,6 +829,100 @@ exit 0
%doc README.md
%changelog
* Mon Feb 15 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-11
- Bump version to 1.4.3.16-11
- Resolves: Bug 1924130 - RHDS11: “write” permission of ACI changes ns-slapds behavior on search operation(remove patch as it breaks DogTag, will add this patch back after DogTag is fixed)
* Wed Feb 10 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-10
- Bump version to 1.4.3.16-10
- Resolves: Bug 1924130 - RHDS11: “write” permission of ACI changes ns-slapds behavior on search operation(part 2)
* Tue Feb 2 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-9
- Bump version to 1.4.3.16-9
- Resolves: Bug 1924130 - RHDS11: “write” permission of ACI changes ns-slapds behavior on search operation
- Resolves: Bug 1916677 - A failed re-indexing leaves the database in broken state.
- Resolves: Bug 1912822 - sync_repl: when completing an operation in the pending list, it can select the wrong operation
* Wed Jan 13 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-8
- Bump version to 1.4.3.16-8
- Resolves: Bug 1903539 - cn=monitor is throwing err=32 with scope: -s one
- Resolves: Bug 1893870 - PR_WaitCondVar() issue causes replication delay when clock jumps backwards
* Thu Jan 7 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-7
- Bump version to 1.4.3.16-7
- Resolves: Bug 1890118 - SIGFPE crash in rhds disk monitoring routine
- Resolves: Bug 1904991 - 389-ds:1.4/389-ds-base: information disclosure during the binding of a DN
- Resolves: Bug 1627645 - ldif2db does not change exit code when there are skipped entries
* Wed Dec 16 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-6
- Bump version to 1.4.3.16-6
- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV - consumer (Unavailable) State (green) Reason (error (0)
- Resolves: Bug 1904991 - Unexpected info returned to ldap request
- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix
- Resolves: Bug 1903133 - Server-Cert.crt created using dscreate has Subject:CN =localhost instead of hostname.
* Wed Dec 9 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-5
- Bump version to 1.4.3.16-5
- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV
- Resolves: Bug 1887449 - Sync repl: missing update because operations are erroneously stated as nested
- Resolves: Bug 1887415 - Sync repl - if a series of updates targets the same entry then the cookie gets the wrong changenumber
- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie
* Thu Dec 3 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-4
- Bump version to 1.4.3.16-4
- Resolves: Bug 1843517 - Using ldifgen with --start-idx option fails with unsupported operand
- Resolves: Bug 1801086 - [RFE] Generate dsrc file using dsconf
- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix
* Wed Nov 25 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-3
- Bump version to 1.4.3.16-3
- Resolves: Bug 1859219 - rfc2307 and rfc2307bis compat schema
- Resolves: Bug 1843604 - reduce the cost of allocation/free when open/close a connection
- Resolves: Bug 1898850 - Entries conflict not resolved by replication
* Thu Nov 19 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-2
- Bump version to 1.4.3.16-2
- Resolves: Bug 1859227 - create keep alive entry after on line init
- Resolves: Bug 1888863 - group rdn with leading space char and add fails error 21 invalid syntax and delete fails error 32
- Resolves: Bug 1859228 - do not add referrals for masters with different data generation
* Mon Oct 26 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-1
- Bump version to 1.4.3.16-1
- Resolves: Bug 1887415 - Sync repl - if a series of updates targets the same entry then the cookie gets the wrong changenumber
- Resolves: Bug 1859225 - suffix management in backends incorrect
* Mon Oct 26 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.14-1
- Bump version to 1.4.3.14-1
- Resolves: Bug 1862529 - Rebase 389-ds-base-1.4.3 in RHEL 8.4
- Resolves: Bug 1859301 - Misleading message in access log for idle timeout
- Resolves: Bug 1889782 - Missing closing quote when reporting the details of unindexed/paged search results
- Resolves: Bug 1862971 - dsidm user status fails with Error: 'nsUserAccount' object has no attribute 'is_locked'
- Resolves: Bug 1859878 - Managed Entries configuration not being enforced
- Resolves: Bug 1851973 - Duplicate entryUSN numbers for different LDAP entries in the same backend
- Resolves: Bug 1851967 - if dbhome directory is set online backup fails
- Resolves: Bug 1887449 - Sync repl: missing update because operations are erroneously stated as nested
- Resolves: Bug 1887415 - Sync repl - if a series of updates targets the same entry then the cookie gets the wrong changenumber
- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie
- Resolves: Bug 1843604 - reduce the cost of allocation/free when open/close a connection
- Resolves: Bug 1872930 - dscreate: Not possible to bind to a unix domain socket
- Resolves: Bug 1861504 - ds-replcheck crashes in offline mode
- Resolves: Bug 1859282 - remove ldbm_back_entry_release
- Resolves: Bug 1859225 - suffix management in backends incorrect
- Resolves: Bug 1859224 - remove unused or unnecessary database plugin functions
- Resolves: Bug 1859219 - rfc2307 and rfc2307bis compat schema
- Resolves: Bug 1851975 - Add option to reject internal unindexed searches
- Resolves: Bug 1851972 - Remove code duplication from the BDB backend separation work
- Resolves: Bug 1850275 - Add new access log keywords for time spent in work queue and actual operation time
- Resolves: Bug 1848359 - Add failover credentials to replication agreement
- Resolves: Bug 1837315 - Healthcheck code DSBLE0002 not returned on disabled suffix
* Wed Aug 5 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-5
- Bump version to 1.4.3.8-5
- Resolves: Bug 1841086 - SSL alert: The value of sslVersionMax "TLS1.3" is higher than the supported version
- Resolves: Bug 1800529 - Memory leaks in disk monitoring
- Resolves: Bug 1748227 - Instance name length is not enforced
- Resolves: Bug 1849418 - python3-lib389 pulls unnecessary bash-completion package
* Fri Jun 26 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-4
- Bump version to 1.4.3.8-4
- Resolves: Bug 1806978 - ns-slapd crashes during db2ldif
@ -865,195 +975,3 @@ exit 0
- Resolves: Bug 1790986 - cenotaph errors on modrdn operations
- Resolves: Bug 1769734 - Heavy StartTLS connection load can randomly fail with err=1
- Resolves: Bug 1758501 - LeakSanitizer: detected memory leaks in changelog5_init and perfctrs_init
* Fri May 8 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-0
- Bump version to 1.4.3.8-0
- Issue 51078 - Add nsslapd-enable-upgrade-hash to the schema
- Issue 51054 - Revise ACI target syntax checking
- Issue 51068 - deadlock when updating the schema
- Issue 51060 - unable to set sslVersionMin to TLS1.0
- Issue 51064 - Unable to install server where IPv6 is disabled
- Issue 51051 - CLI fix consistency issues with confirmations
- Issue 49731 - undo db_home_dir under /dev/shm/dirsrv for now
- Issue 51054 - AddressSanitizer: heap-buffer-overflow in ldap_utf8prev
- Issue 51047 - React deprecating ComponentWillMount
- Issue 50499 - fix npm audit issues
- Issue 50545 - Port dbgen.pl to dsctl
* Wed Apr 22 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.7-1
- Bump version to 1.4.3.7
- Issue 51024 - syncrepl_entry callback does not contain attributes added by postoperation plugins
- Issue 51035 - Heavy StartTLS connection load can randomly fail with err=1
- Issue 49731 - undo db_home_dir under /dev/shm/dirsrv for now
- Issue 51031 - UI - transition between two instances needs improvement
* Thu Apr 16 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.6-1
- Bump version to 1.4.3.6
- Issue 50933 - 10rfc2307compat.ldif is not ready to set used by default
- Issue 50931 - RFE AD filter rewriter for ObjectCategory
- Issue 51016 - Fix memory leaks in changelog5_init and perfctrs_init
- Issue 50980 - RFE extend usability for slapi_compute_add_search_rewriter and slapi_compute_add_evaluator
- Issue 51008 - dbhome in containers
- Issue 50875 - Refactor passwordUserAttributes's and passwordBadWords's code
- Issue 51014 - slapi_pal.c possible static buffer overflow
- Issue 50545 - remove dbmon "incr" option from arg parser
- Issue 50545 - Port dbmon.sh to dsconf
- Issue 51005 - AttributeUniqueness plugin's DN parameter should not have a default value
- Issue 49731 - Fix additional issues with setting db home directory by default
- Issue 50337 - Replace exec() with setattr()
- Issue 50905 - intermittent SSL hang with rhds
- Issue 50952 - SSCA lacks basicConstraint:CA
- Issue 50640 - Database links: get_monitor() takes 1 positional argument but 2 were given
- Issue 50869 - Setting nsslapd-allowed-sasl-mechanisms truncates the value
* Wed Apr 1 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.5-1
- Bump version to 1.4.3.5
- Issue 50994 - Fix latest UI bugs found by QE
- Issue 50933 - rfc2307compat.ldif
- Issue 50337 - Replace exec() with setattr()
- Issue 50984 - Memory leaks in disk monitoring
- Issue 50984 - Memory leaks in disk monitoring
- Issue 49731 - dscreate fails in silent mode because of db_home_dir
- Issue 50975 - Revise UI branding with new minimized build
- Issue 49437 - Fix memory leak with indirect COS
- Issue 49731 - Do not add db_home_dir to template-dse.ldif
- Issue 49731 - set and use db_home_directory by default
- Issue 50971 - fix BSD_SOURCE
- Issue 50744 - -n option of dbverify does not work
- Issue 50952 - SSCA lacks basicConstraint:CA
- Issue 50976 - Clean up Web UI source directory from unused files
- Issue 50955 - Fix memory leaks in chaining plugin(part 2)
- Issue 50966 - UI - Database indexes not using typeAhead correctly
- Issue 50974 - UI - wrong title in "Delete Suffix" popup
- Issue 50972 - Fix cockpit plugin build
- Issue 49761 - Fix CI test suite issues
- Issue 50971 - Support building on FreeBSD.
- Issue 50960 - [RFE] Advance options in RHDS Disk Monitoring Framework
- Issue 50800 - wildcards in rootdn-allow-ip attribute are not accepted
- Issue 50963 - We should bundle *.min.js files of Console
- Issue 50860 - Port Password Policy test cases from TET to python3 Password grace limit section.
- Issue 50860 - Port Password Policy test cases from TET to python3 series of bugs Port final
- Issue 50954 - buildnum.py - fix date formatting issue
* Mon Mar 16 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.4-1
- Bump version to 1.4.3.4
- Issue 50954 - Port buildnum.pl to python(part 2)
- Issue 50955 - Fix memory leaks in chaining plugin
- Issue 50954 - Port buildnum.pl to python
- Issue 50947 - change 00core.ldif objectClasses for openldap migration
- Issue 50755 - setting nsslapd-db-home-directory is overriding db_directory
- Issue 50937 - Update CLI for new backend split configuration
- Issue 50860 - Port Password Policy test cases from TET to python3 pwp.sh
- Issue 50945 - givenname alias of gn from openldap
- Issue 50935 - systemd override in lib389 for dscontainer
- Issue 50499 - Fix npm audit issues
- Issue 49761 - Fix CI test suite issues
- Issue 50618 - clean compiler warning and log level
- Issue 50889 - fix compiler issues
- Issue 50884 - Health check tool DSEldif check fails
- Issue 50926 - Remove dual spinner and other UI fixes
- Issue 50928 - Unable to create a suffix with countryName
- Issue 50758 - Only Recommend bash-completion, not Require
- Issue 50923 - Fix a test regression
- Issue 50904 - Connect All React Components And Refactor the Main Navigation Tab Code
- Issue 50920 - cl-dump exit code is 0 even if command fails with invalid arguments
- Issue 50923 - Add test - dsctl fails to remove instances with dashes in the name
- Issue 50919 - Backend delete fails using dsconf
- Issue 50872 - dsconf can't create GSSAPI replication agreements
- Issue 50912 - RFE - add password policy attribute pwdReset
- Issue 50914 - No error returned when adding an entry matching filters for a non existing automember group
- Issue 50889 - Extract pem files into a private namespace
- Issue 50909 - nsDS5ReplicaId cant be set to the old value it had before
- Issue 50686 - Port fractional replication test cases from TET to python3 final
- Issue 49845 - Remove pkgconfig check for libasan
- Issue:50860 - Port Password Policy test cases from TET to python3 bug624080
- Issue:50860 - Port Password Policy test cases from TET to python3 series of bugs
- Issue 50786 - connection table freelist
- Issue 50618 - support cgroupv2
- Issue 50900 - Fix cargo offline build
- Issue 50898 - ldclt core dumped when run with -e genldif option
* Mon Feb 17 2020 Matus Honek <mhonek@redhat.com> - 1.4.3.3-3
- Bring back the necessary c_rehash util (#1803370)
* Fri Feb 14 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.3-2
- Bump version to 1.4.3.3-2
- Remove unneeded perl dependencies
- Change bash-completion to "Recommends" instead of "Requires"
* Thu Feb 13 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.3-1
- Bump version to 1.4.3.3
- Issue 50855 - remove unused file from UI
- Issue 50855 - UI: Port Server Tab to React
- Issue 49845 - README does not contain complete information on building
- Issue 50686 - Port fractional replication test cases from TET to python3 part 1
- Issue 49623 - cont cenotaph errors on modrdn operations
- Issue 50882 - Fix healthcheck errors for instances that do not have TLS enabled
- Issue 50886 - Typo in the replication debug message
- Issue 50873 - Fix healthcheck and virtual attr check
- Issue 50873 - Fix issues with healthcheck tool
- Issue 50028 - Add a new CI test case
- Issue 49946 - Add a new CI test case
- Issue 50117 - Add a new CI test case
- Issue 50787 - fix implementation of attr unique
- Issue 50859 - support running only with ldaps socket
- Issue 50823 - dsctl doesn't work with 'slapd-' in the instance name
- Issue 49624 - cont - DB Deadlock on modrdn appears to corrupt database and entry cache
- Issue 50867 - Fix minor buildsys issues
- Issue 50737 - Allow building with rust online without vendoring
- Issue 50831 - add cargo.lock to allow offline builds
- Issue 50694 - import PEM certs on startup
- Issue 50857 - Memory leak in ACI using IP subject
- Issue 49761 - Fix CI test suite issues
- Issue 50853 - Fix NULL pointer deref in config setting
- Issue 50850 - Fix dsctl healthcheck for python36
- Issue 49990 - Need to enforce a hard maximum limit for file descriptors
- Issue 48707 - ldapssotoken for authentication
* Tue Jan 28 2020 Fedora Release Engineering <releng@fedoraproject.org> - 1.4.3.2-1.1
- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild
* Thu Jan 23 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.2-1
- Bump version to 1.4.3.2
- Issue 49254 - Fix compiler failures and warnings
- Issue 50741 - cont bdb_start - Detected Disorderly Shutdown
- Issue 50836 - Port Schema UI tab to React
- Issue 50842 - Decrease 389-console Cockpit component size
- Issue 50790 - Add result text when filter is invalid
- Issue 50627 - Add ASAN logs to HTML report
- Issue 50834 - Incorrectly setting the NSS default SSL version max
- Issue 50829 - Disk monitoring rotated log cleanup causes heap-use-after-free
- Issue 50709 - (cont) Several memory leaks reported by Valgrind for 389-ds 1.3.9.1-10
- Issue 50784 - performance testing scripts
- Issue 50599 - Fix memory leak when removing db region files
- Issue 49395 - Set the default TLS version min to TLS1.2
- Issue 50818 - dsconf pwdpolicy get error
- Issue 50824 - dsctl remove fails with "name 'ensure_str' is not defined"
- Issue 50599 - Remove db region files prior to db recovery
- Issue 50812 - dscontainer executable should be placed under /usr/libexec/dirsrv/
- Issue 50816 - dsconf allows the root password to be set to nothing
- Issue 50798 - incorrect bytes in format string(fix import issue)
* Thu Jan 16 2020 Adam Williamson <awilliam@redhat.com> - 1.4.3.1-3
- Backport two more import/missing function fixes
* Wed Jan 15 2020 Adam Williamson <awilliam@redhat.com> - 1.4.3.1-2
- Backport 828aad0 to fix missing imports from 1.4.3.1
* Mon Jan 13 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.1-1
- Bump version to 1.4.3.1
- Issue 50798 - incorrect bytes in format string
- Issue 50545 - Add the new replication monitor functionality to UI
- Issue 50806 - Fix minor issues in lib389 health checks
- Issue 50690 - Port Password Storage test cases from TET to python3 part 1
- Issue 49761 - Fix CI test suite issues
- Issue 49761 - Fix CI test suite issues
- Issue 50754 - Add Restore Change Log option to CLI
- Issue 48055 - CI test - automember_plugin(part2)
- Issue 50667 - dsctl -l did not respect PREFIX
- Issue 50780 - More CLI fixes
- Issue 50649 - lib389 without defaults.inf
- Issue 50780 - Fix UI issues
- Issue 50727 - correct mistaken options in filter validation patch
- Issue 50779 - lib389 - conflict compare fails for DN's with spaces
- Set branch version to 1.4.3.0