Compare commits

...

No commits in common. "c8-stream-1.4" and "c9-beta" have entirely different histories.

36 changed files with 2849 additions and 4822 deletions


@@ -1,3 +1,2 @@
-bd9aab32d9cbf9231058d585479813f3420dc872 SOURCES/389-ds-base-1.4.3.39.tar.bz2
+25969f6e65d79aa29671eff7185e4307ff3c08a0 SOURCES/389-ds-base-2.6.1.tar.bz2
 1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2
-978b7c5e4a9e5784fddb23ba1abe4dc5a071589f SOURCES/vendor-1.4.3.39-1.tar.gz

.gitignore

@@ -1,3 +1,2 @@
-SOURCES/389-ds-base-1.4.3.39.tar.bz2
+SOURCES/389-ds-base-2.6.1.tar.bz2
 SOURCES/jemalloc-5.3.0.tar.bz2
-SOURCES/vendor-1.4.3.39-1.tar.gz


@@ -0,0 +1,60 @@
From 0921400a39b61687db2bc55ebd5021eef507e960 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Tue, 28 Jan 2025 21:05:49 +0100
Subject: [PATCH] Issue 6468 - Fix building for older versions of Python
Bug Description:
Structural pattern matching was added in Python 3.10; older versions
do not support it.
Fix Description:
Replace `match` and `case` statements with `if-elif`.
Relates: https://github.com/389ds/389-ds-base/issues/6468
Reviewed by: @droideck (Thanks!)
---
src/lib389/lib389/cli_conf/logging.py | 27 ++++++++++++++-------------
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/src/lib389/lib389/cli_conf/logging.py b/src/lib389/lib389/cli_conf/logging.py
index 2e86f2de8..d1e32822c 100644
--- a/src/lib389/lib389/cli_conf/logging.py
+++ b/src/lib389/lib389/cli_conf/logging.py
@@ -234,19 +234,20 @@ def get_log_config(inst, basedn, log, args):
attr_map = {}
levels = {}
- match args.logtype:
- case "access":
- attr_map = ACCESS_ATTR_MAP
- levels = ACCESS_LEVELS
- case "error":
- attr_map = ERROR_ATTR_MAP
- levels = ERROR_LEVELS
- case "security":
- attr_map = SECURITY_ATTR_MAP
- case "audit":
- attr_map = AUDIT_ATTR_MAP
- case "auditfail":
- attr_map = AUDITFAIL_ATTR_MAP
+ if args.logtype == "access":
+ attr_map = ACCESS_ATTR_MAP
+ levels = ACCESS_LEVELS
+ elif args.logtype == "error":
+ attr_map = ERROR_ATTR_MAP
+ levels = ERROR_LEVELS
+ elif args.logtype == "security":
+ attr_map = SECURITY_ATTR_MAP
+ elif args.logtype == "audit":
+ attr_map = AUDIT_ATTR_MAP
+ elif args.logtype == "auditfail":
+ attr_map = AUDITFAIL_ATTR_MAP
+ else:
+ raise ValueError(f"Unknown logtype: {args.logtype}")
sorted_results = []
for attr, value in attrs.items():
--
2.48.0
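
As context for the patch above, a minimal Python sketch (illustrative only, not taken from the patch; the returned map names are placeholders) showing why the rewrite is behavior-preserving: the `match` form requires Python 3.10+, while the `if-elif` form parses everywhere:

    def pick_attr_map(logtype):
        # Pre-3.10 equivalent of:  match logtype: case "access": ... case "error": ...
        if logtype == "access":
            return "ACCESS_ATTR_MAP"
        elif logtype == "error":
            return "ERROR_ATTR_MAP"
        else:
            raise ValueError(f"Unknown logtype: {logtype}")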


@@ -1,119 +0,0 @@
From dddb14210b402f317e566b6387c76a8e659bf7fa Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 14 Feb 2023 13:34:10 +0100
Subject: [PATCH 1/2] issue 5647 - covscan: memory leak in audit log when
adding entries (#5650)
covscan reported an issue about the "vals" variable in auditlog.c:231, and indeed a charray_free() call was missing.
Issue: 5647
Reviewed by: @mreynolds389, @droideck
---
ldap/servers/slapd/auditlog.c | 71 +++++++++++++++++++----------------
1 file changed, 38 insertions(+), 33 deletions(-)
diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 68cbc674d..3128e0497 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -177,6 +177,40 @@ write_auditfail_log_entry(Slapi_PBlock *pb)
slapi_ch_free_string(&audit_config);
}
+/*
+ * Write the attribute values to the audit log as "comments"
+ *
+ * Slapi_Attr *entry_attr - the attribute being logged.
+ * char *attrname - the attribute name.
+ * lenstr *l - the audit log buffer
+ *
+ * Resulting output in the log:
+ *
+ * #ATTR: VALUE
+ * #ATTR: VALUE
+ */
+static void
+log_entry_attr(Slapi_Attr *entry_attr, char *attrname, lenstr *l)
+{
+ Slapi_Value **vals = attr_get_present_values(entry_attr);
+ for(size_t i = 0; vals && vals[i]; i++) {
+ char log_val[256] = "";
+ const struct berval *bv = slapi_value_get_berval(vals[i]);
+ if (bv->bv_len >= 256) {
+ strncpy(log_val, bv->bv_val, 252);
+ strcpy(log_val+252, "...");
+ } else {
+ strncpy(log_val, bv->bv_val, bv->bv_len);
+ log_val[bv->bv_len] = 0;
+ }
+ addlenstr(l, "#");
+ addlenstr(l, attrname);
+ addlenstr(l, ": ");
+ addlenstr(l, log_val);
+ addlenstr(l, "\n");
+ }
+}
+
/*
* Write "requested" attributes from the entry to the audit log as "comments"
*
@@ -212,21 +246,9 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
for (req_attr = ldap_utf8strtok_r(display_attrs, ", ", &last); req_attr;
req_attr = ldap_utf8strtok_r(NULL, ", ", &last))
{
- char **vals = slapi_entry_attr_get_charray(entry, req_attr);
- for(size_t i = 0; vals && vals[i]; i++) {
- char log_val[256] = {0};
-
- if (strlen(vals[i]) > 256) {
- strncpy(log_val, vals[i], 252);
- strcat(log_val, "...");
- } else {
- strcpy(log_val, vals[i]);
- }
- addlenstr(l, "#");
- addlenstr(l, req_attr);
- addlenstr(l, ": ");
- addlenstr(l, log_val);
- addlenstr(l, "\n");
+ slapi_entry_attr_find(entry, req_attr, &entry_attr);
+ if (entry_attr) {
+ log_entry_attr(entry_attr, req_attr, l);
}
}
} else {
@@ -234,7 +256,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
for (; entry_attr; entry_attr = entry_attr->a_next) {
Slapi_Value **vals = attr_get_present_values(entry_attr);
char *attr = NULL;
- const char *val = NULL;
slapi_attr_get_type(entry_attr, &attr);
if (strcmp(attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) {
@@ -251,23 +272,7 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
addlenstr(l, ": ****************************\n");
continue;
}
-
- for(size_t i = 0; vals && vals[i]; i++) {
- char log_val[256] = {0};
-
- val = slapi_value_get_string(vals[i]);
- if (strlen(val) > 256) {
- strncpy(log_val, val, 252);
- strcat(log_val, "...");
- } else {
- strcpy(log_val, val);
- }
- addlenstr(l, "#");
- addlenstr(l, attr);
- addlenstr(l, ": ");
- addlenstr(l, log_val);
- addlenstr(l, "\n");
- }
+ log_entry_attr(entry_attr, attr, l);
}
}
slapi_ch_free_string(&display_attrs);
--
2.43.0
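
The leak came from slapi_entry_attr_get_charray(), which returns an allocated copy that was never freed; the fix switches to slapi_entry_attr_find(), which returns a reference, and centralizes value formatting in log_entry_attr(). A small Python sketch (hypothetical helper, mirroring only the truncation rule visible in log_entry_attr above) of how values are clipped to the 256-byte log buffer:

    def format_attr_line(attrname, value, limit=256):
        # Values that would overflow the fixed buffer keep limit-4 bytes plus "..."
        if len(value) >= limit:
            value = value[:limit - 4] + "..."
        return f"#{attrname}: {value}"

    print(format_attr_line("cn", "x" * 300))  # value portion clipped to 255 characters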


@@ -1,27 +0,0 @@
From be7c2b82958e91ce08775bf6b5da3c311d3b00e5 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 20 Feb 2023 16:14:05 +0100
Subject: [PATCH 2/2] Issue 5647 - Fix unused variable warning from previous
commit (#5670)
* issue 5647 - memory leak in audit log when adding entries
* Issue 5647 - Fix unused variable warning from previous commit
---
ldap/servers/slapd/auditlog.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 3128e0497..0597ecc6f 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -254,7 +254,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
} else {
/* Return all attributes */
for (; entry_attr; entry_attr = entry_attr->a_next) {
- Slapi_Value **vals = attr_get_present_values(entry_attr);
char *attr = NULL;
slapi_attr_get_type(entry_attr, &attr);
--
2.43.0


@@ -0,0 +1,146 @@
From 12f9bf81e834549db02b1243ecf769b511c9f69f Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 31 Jan 2025 08:54:27 -0500
Subject: [PATCH] Issue 6489 - After log rotation refresh the FD pointer
Description:
When flushing a log buffer, we get an FD for the log prior to checking
whether the log should be rotated. If the log is rotated, that FD
reference becomes invalid and needs to be refreshed before proceeding.
Relates: https://github.com/389ds/389-ds-base/issues/6489
Reviewed by: tbordaz(Thanks!)
---
.../suites/logging/log_flush_rotation_test.py | 81 +++++++++++++++++++
ldap/servers/slapd/log.c | 18 +++++
2 files changed, 99 insertions(+)
create mode 100644 dirsrvtests/tests/suites/logging/log_flush_rotation_test.py
diff --git a/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py b/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py
new file mode 100644
index 000000000..b33a622e1
--- /dev/null
+++ b/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py
@@ -0,0 +1,81 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2025 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import os
+import logging
+import time
+import pytest
+from lib389._constants import DEFAULT_SUFFIX, PW_DM
+from lib389.tasks import ImportTask
+from lib389.idm.user import UserAccounts
+from lib389.topologies import topology_st as topo
+
+
+log = logging.getLogger(__name__)
+
+
+def test_log_flush_and_rotation_crash(topo):
+ """Make sure server does not crash whening flushing a buffer and rotating
+ the log at the same time
+
+ :id: d4b0af2f-48b2-45f5-ae8b-f06f692c3133
+ :setup: Standalone Instance
+ :steps:
+ 1. Enable all logs
+ 2. Enable log buffering for all logs
+ 3. Set rotation time unit to 1 minute
+ 4. Make sure server is still running after 1 minute
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ inst = topo.standalone
+
+ # Enable logging and buffering
+ inst.config.set("nsslapd-auditlog-logging-enabled", "on")
+ inst.config.set("nsslapd-accesslog-logbuffering", "on")
+ inst.config.set("nsslapd-auditlog-logbuffering", "on")
+ inst.config.set("nsslapd-errorlog-logbuffering", "on")
+ inst.config.set("nsslapd-securitylog-logbuffering", "on")
+
+ # Set rotation policy to trigger rotation asap
+ inst.config.set("nsslapd-accesslog-logrotationtimeunit", "minute")
+ inst.config.set("nsslapd-auditlog-logrotationtimeunit", "minute")
+ inst.config.set("nsslapd-errorlog-logrotationtimeunit", "minute")
+ inst.config.set("nsslapd-securitylog-logrotationtimeunit", "minute")
+
+ #
+ # Performs ops to populate all the logs
+ #
+ # Access & audit log
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
+ user = users.create_test_user()
+ user.set("userPassword", PW_DM)
+ # Security log
+ user.bind(PW_DM)
+ # Error log
+ import_task = ImportTask(inst)
+ import_task.import_suffix_from_ldif(ldiffile="/not/here",
+ suffix=DEFAULT_SUFFIX)
+
+ # Wait a minute and make sure the server did not crash
+ log.info("Sleep until logs are flushed and rotated")
+ time.sleep(61)
+
+ assert inst.status()
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main(["-s", CURRENT_FILE])
+
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 8352f4abd..c1260a203 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -6746,6 +6746,23 @@ log_refresh_state(int32_t log_type)
return 0;
}
}
+static LOGFD
+log_refresh_fd(int32_t log_type)
+{
+ switch (log_type) {
+ case SLAPD_ACCESS_LOG:
+ return loginfo.log_access_fdes;
+ case SLAPD_SECURITY_LOG:
+ return loginfo.log_security_fdes;
+ case SLAPD_AUDIT_LOG:
+ return loginfo.log_audit_fdes;
+ case SLAPD_AUDITFAIL_LOG:
+ return loginfo.log_auditfail_fdes;
+ case SLAPD_ERROR_LOG:
+ return loginfo.log_error_fdes;
+ }
+ return NULL;
+}
/* this function assumes the lock is already acquired */
/* if sync_now is non-zero, data is flushed to physical storage */
@@ -6857,6 +6874,7 @@ log_flush_buffer(LogBufferInfo *lbi, int log_type, int sync_now, int locked)
rotationtime_secs);
}
log_state = log_refresh_state(log_type);
+ fd = log_refresh_fd(log_type);
}
if (log_state & LOGGING_NEED_TITLE) {
--
2.48.0
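
The underlying race is generic to buffered logging: a descriptor fetched before the rotation check keeps pointing at the renamed file once rotation happens. A self-contained Python sketch of the stale-handle behavior the patch guards against (file names are illustrative, not the server's API; rename semantics are POSIX):

    import os

    LOG = "server.log"
    open(LOG, "a").close()            # make sure the log exists

    fd = open(LOG, "a")               # handle grabbed before the rotation check
    os.replace(LOG, LOG + ".1")       # rotation renames the active log
    fd.write("lost line\n")           # still goes to server.log.1, not server.log
    fd.close()

    fd = open(LOG, "a")               # the refresh that log_refresh_fd() provides
    fd.write("kept line\n")           # lands in the new server.log
    fd.close()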


@@ -1,147 +0,0 @@
From 692c4cec6cc5c0086cf58f83bcfa690c766c9887 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Fri, 2 Feb 2024 14:14:28 +0100
Subject: [PATCH] Issue 5407 - sync_repl crashes if enabled while dynamic
plugin is enabled (#5411)
Bug description:
When dynamic plugins are enabled, if a MOD enables the sync_repl plugin,
the sync_repl init function registers the postop callback, which is then
called for that same MOD even though the preop was never called.
The postop expects the preop to have run and the primary operation to be
set; when it is not set, the server crashes.
Fix description:
If the primary operation is not set, just return
relates: #5407
---
.../suites/syncrepl_plugin/basic_test.py | 68 +++++++++++++++++++
ldap/servers/plugins/sync/sync_persist.c | 23 ++++++-
2 files changed, 90 insertions(+), 1 deletion(-)
diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
index eb3770b78..cdf35eeaa 100644
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
@@ -592,6 +592,74 @@ def test_sync_repl_cenotaph(topo_m2, request):
request.addfinalizer(fin)
+def test_sync_repl_dynamic_plugin(topology, request):
+ """Test sync_repl with dynamic plugin
+
+ :id: d4f84913-c18a-459f-8525-110f610ca9e6
+ :setup: install a standalone instance
+ :steps:
+ 1. reset instance to standard (no retroCL, no sync_repl, no dynamic plugin)
+ 2. Enable dynamic plugin
+ 3. Enable retroCL/content_sync
+ 4. Establish a sync_repl req
+ :expectedresults:
+ 1. Should succeed
+ 2. Should succeed
+ 3. Should succeed
+ 4. Should succeed
+ """
+
+ # Reset the instance in a default config
+ # Disable content sync plugin
+ topology.standalone.plugins.disable(name=PLUGIN_REPL_SYNC)
+
+ # Disable retro changelog
+ topology.standalone.plugins.disable(name=PLUGIN_RETRO_CHANGELOG)
+
+ # Disable dynamic plugins
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'off')])
+ topology.standalone.restart()
+
+ # Now start the test
+ # Enable dynamic plugins
+ try:
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')])
+ except ldap.LDAPError as e:
+ log.error('Failed to enable dynamic plugin! {}'.format(e.args[0]['desc']))
+ assert False
+
+ # Enable retro changelog
+ topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+
+ # Enable content sync plugin
+ topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
+
+ # create a sync repl client and wait 5 seconds to be sure it is running
+ sync_repl = Sync_persist(topology.standalone)
+ sync_repl.start()
+ time.sleep(5)
+
+ # create users
+ users = UserAccounts(topology.standalone, DEFAULT_SUFFIX)
+ users_set = []
+ for i in range(10001, 10004):
+ users_set.append(users.create_test_user(uid=i))
+
+ time.sleep(10)
+ # delete users so that automember/memberof will generate nested updates
+ for user in users_set:
+ user.delete()
+ # stop the server to get the sync_repl result set (exit from while loop).
+ # Only way I found to achieve that.
+ # and wait a bit to let sync_repl thread time to set its result before fetching it.
+ topology.standalone.stop()
+ sync_repl.get_result()
+ sync_repl.join()
+ log.info('test_sync_repl_dynamic_plugin: PASS\n')
+
+ # Success
+ log.info('Test complete')
+
def test_sync_repl_invalid_cookie(topology, request):
"""Test sync_repl with invalid cookie
diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
index d2210b64c..283607361 100644
--- a/ldap/servers/plugins/sync/sync_persist.c
+++ b/ldap/servers/plugins/sync/sync_persist.c
@@ -156,6 +156,17 @@ ignore_op_pl(Slapi_PBlock *pb)
* This is the same for ident
*/
prim_op = get_thread_primary_op();
+ if (prim_op == NULL) {
+ /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
+ * The only known case it happens is with dynamic plugin enabled and an
+ * update that enable the sync_repl plugin. In such case sync_repl registers
+ * the postop (sync_update_persist_op) that is called while the preop was not called
+ */
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
+ "ignore_op_pl - Operation without primary op set (0x%lx)\n",
+ (ulong) op);
+ return;
+ }
ident = sync_persist_get_operation_extension(pb);
if (ident) {
@@ -232,8 +243,18 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber
prim_op = get_thread_primary_op();
+ if (prim_op == NULL) {
+ /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
+ * The only known case it happens is with dynamic plugin enabled and an
+ * update that enable the sync_repl plugin. In such case sync_repl registers
+ * the postop (sync_update_persist_op) that is called while the preop was not called
+ */
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
+ "sync_update_persist_op - Operation without primary op set (0x%lx)\n",
+ (ulong) pb_op);
+ return;
+ }
ident = sync_persist_get_operation_extension(pb);
- PR_ASSERT(prim_op);
if ((ident == NULL) && operation_is_flag_set(pb_op, OP_FLAG_NOOP)) {
/* This happens for URP (add cenotaph, fixup rename, tombstone resurrect)
--
2.43.0
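
The fix replaces the hard PR_ASSERT with a tolerant early return. A minimal Python sketch of that guard (names and the thread_data dict are hypothetical; the real code reads the primary operation from per-thread data that the preop normally sets):

    import logging

    log = logging.getLogger("sync_persist")

    def sync_update_persist_op(thread_data, op):
        prim_op = thread_data.get("primary_op")
        if prim_op is None:
            # The preop never ran, e.g. this very op enabled the plugin under
            # dynamic plugins; log and bail out instead of crashing.
            log.debug("operation without primary op set (%r)", op)
            return
        # ... normal postop processing would follow here ...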


@@ -0,0 +1,311 @@
From f077f9692d1625a1bc2dc6ee02a4fca71ee30b03 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Wed, 13 Nov 2024 15:31:35 +0100
Subject: [PATCH] Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work
properly (#6400)
* Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work properly
Several issues:
- After restarting the server, nsslapd-mdb-max-dbs may not be high enough to add a new backend
  because the value computation is wrong.
- dbscan fails to open the database if nsslapd-mdb-max-dbs has been increased.
- dbscan crashes when closing the database (typically when using -S).
Fix description:
- When starting the instance, the nsslapd-mdb-max-dbs parameter is increased to ensure that a new backend may be added.
- When the dse.ldif path is not specified, the db environment is now opened using the INFO.mdb data instead of the default values.
- Synchronization between thread closure and database context destruction is hardened.
Issue: #6374
Reviewed by: @tbordaz , @vashirov (Thanks!)
(cherry picked from commit 56cd3389da608a3f6eeee58d20dffbcd286a8033)
---
.../tests/suites/config/config_test.py | 86 +++++++++++++++++++
ldap/servers/slapd/back-ldbm/back-ldbm.h | 2 +
.../slapd/back-ldbm/db-mdb/mdb_config.c | 17 ++--
.../back-ldbm/db-mdb/mdb_import_threads.c | 9 +-
.../slapd/back-ldbm/db-mdb/mdb_instance.c | 8 ++
ldap/servers/slapd/back-ldbm/dbimpl.c | 2 +-
ldap/servers/slapd/back-ldbm/import.c | 14 ++-
7 files changed, 128 insertions(+), 10 deletions(-)
diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index 57b155af7..34dac36b6 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -17,6 +17,7 @@ from lib389.topologies import topology_m2, topology_st as topo
from lib389.utils import *
from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX, DEFAULT_BENAME
from lib389._mapped_object import DSLdapObjects
+from lib389.agreement import Agreements
from lib389.cli_base import FakeArgs
from lib389.cli_conf.backend import db_config_set
from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
@@ -27,6 +28,8 @@ from lib389.cos import CosPointerDefinitions, CosTemplates
from lib389.backend import Backends, DatabaseConfig
from lib389.monitor import MonitorLDBM, Monitor
from lib389.plugins import ReferentialIntegrityPlugin
+from lib389.replica import BootstrapReplicationManager, Replicas
+from lib389.passwd import password_generate
pytestmark = pytest.mark.tier0
@@ -36,6 +39,8 @@ PSTACK_CMD = '/usr/bin/pstack'
logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+
@pytest.fixture(scope="module")
def big_file():
TEMP_BIG_FILE = ''
@@ -811,6 +816,87 @@ def test_numlisteners_limit(topo):
assert numlisteners[0] == '4'
+def bootstrap_replication(inst_from, inst_to, creds):
+ manager = BootstrapReplicationManager(inst_to)
+ rdn_val = 'replication manager'
+ if manager.exists():
+ manager.delete()
+ manager.create(properties={
+ 'cn': rdn_val,
+ 'uid': rdn_val,
+ 'userPassword': creds
+ })
+ for replica in Replicas(inst_to).list():
+ replica.remove_all('nsDS5ReplicaBindDNGroup')
+ replica.replace('nsDS5ReplicaBindDN', manager.dn)
+ for agmt in Agreements(inst_from).list():
+ agmt.replace('nsDS5ReplicaBindDN', manager.dn)
+ agmt.replace('nsDS5ReplicaCredentials', creds)
+
+
+@pytest.mark.skipif(get_default_db_lib() != "mdb", reason="This test requires lmdb")
+def test_lmdb_autotuned_maxdbs(topology_m2, request):
+ """Verify that after restart, nsslapd-mdb-max-dbs is large enough to add a new backend.
+
+ :id: 0272d432-9080-11ef-8f40-482ae39447e5
+ :setup: Two suppliers configuration
+ :steps:
+ 1. Loop 20 times
+ 2. In each loop: restart instance
+ 3. In each loop: add a new backend
+ 4. In each loop: check that instance is still alive
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ s1 = topology_m2.ms["supplier1"]
+ s2 = topology_m2.ms["supplier2"]
+
+ backends = Backends(s1)
+ db_config = DatabaseConfig(s1)
+ # Generate the teardown finalizer
+ belist = []
+ creds=password_generate()
+ bootstrap_replication(s2, s1, creds)
+ bootstrap_replication(s1, s2, creds)
+
+ def fin():
+ s1.start()
+ for be in belist:
+ be.delete()
+
+ if not DEBUGGING:
+ request.addfinalizer(fin)
+
+ # 1. Set autotuning (off-line to be able to decrease the value)
+ s1.stop()
+ dse_ldif = DSEldif(s1)
+ dse_ldif.replace(db_config.dn, 'nsslapd-mdb-max-dbs', '0')
+ os.remove(f'{s1.dbdir}/data.mdb')
+ s1.start()
+
+ # 2. Reinitialize the db:
+ log.info("Bulk import...")
+ agmt = Agreements(s2).list()[0]
+ agmt.begin_reinit()
+ (done, error) = agmt.wait_reinit()
+ log.info(f'Bulk import result is ({done}, {error})')
+ assert done is True
+ assert error is False
+
+ # 3. loop 20 times
+ for idx in range(20):
+ s1.restart()
+ log.info(f'Adding backend test{idx}')
+ belist.append(backends.create(properties={'cn': f'test{idx}',
+ 'nsslapd-suffix': f'dc=test{idx}'}))
+ assert s1.status()
+
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index 8fea63e35..35d0ece04 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -896,4 +896,6 @@ typedef struct _back_search_result_set
((L)->size == (R)->size && !memcmp((L)->data, (R)->data, (L)->size))
typedef int backend_implement_init_fn(struct ldbminfo *li, config_info *config_array);
+
+pthread_mutex_t *get_import_ctx_mutex();
#endif /* _back_ldbm_h_ */
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
index 351f54037..1f7b71442 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
@@ -83,7 +83,7 @@ dbmdb_compute_limits(struct ldbminfo *li)
uint64_t total_space = 0;
uint64_t avail_space = 0;
uint64_t cur_dbsize = 0;
- int nbchangelogs = 0;
+ int nbvlvs = 0;
int nbsuffixes = 0;
int nbindexes = 0;
int nbagmt = 0;
@@ -99,8 +99,8 @@ dbmdb_compute_limits(struct ldbminfo *li)
* But some tunable may be autotuned.
*/
if (dbmdb_count_config_entries("(objectClass=nsMappingTree)", &nbsuffixes) ||
- dbmdb_count_config_entries("(objectClass=nsIndex)", &nbsuffixes) ||
- dbmdb_count_config_entries("(&(objectClass=nsds5Replica)(nsDS5Flags=1))", &nbchangelogs) ||
+ dbmdb_count_config_entries("(objectClass=nsIndex)", &nbindexes) ||
+ dbmdb_count_config_entries("(objectClass=vlvIndex)", &nbvlvs) ||
dbmdb_count_config_entries("(objectClass=nsds5replicationagreement)", &nbagmt)) {
/* error message is already logged */
return 1;
@@ -120,8 +120,15 @@ dbmdb_compute_limits(struct ldbminfo *li)
info->pagesize = sysconf(_SC_PAGE_SIZE);
limits->min_readers = config_get_threadnumber() + nbagmt + DBMDB_READERS_MARGIN;
- /* Default indexes are counted in "nbindexes" so we should always have enough resource to add 1 new suffix */
- limits->min_dbs = nbsuffixes + nbindexes + nbchangelogs + DBMDB_DBS_MARGIN;
+ /*
+ * For each suffix there are 4 databases instances:
+ * long-entryrdn, replication_changelog, id2entry and ancestorid
+ * then the indexes and the vlv and vlv cache
+ *
+ * Default indexes are counted in "nbindexes" so we should always have enough
+ * resource to add 1 new suffix
+ */
+ limits->min_dbs = 4*nbsuffixes + nbindexes + 2*nbvlvs + DBMDB_DBS_MARGIN;
total_space = ((uint64_t)(buf.f_blocks)) * ((uint64_t)(buf.f_bsize));
avail_space = ((uint64_t)(buf.f_bavail)) * ((uint64_t)(buf.f_bsize));
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
index 8c879da31..707a110c5 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
@@ -4312,9 +4312,12 @@ dbmdb_import_init_writer(ImportJob *job, ImportRole_t role)
void
dbmdb_free_import_ctx(ImportJob *job)
{
- if (job->writer_ctx) {
- ImportCtx_t *ctx = job->writer_ctx;
- job->writer_ctx = NULL;
+ ImportCtx_t *ctx = NULL;
+ pthread_mutex_lock(get_import_ctx_mutex());
+ ctx = job->writer_ctx;
+ job->writer_ctx = NULL;
+ pthread_mutex_unlock(get_import_ctx_mutex());
+ if (ctx) {
pthread_mutex_destroy(&ctx->workerq.mutex);
pthread_cond_destroy(&ctx->workerq.cv);
slapi_ch_free((void**)&ctx->workerq.slots);
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
index 6386ecf06..05f1e348d 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
@@ -287,6 +287,13 @@ int add_dbi(dbi_open_ctx_t *octx, backend *be, const char *fname, int flags)
slapi_ch_free((void**)&treekey.dbname);
return octx->rc;
}
+ if (treekey.dbi >= ctx->dsecfg.max_dbs) {
+ octx->rc = MDB_DBS_FULL;
+ slapi_log_err(SLAPI_LOG_ERR, "add_dbi", "Failed to open database instance %s slots: %d/%d. Error is %d: %s.\n",
+ treekey.dbname, treekey.dbi, ctx->dsecfg.max_dbs, octx->rc, mdb_strerror(octx->rc));
+ slapi_ch_free((void**)&treekey.dbname);
+ return octx->rc;
+ }
if (octx->ai && octx->ai->ai_key_cmp_fn) {
octx->rc = dbmdb_update_dbi_cmp_fn(ctx, &treekey, octx->ai->ai_key_cmp_fn, octx->txn);
if (octx->rc) {
@@ -689,6 +696,7 @@ int dbmdb_make_env(dbmdb_ctx_t *ctx, int readOnly, mdb_mode_t mode)
rc = dbmdb_write_infofile(ctx);
} else {
/* No Config ==> read it from info file */
+ ctx->dsecfg = ctx->startcfg;
}
if (rc) {
return rc;
diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
index da4a4548e..42f4a0718 100644
--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
@@ -463,7 +463,7 @@ int dblayer_show_statistics(const char *dbimpl_name, const char *dbhome, FILE *f
li->li_plugin = be->be_database;
li->li_plugin->plg_name = (char*) "back-ldbm-dbimpl";
li->li_plugin->plg_libpath = (char*) "libback-ldbm";
- li->li_directory = (char*)dbhome;
+ li->li_directory = get_li_directory(dbhome);
/* Initialize database plugin */
rc = dbimpl_setup(li, dbimpl_name);
diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
index 2bb8cb581..30ec462fa 100644
--- a/ldap/servers/slapd/back-ldbm/import.c
+++ b/ldap/servers/slapd/back-ldbm/import.c
@@ -27,6 +27,9 @@
#define NEED_DN_NORM_SP -25
#define NEED_DN_NORM_BT -26
+/* Protect against import context destruction */
+static pthread_mutex_t import_ctx_mutex = PTHREAD_MUTEX_INITIALIZER;
+
/********** routines to manipulate the entry fifo **********/
@@ -143,6 +146,14 @@ ldbm_back_wire_import(Slapi_PBlock *pb)
/* Threads management */
+/* Return the mutex that protects against import context destruction */
+pthread_mutex_t *
+get_import_ctx_mutex()
+{
+ return &import_ctx_mutex;
+}
+
+
/* tell all the threads to abort */
void
import_abort_all(ImportJob *job, int wait_for_them)
@@ -151,7 +162,7 @@ import_abort_all(ImportJob *job, int wait_for_them)
/* tell all the worker threads to abort */
job->flags |= FLAG_ABORT;
-
+ pthread_mutex_lock(&import_ctx_mutex);
for (worker = job->worker_list; worker; worker = worker->next)
worker->command = ABORT;
@@ -167,6 +178,7 @@ import_abort_all(ImportJob *job, int wait_for_them)
}
}
}
+ pthread_mutex_unlock(&import_ctx_mutex);
}
--
2.48.0
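
The corrected sizing formula in dbmdb_compute_limits() can be checked by hand. A short Python sketch of the computation (the margin value below is an assumed placeholder, not the real DBMDB_DBS_MARGIN constant):

    DBMDB_DBS_MARGIN = 10  # assumed for illustration only

    def min_dbs(nbsuffixes, nbindexes, nbvlvs):
        # 4 per suffix: long-entryrdn, replication_changelog, id2entry, ancestorid;
        # 2 per VLV: the vlv index and its cache; default indexes are already
        # counted in nbindexes, and the margin leaves room for one more suffix.
        return 4 * nbsuffixes + nbindexes + 2 * nbvlvs + DBMDB_DBS_MARGIN

    print(min_dbs(2, 20, 1))  # 4*2 + 20 + 2*1 + 10 = 40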


@@ -1,840 +0,0 @@
From 8dc61a176323f0d41df730abd715ccff3034c2be Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Sun, 27 Nov 2022 09:37:19 -0500
Subject: [PATCH] Issue 5547 - automember plugin improvements
Description:
Rebuild task has the following improvements:
- Only one task allowed at a time
- Do not clean up previous members by default. Add a new CLI option to
intentionally clean up memberships before rebuilding from scratch.
- Add better task logging to show fixup progress.
To prevent automember from being called in a nested be_txn loop, thread-local
storage is used to detect and skip these nested calls.
relates: https://github.com/389ds/389-ds-base/issues/5547
Reviewed by: spichugi(Thanks!)
---
.../automember_plugin/automember_mod_test.py | 43 +++-
ldap/servers/plugins/automember/automember.c | 232 ++++++++++++++----
ldap/servers/slapd/back-ldbm/ldbm_add.c | 11 +-
ldap/servers/slapd/back-ldbm/ldbm_delete.c | 10 +-
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 11 +-
.../lib389/cli_conf/plugins/automember.py | 10 +-
src/lib389/lib389/plugins.py | 7 +-
src/lib389/lib389/tasks.py | 9 +-
8 files changed, 250 insertions(+), 83 deletions(-)
diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
index 8d25384bf..7a0ed3275 100644
--- a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
+++ b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
@@ -5,12 +5,13 @@
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
-#
+import ldap
import logging
import pytest
import os
+import time
from lib389.utils import ds_is_older
-from lib389._constants import *
+from lib389._constants import DEFAULT_SUFFIX
from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions
from lib389.idm.user import UserAccounts
from lib389.idm.group import Groups
@@ -41,6 +42,11 @@ def automember_fixture(topo, request):
user_accts = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
user = user_accts.create_test_user()
+ # Create extra users
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
+ for i in range(0, 100):
+ users.create_test_user(uid=i)
+
# Create automember definitions and regex rules
automember_prop = {
'cn': 'testgroup_definition',
@@ -59,7 +65,7 @@ def automember_fixture(topo, request):
automemberplugin.enable()
topo.standalone.restart()
- return (user, groups)
+ return user, groups
def test_mods(automember_fixture, topo):
@@ -72,19 +78,21 @@ def test_mods(automember_fixture, topo):
2. Update user that should add it to group[1]
3. Update user that should add it to group[2]
4. Update user that should add it to group[0]
- 5. Test rebuild task correctly moves user to group[1]
+ 5. Test rebuild task adds user to group[1]
+ 6. Test rebuild task cleanups groups and only adds it to group[1]
:expectedresults:
1. Success
2. Success
3. Success
4. Success
5. Success
+ 6. Success
"""
(user, groups) = automember_fixture
# Update user which should go into group[0]
user.replace('cn', 'whatever')
- groups[0].is_member(user.dn)
+ assert groups[0].is_member(user.dn)
if groups[1].is_member(user.dn):
assert False
if groups[2].is_member(user.dn):
@@ -92,7 +100,7 @@ def test_mods(automember_fixture, topo):
# Update user0 which should go into group[1]
user.replace('cn', 'mark')
- groups[1].is_member(user.dn)
+ assert groups[1].is_member(user.dn)
if groups[0].is_member(user.dn):
assert False
if groups[2].is_member(user.dn):
@@ -100,7 +108,7 @@ def test_mods(automember_fixture, topo):
# Update user which should go into group[2]
user.replace('cn', 'simon')
- groups[2].is_member(user.dn)
+ assert groups[2].is_member(user.dn)
if groups[0].is_member(user.dn):
assert False
if groups[1].is_member(user.dn):
@@ -108,7 +116,7 @@ def test_mods(automember_fixture, topo):
# Update user which should go back into group[0] (full circle)
user.replace('cn', 'whatever')
- groups[0].is_member(user.dn)
+ assert groups[0].is_member(user.dn)
if groups[1].is_member(user.dn):
assert False
if groups[2].is_member(user.dn):
@@ -128,12 +136,24 @@ def test_mods(automember_fixture, topo):
automemberplugin.enable()
topo.standalone.restart()
- # Run rebuild task
+ # Run rebuild task (no cleanup)
task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount")
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+ # test only one fixup task is allowed at a time
+ automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=top")
task.wait()
- # Test membership
- groups[1].is_member(user.dn)
+ # Test membership (user should still be in groups[0])
+ assert groups[1].is_member(user.dn)
+ if not groups[0].is_member(user.dn):
+ assert False
+
+ # Run rebuild task with cleanup
+ task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount", cleanup=True)
+ task.wait()
+
+ # Test membership (user should only be in groups[1])
+ assert groups[1].is_member(user.dn)
if groups[0].is_member(user.dn):
assert False
if groups[2].is_member(user.dn):
@@ -148,4 +168,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main(["-s", CURRENT_FILE])
-
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
index 3494d0343..419adb052 100644
--- a/ldap/servers/plugins/automember/automember.c
+++ b/ldap/servers/plugins/automember/automember.c
@@ -1,5 +1,5 @@
/** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2011 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
* All rights reserved.
*
* License: GPL (version 3 or any later version).
@@ -14,7 +14,7 @@
* Auto Membership Plug-in
*/
#include "automember.h"
-
+#include <pthread.h>
/*
* Plug-in globals
@@ -22,7 +22,9 @@
static PRCList *g_automember_config = NULL;
static Slapi_RWLock *g_automember_config_lock = NULL;
static uint64_t abort_rebuild_task = 0;
-
+static pthread_key_t td_automem_block_nested;
+static PRBool fixup_running = PR_FALSE;
+static PRLock *fixup_lock = NULL;
static void *_PluginID = NULL;
static Slapi_DN *_PluginDN = NULL;
static Slapi_DN *_ConfigAreaDN = NULL;
@@ -93,9 +95,43 @@ static void automember_task_export_destructor(Slapi_Task *task);
static void automember_task_map_destructor(Slapi_Task *task);
#define DEFAULT_FILE_MODE PR_IRUSR | PR_IWUSR
+#define FIXUP_PROGRESS_LIMIT 1000
static uint64_t plugin_do_modify = 0;
static uint64_t plugin_is_betxn = 0;
+/* automember_plugin fixup task and add operations should block other be_txn
+ * plugins from calling automember_post_op_mod() */
+static int32_t
+slapi_td_block_nested_post_op(void)
+{
+ int32_t val = 12345;
+
+ if (pthread_setspecific(td_automem_block_nested, (void *)&val) != 0) {
+ return PR_FAILURE;
+ }
+ return PR_SUCCESS;
+}
+
+static int32_t
+slapi_td_unblock_nested_post_op(void)
+{
+ if (pthread_setspecific(td_automem_block_nested, NULL) != 0) {
+ return PR_FAILURE;
+ }
+ return PR_SUCCESS;
+}
+
+static int32_t
+slapi_td_is_post_op_nested(void)
+{
+ int32_t *value = pthread_getspecific(td_automem_block_nested);
+
+ if (value == NULL) {
+ return 0;
+ }
+ return 1;
+}
+
/*
* Config cache locking functions
*/
@@ -317,6 +353,14 @@ automember_start(Slapi_PBlock *pb)
return -1;
}
+ if (fixup_lock == NULL) {
+ if ((fixup_lock = PR_NewLock()) == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_start - Failed to create fixup lock.\n");
+ return -1;
+ }
+ }
+
/*
* Get the plug-in target dn from the system
* and store it for future use. */
@@ -360,6 +404,11 @@ automember_start(Slapi_PBlock *pb)
}
}
+ if (pthread_key_create(&td_automem_block_nested, NULL) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_start - pthread_key_create failed\n");
+ }
+
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"automember_start - ready for service\n");
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
@@ -394,6 +443,8 @@ automember_close(Slapi_PBlock *pb __attribute__((unused)))
slapi_sdn_free(&_ConfigAreaDN);
slapi_destroy_rwlock(g_automember_config_lock);
g_automember_config_lock = NULL;
+ PR_DestroyLock(fixup_lock);
+ fixup_lock = NULL;
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"<-- automember_close\n");
@@ -1619,7 +1670,6 @@ out:
return rc;
}
-
/*
* automember_update_member_value()
*
@@ -1634,7 +1684,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
LDAPMod *mods[2];
char *vals[2];
char *member_value = NULL;
- int rc = 0;
+ int rc = LDAP_SUCCESS;
Slapi_DN *group_sdn;
/* First thing check that the group still exists */
@@ -1653,7 +1703,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
"automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n",
group_dn, rc);
}
- return rc;
+ goto out;
}
/* If grouping_value is dn, we need to fetch the dn instead. */
@@ -1879,6 +1929,13 @@ automember_mod_post_op(Slapi_PBlock *pb)
PRCList *list = NULL;
int rc = SLAPI_PLUGIN_SUCCESS;
+ if (slapi_td_is_post_op_nested()) {
+ /* don't process op twice in the same thread */
+ return rc;
+ } else {
+ slapi_td_block_nested_post_op();
+ }
+
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"--> automember_mod_post_op\n");
@@ -2005,6 +2062,7 @@ automember_mod_post_op(Slapi_PBlock *pb)
}
}
}
+ slapi_td_unblock_nested_post_op();
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"<-- automember_mod_post_op (%d)\n", rc);
@@ -2024,6 +2082,13 @@ automember_add_post_op(Slapi_PBlock *pb)
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"--> automember_add_post_op\n");
+ if (slapi_td_is_post_op_nested()) {
+ /* don't process op twice in the same thread */
+ return rc;
+ } else {
+ slapi_td_block_nested_post_op();
+ }
+
/* Reload config if a config entry was added. */
if ((sdn = automember_get_sdn(pb))) {
if (automember_dn_is_config(sdn)) {
@@ -2039,7 +2104,7 @@ automember_add_post_op(Slapi_PBlock *pb)
/* If replication, just bail. */
if (automember_isrepl(pb)) {
- return SLAPI_PLUGIN_SUCCESS;
+ goto bail;
}
/* Get the newly added entry. */
@@ -2052,7 +2117,7 @@ automember_add_post_op(Slapi_PBlock *pb)
tombstone);
slapi_value_free(&tombstone);
if (is_tombstone) {
- return SLAPI_PLUGIN_SUCCESS;
+ goto bail;
}
/* Check if a config entry applies
@@ -2063,21 +2128,19 @@ automember_add_post_op(Slapi_PBlock *pb)
list = PR_LIST_HEAD(g_automember_config);
while (list != g_automember_config) {
config = (struct configEntry *)list;
-
/* Does the entry meet scope and filter requirements? */
if (slapi_dn_issuffix(slapi_sdn_get_dn(sdn), config->scope) &&
- (slapi_filter_test_simple(e, config->filter) == 0)) {
+ (slapi_filter_test_simple(e, config->filter) == 0))
+ {
/* Find out what membership changes are needed and make them. */
if (automember_update_membership(config, e, NULL) == SLAPI_PLUGIN_FAILURE) {
rc = SLAPI_PLUGIN_FAILURE;
break;
}
}
-
list = PR_NEXT_LINK(list);
}
}
-
automember_config_unlock();
} else {
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
@@ -2098,6 +2161,7 @@ bail:
slapi_pblock_set(pb, SLAPI_RESULT_CODE, &result);
slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, &errtxt);
}
+ slapi_td_unblock_nested_post_op();
return rc;
}
@@ -2138,6 +2202,7 @@ typedef struct _task_data
Slapi_DN *base_dn;
char *bind_dn;
int scope;
+ PRBool cleanup;
} task_data;
static void
@@ -2270,6 +2335,7 @@ automember_task_abort_thread(void *arg)
* basedn: dc=example,dc=com
* filter: (uid=*)
* scope: sub
+ * cleanup: yes/on (default is off)
*
* basedn and filter are required. If scope is omitted, the default is sub
*/
@@ -2284,9 +2350,22 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
const char *base_dn;
const char *filter;
const char *scope;
+ const char *cleanup_str;
+ PRBool cleanup = PR_FALSE;
*returncode = LDAP_SUCCESS;
+ PR_Lock(fixup_lock);
+ if (fixup_running) {
+ PR_Unlock(fixup_lock);
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_task_add - there is already a fixup task running\n");
+ rv = SLAPI_DSE_CALLBACK_ERROR;
+ goto out;
+ }
+ PR_Unlock(fixup_lock);
+
/*
* Grab the task params
*/
@@ -2300,6 +2379,12 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
rv = SLAPI_DSE_CALLBACK_ERROR;
goto out;
}
+ if ((cleanup_str = slapi_entry_attr_get_ref(e, "cleanup"))) {
+ if (strcasecmp(cleanup_str, "yes") == 0 || strcasecmp(cleanup_str, "on") == 0) {
+ cleanup = PR_TRUE;
+ }
+ }
+
scope = slapi_fetch_attr(e, "scope", "sub");
/*
* setup our task data
@@ -2315,6 +2400,7 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
mytaskdata->bind_dn = slapi_ch_strdup(bind_dn);
mytaskdata->base_dn = slapi_sdn_new_dn_byval(base_dn);
mytaskdata->filter_str = slapi_ch_strdup(filter);
+ mytaskdata->cleanup = cleanup;
if (scope) {
if (strcasecmp(scope, "sub") == 0) {
@@ -2334,6 +2420,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
task = slapi_plugin_new_task(slapi_entry_get_ndn(e), arg);
slapi_task_set_destructor_fn(task, automember_task_destructor);
slapi_task_set_data(task, mytaskdata);
+ PR_Lock(fixup_lock);
+ fixup_running = PR_TRUE;
+ PR_Unlock(fixup_lock);
/*
* Start the task as a separate thread
*/
@@ -2345,6 +2434,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
"automember_task_add - Unable to create task thread!\n");
*returncode = LDAP_OPERATIONS_ERROR;
slapi_task_finish(task, *returncode);
+ PR_Lock(fixup_lock);
+ fixup_running = PR_FALSE;
+ PR_Unlock(fixup_lock);
rv = SLAPI_DSE_CALLBACK_ERROR;
} else {
rv = SLAPI_DSE_CALLBACK_OK;
@@ -2372,6 +2464,9 @@ automember_rebuild_task_thread(void *arg)
PRCList *list = NULL;
PRCList *include_list = NULL;
int result = 0;
+ int64_t fixup_progress_count = 0;
+ int64_t fixup_progress_elapsed = 0;
+ int64_t fixup_start_time = 0;
size_t i = 0;
/* Reset abort flag */
@@ -2380,6 +2475,7 @@ automember_rebuild_task_thread(void *arg)
if (!task) {
return; /* no task */
}
+
slapi_task_inc_refcount(task);
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"automember_rebuild_task_thread - Refcount incremented.\n");
@@ -2393,9 +2489,11 @@ automember_rebuild_task_thread(void *arg)
slapi_task_log_status(task, "Automember rebuild task starting (base dn: (%s) filter (%s)...",
slapi_sdn_get_dn(td->base_dn), td->filter_str);
/*
- * Set the bind dn in the local thread data
+ * Set the bind dn in the local thread data, and block post op mods
*/
slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
+ slapi_td_block_nested_post_op();
+ fixup_start_time = slapi_current_rel_time_t();
/*
* Take the config lock now and search the database
*/
@@ -2426,6 +2524,21 @@ automember_rebuild_task_thread(void *arg)
* Loop over the entries
*/
for (i = 0; entries && (entries[i] != NULL); i++) {
+ fixup_progress_count++;
+ if (fixup_progress_count % FIXUP_PROGRESS_LIMIT == 0 ) {
+ slapi_task_log_notice(task,
+ "Processed %ld entries in %ld seconds (+%ld seconds)",
+ fixup_progress_count,
+ slapi_current_rel_time_t() - fixup_start_time,
+ slapi_current_rel_time_t() - fixup_progress_elapsed);
+ slapi_task_log_status(task,
+ "Processed %ld entries in %ld seconds (+%ld seconds)",
+ fixup_progress_count,
+ slapi_current_rel_time_t() - fixup_start_time,
+ slapi_current_rel_time_t() - fixup_progress_elapsed);
+ slapi_task_inc_progress(task);
+ fixup_progress_elapsed = slapi_current_rel_time_t();
+ }
if (slapi_atomic_load_64(&abort_rebuild_task, __ATOMIC_ACQUIRE) == 1) {
/* The task was aborted */
slapi_task_log_notice(task, "Automember rebuild task was intentionally aborted");
@@ -2443,48 +2556,66 @@ automember_rebuild_task_thread(void *arg)
if (slapi_dn_issuffix(slapi_entry_get_dn(entries[i]), config->scope) &&
(slapi_filter_test_simple(entries[i], config->filter) == 0))
{
- /* First clear out all the defaults groups */
- for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
- if ((result = automember_update_member_value(entries[i], config->default_groups[ii],
- config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
- {
- slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
- "member from default group (%s) error (%d)",
- config->default_groups[ii], result);
- slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
- "member from default group (%s) error (%d)",
- config->default_groups[ii], result);
- slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
- "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
- config->default_groups[ii], result);
- goto out;
- }
- }
-
- /* Then clear out the non-default group */
- if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
- include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
- while (include_list != (PRCList *)config->inclusive_rules) {
- struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
- if ((result = automember_update_member_value(entries[i], slapi_sdn_get_dn(curr_rule->target_group_dn),
- config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
+ if (td->cleanup) {
+
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_rebuild_task_thread - Cleaning up groups (config %s)\n",
+ config->dn);
+ /* First clear out all the defaults groups */
+ for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
+ if ((result = automember_update_member_value(entries[i],
+ config->default_groups[ii],
+ config->grouping_attr,
+ config->grouping_value,
+ NULL, DEL_MEMBER)))
{
slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
- "member from group (%s) error (%d)",
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ "member from default group (%s) error (%d)",
+ config->default_groups[ii], result);
slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
- "member from group (%s) error (%d)",
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ "member from default group (%s) error (%d)",
+ config->default_groups[ii], result);
slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ config->default_groups[ii], result);
goto out;
}
- include_list = PR_NEXT_LINK(include_list);
}
+
+ /* Then clear out the non-default group */
+ if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
+ include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
+ while (include_list != (PRCList *)config->inclusive_rules) {
+ struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
+ if ((result = automember_update_member_value(entries[i],
+ slapi_sdn_get_dn(curr_rule->target_group_dn),
+ config->grouping_attr,
+ config->grouping_value,
+ NULL, DEL_MEMBER)))
+ {
+ slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
+ "member from group (%s) error (%d)",
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
+ "member from group (%s) error (%d)",
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ goto out;
+ }
+ include_list = PR_NEXT_LINK(include_list);
+ }
+ }
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_rebuild_task_thread - Finished cleaning up groups (config %s)\n",
+ config->dn);
}
/* Update the memberships for this entries */
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_rebuild_task_thread - Updating membership (config %s)\n",
+ config->dn);
if (slapi_is_shutting_down() ||
automember_update_membership(config, entries[i], NULL) == SLAPI_PLUGIN_FAILURE)
{
@@ -2508,15 +2639,22 @@ out:
slapi_task_log_notice(task, "Automember rebuild task aborted. Error (%d)", result);
slapi_task_log_status(task, "Automember rebuild task aborted. Error (%d)", result);
} else {
- slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
- slapi_task_log_status(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
+ slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
+ (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
+ slapi_task_log_status(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
+ (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
}
slapi_task_inc_progress(task);
slapi_task_finish(task, result);
slapi_task_dec_refcount(task);
slapi_atomic_store_64(&abort_rebuild_task, 0, __ATOMIC_RELEASE);
+ slapi_td_unblock_nested_post_op();
+ PR_Lock(fixup_lock);
+ fixup_running = PR_FALSE;
+ PR_Unlock(fixup_lock);
+
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
- "automember_rebuild_task_thread - Refcount decremented.\n");
+ "automember_rebuild_task_thread - task finished, refcount decremented.\n");
}
/*
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
index ba2d73a84..ce4c314a1 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
@@ -1,6 +1,6 @@
/** BEGIN COPYRIGHT BLOCK
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
* All rights reserved.
*
@@ -1264,10 +1264,6 @@ ldbm_back_add(Slapi_PBlock *pb)
goto common_return;
error_return:
- /* Revert the caches if this is the parent operation */
- if (parent_op && betxn_callback_fails) {
- revert_cache(inst, &parent_time);
- }
if (addingentry_id_assigned) {
next_id_return(be, addingentry->ep_id);
}
@@ -1376,6 +1372,11 @@ diskfull_return:
if (!not_an_error) {
rc = SLAPI_FAIL_GENERAL;
}
+
+ /* Revert the caches if this is the parent operation */
+ if (parent_op && betxn_callback_fails) {
+ revert_cache(inst, &parent_time);
+ }
}
common_return:
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index de23190c3..27f0ac58a 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -1407,11 +1407,6 @@ commit_return:
goto common_return;
error_return:
- /* Revert the caches if this is the parent operation */
- if (parent_op && betxn_callback_fails) {
- revert_cache(inst, &parent_time);
- }
-
if (tombstone) {
if (cache_is_in_cache(&inst->inst_cache, tombstone)) {
tomb_ep_id = tombstone->ep_id; /* Otherwise, tombstone might have been freed. */
@@ -1496,6 +1491,11 @@ error_return:
conn_id, op_id, parent_modify_c.old_entry, parent_modify_c.new_entry, myrc);
}
+ /* Revert the caches if this is the parent operation */
+ if (parent_op && betxn_callback_fails) {
+ revert_cache(inst, &parent_time);
+ }
+
common_return:
if (orig_entry) {
/* NOTE: #define SLAPI_DELETE_BEPREOP_ENTRY SLAPI_ENTRY_PRE_OP */
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index 537369055..64b293001 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -1,6 +1,6 @@
/** BEGIN COPYRIGHT BLOCK
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
* All rights reserved.
*
@@ -1043,11 +1043,6 @@ ldbm_back_modify(Slapi_PBlock *pb)
goto common_return;
error_return:
- /* Revert the caches if this is the parent operation */
- if (parent_op && betxn_callback_fails) {
- revert_cache(inst, &parent_time);
- }
-
if (postentry != NULL) {
slapi_entry_free(postentry);
postentry = NULL;
@@ -1103,6 +1098,10 @@ error_return:
if (!not_an_error) {
rc = SLAPI_FAIL_GENERAL;
}
+ /* Revert the caches if this is the parent operation */
+ if (parent_op && betxn_callback_fails) {
+ revert_cache(inst, &parent_time);
+ }
}
/* if ec is in cache, remove it, then add back e if we still have it */
diff --git a/src/lib389/lib389/cli_conf/plugins/automember.py b/src/lib389/lib389/cli_conf/plugins/automember.py
index 15b00c633..568586ad8 100644
--- a/src/lib389/lib389/cli_conf/plugins/automember.py
+++ b/src/lib389/lib389/cli_conf/plugins/automember.py
@@ -155,7 +155,7 @@ def fixup(inst, basedn, log, args):
log.info('Attempting to add task entry... This will fail if Automembership plug-in is not enabled.')
if not plugin.status():
log.error("'%s' is disabled. Rebuild membership task can't be executed" % plugin.rdn)
- fixup_task = plugin.fixup(args.DN, args.filter)
+ fixup_task = plugin.fixup(args.DN, args.filter, args.cleanup)
if args.wait:
log.info(f'Waiting for fixup task "{fixup_task.dn}" to complete. You can safely exit by pressing Control C ...')
fixup_task.wait(timeout=args.timeout)
@@ -225,8 +225,8 @@ def create_parser(subparsers):
subcommands = automember.add_subparsers(help='action')
add_generic_plugin_parsers(subcommands, AutoMembershipPlugin)
- list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
- subcommands_list = list.add_subparsers(help='action')
+ automember_list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
+ subcommands_list = automember_list.add_subparsers(help='action')
list_definitions = subcommands_list.add_parser('definitions', help='Lists Automembership definitions.')
list_definitions.set_defaults(func=definition_list)
list_regexes = subcommands_list.add_parser('regexes', help='List Automembership regex rules.')
@@ -269,6 +269,8 @@ def create_parser(subparsers):
fixup_task.add_argument('-f', '--filter', required=True, help='Sets the LDAP filter for entries to fix up')
fixup_task.add_argument('-s', '--scope', required=True, choices=['sub', 'base', 'one'], type=str.lower,
help='Sets the LDAP search scope for entries to fix up')
+ fixup_task.add_argument('--cleanup', action='store_true',
+ help="Clean up previous group memberships before rebuilding")
fixup_task.add_argument('--wait', action='store_true',
help="Wait for the task to finish, this could take a long time")
fixup_task.add_argument('--timeout', default=0, type=int,
@@ -279,7 +281,7 @@ def create_parser(subparsers):
fixup_status.add_argument('--dn', help="The task entry's DN")
fixup_status.add_argument('--show-log', action='store_true', help="Display the task log")
fixup_status.add_argument('--watch', action='store_true',
- help="Watch the task's status and wait for it to finish")
+ help="Watch the task's status and wait for it to finish")
abort_fixup = subcommands.add_parser('abort-fixup', help='Abort the rebuild membership task.')
abort_fixup.set_defaults(func=abort)
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
index 52691a44c..a1ad0a45b 100644
--- a/src/lib389/lib389/plugins.py
+++ b/src/lib389/lib389/plugins.py
@@ -1141,13 +1141,15 @@ class AutoMembershipPlugin(Plugin):
def __init__(self, instance, dn="cn=Auto Membership Plugin,cn=plugins,cn=config"):
super(AutoMembershipPlugin, self).__init__(instance, dn)
- def fixup(self, basedn, _filter=None):
+ def fixup(self, basedn, _filter=None, cleanup=False):
"""Create an automember rebuild membership task
:param basedn: Basedn to fix up
:type basedn: str
:param _filter: a filter for entries to fix up
:type _filter: str
+ :param cleanup: cleanup old group memberships
+ :type cleanup: boolean
:returns: an instance of Task(DSLdapObject)
"""
@@ -1156,6 +1158,9 @@ class AutoMembershipPlugin(Plugin):
task_properties = {'basedn': basedn}
if _filter is not None:
task_properties['filter'] = _filter
+ if cleanup:
+ task_properties['cleanup'] = "yes"
+
task.create(properties=task_properties)
return task
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
index 1a16bbb83..193805780 100644
--- a/src/lib389/lib389/tasks.py
+++ b/src/lib389/lib389/tasks.py
@@ -1006,12 +1006,13 @@ class Tasks(object):
return exitCode
def automemberRebuild(self, suffix=DEFAULT_SUFFIX, scope='sub',
- filterstr='objectclass=top', args=None):
+ filterstr='objectclass=top', cleanup=False, args=None):
'''
- @param suffix - The suffix the task should examine - defualt is
+ @param suffix - The suffix the task should examine - default is
"dc=example,dc=com"
@param scope - The scope of the search to find entries
- @param fitlerstr - THe search filter to find entries
+ @param fitlerstr - The search filter to find entries
+ @param cleanup - reset/clear the old group memberships prior to rebuilding
@param args - is a dictionary that contains modifier of the task
wait: True/[False] - If True, waits for the completion of
the task before to return
@@ -1027,6 +1028,8 @@ class Tasks(object):
entry.setValues('basedn', suffix)
entry.setValues('filter', filterstr)
entry.setValues('scope', scope)
+ if cleanup:
+ entry.setValues('cleanup', 'yes')
# start the task and possibly wait for task completion
try:
--
2.43.0

View File

@ -0,0 +1,894 @@
From b53faa9e7289383bbc02fc260b1b34958a317fdd Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Fri, 6 Sep 2024 14:45:06 +0200
Subject: [PATCH] Issue 6090 - Fix dbscan options and man pages (#6315)
* Issue 6090 - Fix dbscan options and man pages
dbscan's -d option is dangerously confusing: in dbscan it removes a database instance, while in db_stat it identifies the database
(cf. issue #5609).
This fix implements long options in dbscan, renames -d to --remove, and requires a new --do-it option for actions that change the database content.
The fix also aligns the usage text and the dbscan man page with the new set of options.
Issue: #6090
Reviewed by: @tbordaz, @droideck (Thanks!)
(cherry picked from commit 25e1d16887ebd299dfe0088080b9ee0deec1e41f)
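
A quick sketch of the resulting CLI contract (written in the same style as the dbscan() helper in the new dbscan_test.py; the dbi path is illustrative):

    import subprocess

    dbi = "/var/lib/dirsrv/slapd-supplier1/db/userroot/cn.db"

    # A destructive action without --do-it must fail with rc 1 and a hint
    r = subprocess.run(["dbscan", "-D", "mdb", "--remove", "-f", dbi],
                       encoding="utf-8", stdout=subprocess.PIPE,
                       stderr=subprocess.STDOUT)
    assert r.returncode == 1
    assert "--do-it" in r.stdout

    # The same action goes through once the confirmation flag is given
    subprocess.run(["dbscan", "-D", "mdb", "--remove", "-f", dbi, "--do-it"],
                   check=True)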
---
dirsrvtests/tests/suites/clu/dbscan_test.py | 253 ++++++++++++++++++
.../tests/suites/clu/repl_monitor_test.py | 4 +-
.../slapd/back-ldbm/db-bdb/bdb_layer.c | 12 +-
ldap/servers/slapd/back-ldbm/dbimpl.c | 50 +++-
ldap/servers/slapd/tools/dbscan.c | 182 ++++++++++---
man/man1/dbscan.1 | 74 +++--
src/lib389/lib389/__init__.py | 9 +-
src/lib389/lib389/cli_ctl/dblib.py | 13 +-
8 files changed, 531 insertions(+), 66 deletions(-)
create mode 100644 dirsrvtests/tests/suites/clu/dbscan_test.py
diff --git a/dirsrvtests/tests/suites/clu/dbscan_test.py b/dirsrvtests/tests/suites/clu/dbscan_test.py
new file mode 100644
index 000000000..2c9a9651a
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/dbscan_test.py
@@ -0,0 +1,253 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2024 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+import os
+import pytest
+import re
+import subprocess
+import sys
+
+from lib389 import DirSrv
+from lib389._constants import DBSCAN
+from lib389.topologies import topology_m2 as topo_m2
+from difflib import context_diff
+
+pytestmark = pytest.mark.tier0
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+
+
+class CalledProcessUnexpectedReturnCode(subprocess.CalledProcessError):
+ def __init__(self, result, expected_rc):
+ super().__init__(cmd=result.args, returncode=result.returncode, output=result.stdout, stderr=result.stderr)
+ self.expected_rc = expected_rc
+ self.result = result
+
+ def __str__(self):
+ return f'Command {self.result.args} returned {self.result.returncode} instead of {self.expected_rc}'
+
+
+class DbscanPaths:
+ @staticmethod
+ def list_instances(inst, dblib, dbhome):
+ # compute db instance pathnames
+ instances = dbscan(['-D', dblib, '-L', dbhome], inst=inst).stdout
+ dbis = []
+ if dblib == 'bdb':
+ pattern = r'^ (.*) $'
+ prefix = f'{dbhome}/'
+ else:
+ pattern = r'^ (.*) flags:'
+ prefix = f''
+ for match in re.finditer(pattern, instances, flags=re.MULTILINE):
+ dbis.append(prefix+match.group(1))
+ return dbis
+
+ @staticmethod
+ def list_options(inst):
+ # compute supported options
+ options = []
+ usage = dbscan(['-h'], inst=inst, expected_rc=None).stdout
+ pattern = r'^\s+(?:(-[^-,]+), +)?(--[^ ]+).*$'
+ for match in re.finditer(pattern, usage, flags=re.MULTILINE):
+ for idx in range(1,3):
+ if match.group(idx) is not None:
+ options.append(match.group(idx))
+ return options
+
+ def __init__(self, inst):
+ dblib = inst.get_db_lib()
+ dbhome = inst.ds_paths.db_home_dir
+ self.inst = inst
+ self.dblib = dblib
+ self.dbhome = dbhome
+ self.options = DbscanPaths.list_options(inst)
+ self.dbis = DbscanPaths.list_instances(inst, dblib, dbhome)
+ self.ldif_dir = inst.ds_paths.ldif_dir
+
+ def get_dbi(self, attr, backend='userroot'):
+ for dbi in self.dbis:
+ if f'{backend}/{attr}.'.lower() in dbi.lower():
+ return dbi
+ raise KeyError(f'Unknown dbi {backend}/{attr}')
+
+ def __repr__(self):
+ attrs = ['inst', 'dblib', 'dbhome', 'ldif_dir', 'options', 'dbis' ]
+ res = ", ".join(map(lambda x: f'{x}={self.__dict__[x]}', attrs))
+ return f'DbscanPaths({res})'
+
+
+def dbscan(args, inst=None, expected_rc=0):
+ if inst is None:
+ prefix = os.environ.get('PREFIX', "")
+ prog = f'{prefix}/bin/dbscan'
+ else:
+ prog = os.path.join(inst.ds_paths.bin_dir, DBSCAN)
+ args.insert(0, prog)
+ output = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ log.debug(f'{args} result is {output.returncode} output is {output.stdout}')
+ if expected_rc is not None and expected_rc != output.returncode:
+ raise CalledProcessUnexpectedReturnCode(output, expected_rc)
+ return output
+
+
+def log_export_file(filename):
+ with open(filename, 'r') as file:
+ log.debug(f'=========== Dump of {filename} ================')
+ for line in file:
+ log.debug(line.rstrip('\n'))
+ log.debug(f'=========== End of {filename} =================')
+
+
+@pytest.fixture(scope='module')
+def paths(topo_m2, request):
+ inst = topo_m2.ms["supplier1"]
+ if sys.version_info < (3,5):
+ pytest.skip('requires python version >= 3.5')
+ paths = DbscanPaths(inst)
+ if '--do-it' not in paths.options:
+ pytest.skip('Not supported with this dbscan version')
+ inst.stop()
+ return paths
+
+
+def test_dbscan_destructive_actions(paths, request):
+ """Test that dbscan remove/import actions
+
+ :id: f40b0c42-660a-11ef-9544-083a88554478
+ :setup: Stopped standalone instance
+ :steps:
+ 1. Export cn instance with dbscan
+ 2. Run dbscan --remove ...
+ 3. Check the error message about missing --do-it
+ 4. Check that cn instance is still present
+ 5. Run dbscan -I import_file ...
+ 6. Check the error message about missing --do-it
+ 7. Check that cn instance is still present
+ 8. Run dbscan --remove ... --do-it
+ 9. Check that the error message is not present
+ 10. Check that cn instance was removed
+ 11. Run dbscan -I import_file ... --do-it
+ 12. Check that the error message is not present
+ 13. Check that cn instance is present again
+ 14. Export the database again
+ 15. Check that the contents of the export files are the same
+ :expectedresults:
+ 1. Success
+ 2. dbscan return code should be 1 (error)
+ 3. Error message should be present
+ 4. cn instance should be present
+ 5. dbscan return code should be 1 (error)
+ 6. Error message should be present
+ 7. cn instance should be present
+ 8. dbscan return code should be 0 (success)
+ 9. Error message should not be present
+ 10. cn instance should not be present
+ 11. dbscan return code should be 0 (success)
+ 12. Error message should not be present
+ 13. cn instance should be present
+ 14. Success
+ 15. Export file contents should be the same
+ """
+
+ # Export cn instance with dbscan
+ export_cn = f'{paths.ldif_dir}/dbscan_cn.data'
+ export_cn2 = f'{paths.ldif_dir}/dbscan_cn2.data'
+ cndbi = paths.get_dbi('replication_changelog')
+ inst = paths.inst
+ dblib = paths.dblib
+ exportok = False
+ def fin():
+ if os.path.exists(export_cn):
+ # Restore cn if it was exported successfully but no longer exists
+ if exportok and cndbi not in DbscanPaths.list_instances(inst, dblib, paths.dbhome):
+ dbscan(['-D', dblib, '-f', cndbi, '-I', export_cn, '--do-it'], inst=inst)
+ if not DEBUGGING:
+ os.remove(export_cn)
+ if os.path.exists(export_cn2) and not DEBUGGING:
+ os.remove(export_cn2)
+
+ fin()
+ request.addfinalizer(fin)
+ dbscan(['-D', dblib, '-f', cndbi, '-X', export_cn], inst=inst)
+ exportok = True
+
+ expected_msg = "without specifying '--do-it' parameter."
+
+ # Run dbscan --remove ...
+ result = dbscan(['-D', paths.dblib, '--remove', '-f', cndbi],
+ inst=paths.inst, expected_rc=1)
+
+ # Check the error message about missing --do-it
+ assert expected_msg in result.stdout
+
+ # Check that cn instance is still present
+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
+ assert cndbi in curdbis
+
+ # Run dbscan -I import_file ...
+ result = dbscan(['-D', paths.dblib, '-f', cndbi, '-I', export_cn],
+ inst=paths.inst, expected_rc=1)
+
+ # Check the error message about missing --do-it
+ assert expected_msg in result.stdout
+
+ # Check that cn instance is still present
+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
+ assert cndbi in curdbis
+
+ # Run dbscan --remove ... --do-it
+ result = dbscan(['-D', paths.dblib, '--remove', '-f', cndbi, '--do-it'],
+ inst=paths.inst, expected_rc=0)
+
+ # Check that the error message about missing --do-it is not present
+ assert expected_msg not in result.stdout
+
+ # Check that cn instance was removed
+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
+ assert cndbi not in curdbis
+
+ # Run dbscan -I import_file ... --do-it
+ result = dbscan(['-D', paths.dblib, '-f', cndbi,
+ '-I', export_cn, '--do-it'],
+ inst=paths.inst, expected_rc=0)
+
+ # Check that the error message about missing --do-it is not present
+ assert expected_msg not in result.stdout
+
+ # Check that cn instance is present again
+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
+ assert cndbi in curdbis
+
+ # Export the database again
+ dbscan(['-D', dblib, '-f', cndbi, '-X', export_cn2], inst=inst)
+
+ # Check that the contents of the export files are the same
+ with open(export_cn) as f1:
+ f1lines = f1.readlines()
+ with open(export_cn2) as f2:
+ f2lines = f2.readlines()
+ diffs = list(context_diff(f1lines, f2lines))
+ if len(diffs) > 0:
+ log.debug("Export file differences are:")
+ for d in diffs:
+ log.debug(d)
+ log_export_file(export_cn)
+ log_export_file(export_cn2)
+ assert diffs is None
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
index d83416847..842dd96fd 100644
--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
@@ -77,13 +77,13 @@ def get_hostnames_from_log(port1, port2):
# search for Supplier :hostname:port
# and use \D to insure there is no more number is after
# the matched port (i.e that 10 is not matching 101)
- regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)'
+ regexp = '(Supplier: )([^:]*)(:' + str(port1) + r'\D)'
match=re.search(regexp, logtext)
host_m1 = 'localhost.localdomain'
if (match is not None):
host_m1 = match.group(2)
# Same for supplier 2
- regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)'
+ regexp = '(Supplier: )([^:]*)(:' + str(port2) + r'\D)'
match=re.search(regexp, logtext)
host_m2 = 'localhost.localdomain'
if (match is not None):
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
index de6be0f42..4b30e8e87 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
@@ -5820,8 +5820,16 @@ bdb_import_file_name(ldbm_instance *inst)
static char *
bdb_restore_file_name(struct ldbminfo *li)
{
- char *fname = slapi_ch_smprintf("%s/../.restore", li->li_directory);
-
+ char *pt = strrchr(li->li_directory, '/');
+ char *fname = NULL;
+ if (pt == NULL) {
+ fname = slapi_ch_strdup(".restore");
+ } else {
+ size_t len = pt-li->li_directory;
+ fname = slapi_ch_malloc(len+10);
+ strncpy(fname, li->li_directory, len);
+ strcpy(fname+len, "/.restore");
+ }
return fname;
}
diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
index 42f4a0718..134d06480 100644
--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
@@ -397,7 +397,48 @@ const char *dblayer_op2str(dbi_op_t op)
return str[idx];
}
-/* Open db env, db and db file privately */
+/* Get the li_directory directory from the database instance name -
+ * Caller should free the returned value
+ */
+static char *
+get_li_directory(const char *fname)
+{
+ /*
+ * li_directory is an existing directory.
+ * It can be fname, its parent, or its grandparent.
+ * In case of problem, the provided name is returned.
+ */
+ char *lid = slapi_ch_strdup(fname);
+ struct stat sbuf = {0};
+ char *pt = NULL;
+ for (int count=0; count<3; count++) {
+ if (stat(lid, &sbuf) == 0) {
+ if (S_ISDIR(sbuf.st_mode)) {
+ return lid;
+ }
+ /* An existing non-directory file is acceptable only at the first
+ * iteration, and it must then be a regular file; otherwise it is an error.
+ */
+ if (count>0 || !S_ISREG(sbuf.st_mode)) {
+ break;
+ }
+ }
+ pt = strrchr(lid, '/');
+ if (pt == NULL) {
+ slapi_ch_free_string(&lid);
+ return slapi_ch_strdup(".");
+ }
+ *pt = '\0';
+ }
+ /*
+ * Error case. Return a copy of the original string
+ * and let dblayer_private_open_fn fail to open the database.
+ */
+ slapi_ch_free_string(&lid);
+ return slapi_ch_strdup(fname);
+}
+
+/* Open db env, db and db file privately (for dbscan) */
int dblayer_private_open(const char *plgname, const char *dbfilename, int rw, Slapi_Backend **be, dbi_env_t **env, dbi_db_t **db)
{
struct ldbminfo *li;
@@ -412,7 +453,7 @@ int dblayer_private_open(const char *plgname, const char *dbfilename, int rw, Sl
li->li_plugin = (*be)->be_database;
li->li_plugin->plg_name = (char*) "back-ldbm-dbimpl";
li->li_plugin->plg_libpath = (char*) "libback-ldbm";
- li->li_directory = slapi_ch_strdup(dbfilename);
+ li->li_directory = get_li_directory(dbfilename);
/* Initialize database plugin */
rc = dbimpl_setup(li, plgname);
@@ -439,7 +480,10 @@ int dblayer_private_close(Slapi_Backend **be, dbi_env_t **env, dbi_db_t **db)
}
slapi_ch_free((void**)&li->li_dblayer_private);
slapi_ch_free((void**)&li->li_dblayer_config);
- ldbm_config_destroy(li);
+ if (dblayer_is_lmdb(*be)) {
+ /* Skipped for bdb as it would generate use-after-free and double-free errors */
+ ldbm_config_destroy(li);
+ }
slapi_ch_free((void**)&(*be)->be_database);
slapi_ch_free((void**)&(*be)->be_instance_info);
slapi_ch_free((void**)be);
diff --git a/ldap/servers/slapd/tools/dbscan.c b/ldap/servers/slapd/tools/dbscan.c
index 2d28dd951..12edf7c5b 100644
--- a/ldap/servers/slapd/tools/dbscan.c
+++ b/ldap/servers/slapd/tools/dbscan.c
@@ -26,6 +26,7 @@
#include <string.h>
#include <ctype.h>
#include <errno.h>
+#include <getopt.h>
#include "../back-ldbm/dbimpl.h"
#include "../slapi-plugin.h"
#include "nspr.h"
@@ -85,6 +86,8 @@
#define DB_BUFFER_SMALL ENOMEM
#endif
+#define COUNTOF(array) ((sizeof(array))/sizeof(*(array)))
+
#if defined(linux)
#include <getopt.h>
#endif
@@ -130,9 +133,43 @@ long ind_cnt = 0;
long allids_cnt = 0;
long other_cnt = 0;
char *dump_filename = NULL;
+int do_it = 0;
static Slapi_Backend *be = NULL; /* Pseudo backend used to interact with db */
+/* For Long options without shortcuts */
+enum {
+ OPT_FIRST = 0x1000,
+ OPT_DO_IT,
+ OPT_REMOVE,
+};
+
+static const struct option options[] = {
+ /* Options without shortcut */
+ { "do-it", no_argument, 0, OPT_DO_IT },
+ { "remove", no_argument, 0, OPT_REMOVE },
+ /* Options with shortcut */
+ { "import", required_argument, 0, 'I' },
+ { "export", required_argument, 0, 'X' },
+ { "db-type", required_argument, 0, 'D' },
+ { "dbi", required_argument, 0, 'f' },
+ { "ascii", no_argument, 0, 'A' },
+ { "raw", no_argument, 0, 'R' },
+ { "truncate-entry", required_argument, 0, 't' },
+ { "entry-id", required_argument, 0, 'K' },
+ { "key", required_argument, 0, 'k' },
+ { "list", required_argument, 0, 'L' },
+ { "stats", required_argument, 0, 'S' },
+ { "id-list-max-size", required_argument, 0, 'l' },
+ { "id-list-min-size", required_argument, 0, 'G' },
+ { "show-id-list-lenghts", no_argument, 0, 'n' },
+ { "show-id-list", no_argument, 0, 'r' },
+ { "summary", no_argument, 0, 's' },
+ { "help", no_argument, 0, 'h' },
+ { 0, 0, 0, 0 }
+};
+
+
/** db_printf - functioning same as printf but a place for manipluating output.
*/
void
@@ -899,7 +936,7 @@ is_changelog(char *filename)
}
static void
-usage(char *argv0)
+usage(char *argv0, int error)
{
char *copy = strdup(argv0);
char *p0 = NULL, *p1 = NULL;
@@ -922,42 +959,52 @@ usage(char *argv0)
}
printf("\n%s - scan a db file and dump the contents\n", p0);
printf(" common options:\n");
- printf(" -D <dbimpl> specify db implementaion (may be: bdb or mdb)\n");
- printf(" -f <filename> specify db file\n");
- printf(" -A dump as ascii data\n");
- printf(" -R dump as raw data\n");
- printf(" -t <size> entry truncate size (bytes)\n");
+ printf(" -A, --ascii dump as ascii data\n");
+ printf(" -D, --db-type <dbimpl> specify db implementaion (may be: bdb or mdb)\n");
+ printf(" -f, --dbi <filename> specify db instance\n");
+ printf(" -R, --raw dump as raw data\n");
+ printf(" -t, --truncate-entry <size> entry truncate size (bytes)\n");
+
printf(" entry file options:\n");
- printf(" -K <entry_id> lookup only a specific entry id\n");
+ printf(" -K, --entry-id <entry_id> lookup only a specific entry id\n");
+
printf(" index file options:\n");
- printf(" -k <key> lookup only a specific key\n");
- printf(" -L <dbhome> list all db files\n");
- printf(" -S <dbhome> show statistics\n");
- printf(" -l <size> max length of dumped id list\n");
- printf(" (default %" PRIu32 "; 40 bytes <= size <= 1048576 bytes)\n", MAX_BUFFER);
- printf(" -G <n> only display index entries with more than <n> ids\n");
- printf(" -n display ID list lengths\n");
- printf(" -r display the conents of ID list\n");
- printf(" -s Summary of index counts\n");
- printf(" -I file Import database content from file\n");
- printf(" -X file Export database content in file\n");
+ printf(" -G, --id-list-min-size <n> only display index entries with more than <n> ids\n");
+ printf(" -I, --import file Import database instance from file.\n");
+ printf(" -k, --key <key> lookup only a specific key\n");
+ printf(" -l, --id-list-max-size <size> max length of dumped id list\n");
+ printf(" (default %" PRIu32 "; 40 bytes <= size <= 1048576 bytes)\n", MAX_BUFFER);
+ printf(" -n, --show-id-list-lenghts display ID list lengths\n");
+ printf(" --remove remove database instance\n");
+ printf(" -r, --show-id-list display the conents of ID list\n");
+ printf(" -S, --stats <dbhome> show statistics\n");
+ printf(" -X, --export file export database instance in file\n");
+
+ printf(" other options:\n");
+ printf(" -s, --summary summary of index counts\n");
+ printf(" -L, --list <dbhome> list all db files\n");
+ printf(" --do-it confirmation flags for destructive actions like --remove or --import\n");
+ printf(" -h, --help display this usage\n");
+
printf(" sample usages:\n");
- printf(" # list the db files\n");
- printf(" %s -D mdb -L /var/lib/dirsrv/slapd-i/db/\n", p0);
- printf(" %s -f id2entry.db\n", p0);
+ printf(" # list the database instances\n");
+ printf(" %s -L /var/lib/dirsrv/slapd-supplier1/db/\n", p0);
printf(" # dump the entry file\n");
printf(" %s -f id2entry.db\n", p0);
printf(" # display index keys in cn.db4\n");
printf(" %s -f cn.db4\n", p0);
+ printf(" # display index keys in cn on lmdb\n");
+ printf(" %s -f /var/lib/dirsrv/slapd-supplier1/db/userroot/cn.db\n", p0);
+ printf(" (Note: Use 'dbscan -L db_home_dir' to get the db instance path)\n");
printf(" # display index keys and the count of entries having the key in mail.db4\n");
printf(" %s -r -f mail.db4\n", p0);
printf(" # display index keys and the IDs having more than 20 IDs in sn.db4\n");
printf(" %s -r -G 20 -f sn.db4\n", p0);
printf(" # display summary of objectclass.db4\n");
- printf(" %s -f objectclass.db4\n", p0);
+ printf(" %s -s -f objectclass.db4\n", p0);
printf("\n");
free(copy);
- exit(1);
+ exit(error?1:0);
}
void dump_ascii_val(const char *str, dbi_val_t *val)
@@ -1126,13 +1173,12 @@ importdb(const char *dbimpl_name, const char *filename, const char *dump_name)
dblayer_init_pvt_txn();
if (!dump) {
- printf("Failed to open dump file %s. Error %d: %s\n", dump_name, errno, strerror(errno));
- fclose(dump);
+ printf("Error: Failed to open dump file %s. Error %d: %s\n", dump_name, errno, strerror(errno));
return 1;
}
if (dblayer_private_open(dbimpl_name, filename, 1, &be, &env, &db)) {
- printf("Can't initialize db plugin: %s\n", dbimpl_name);
+ printf("Error: Can't initialize db plugin: %s\n", dbimpl_name);
fclose(dump);
return 1;
}
@@ -1142,11 +1188,16 @@ importdb(const char *dbimpl_name, const char *filename, const char *dump_name)
!_read_line(dump, &keyword, &data) && keyword == 'v') {
ret = dblayer_db_op(be, db, txn.txn, DBI_OP_PUT, &key, &data);
}
+ if (ret !=0) {
+ printf("Error: failed to write record in database. Error %d: %s\n", ret, dblayer_strerror(ret));
+ dump_ascii_val("Failing record key", &key);
+ dump_ascii_val("Failing record value", &data);
+ }
fclose(dump);
dblayer_value_free(be, &key);
dblayer_value_free(be, &data);
if (dblayer_private_close(&be, &env, &db)) {
- printf("Unable to shutdown the db plugin: %s\n", dblayer_strerror(1));
+ printf("Error: Unable to shutdown the db plugin: %s\n", dblayer_strerror(1));
return 1;
}
return ret;
@@ -1243,6 +1294,7 @@ removedb(const char *dbimpl_name, const char *filename)
return 1;
}
+ db = NULL; /* Database is already closed by dblayer_db_remove */
if (dblayer_private_close(&be, &env, &db)) {
printf("Unable to shutdown the db plugin: %s\n", dblayer_strerror(1));
return 1;
@@ -1250,7 +1302,6 @@ removedb(const char *dbimpl_name, const char *filename)
return 0;
}
-
int
main(int argc, char **argv)
{
@@ -1262,11 +1313,46 @@ main(int argc, char **argv)
int ret = 0;
char *find_key = NULL;
uint32_t entry_id = 0xffffffff;
- char *dbimpl_name = (char*) "bdb";
- int c;
+ char *defdbimpl = getenv("NSSLAPD_DB_LIB");
+ char *dbimpl_name = (char*) "mdb";
+ int longopt_idx = 0;
+ int c = 0;
+ char optstring[2*COUNTOF(options)+1] = {0};
+
+ if (defdbimpl) {
+ if (strcasecmp(defdbimpl, "bdb") == 0) {
+ dbimpl_name = (char*) "bdb";
+ }
+ if (strcasecmp(defdbimpl, "mdb") == 0) {
+ dbimpl_name = (char*) "mdb";
+ }
+ }
+
+ /* Compute getopt short option string */
+ {
+ char *pt = optstring;
+ for (const struct option *opt = options; opt->name; opt++) {
+ if (opt->val>0 && opt->val<OPT_FIRST) {
+ *pt++ = (char)(opt->val);
+ if (opt->has_arg == required_argument) {
+ *pt++ = ':';
+ }
+ }
+ }
+ *pt = '\0';
+ }
- while ((c = getopt(argc, argv, "Af:RL:S:l:nG:srk:K:hvt:D:X:I:d")) != EOF) {
+ while ((c = getopt_long(argc, argv, optstring, options, &longopt_idx)) != EOF) {
+ if (c == 0) {
+ c = longopt_idx;
+ }
switch (c) {
+ case OPT_DO_IT:
+ do_it = 1;
+ break;
+ case OPT_REMOVE:
+ display_mode |= REMOVE;
+ break;
case 'A':
display_mode |= ASCIIDATA;
break;
@@ -1332,32 +1418,48 @@ main(int argc, char **argv)
display_mode |= IMPORT;
dump_filename = optarg;
break;
- case 'd':
- display_mode |= REMOVE;
- break;
case 'h':
default:
- usage(argv[0]);
+ usage(argv[0], 1);
}
}
+ if (filename == NULL) {
+ fprintf(stderr, "PARAMETER ERROR! 'filename' parameter is missing.\n");
+ usage(argv[0], 1);
+ }
+
if (display_mode & EXPORT) {
return exportdb(dbimpl_name, filename, dump_filename);
}
if (display_mode & IMPORT) {
+ if (!strstr(filename, "/id2entry") && !strstr(filename, "/replication_changelog")) {
+ /* schema is unknown in dbscan ==> duplicate keys sort order is unknown
+ * ==> cannot create dbi with duplicate keys
+ * ==> only id2entry and repl changelog is importable.
+ */
+ fprintf(stderr, "ERROR: The only database instances that may be imported with dbscan are id2entry and replication_changelog.\n");
+ exit(1);
+ }
+
+ if (do_it == 0) {
+ fprintf(stderr, "PARAMETER ERROR! Trying to perform a destructive action (import)\n"
+ " without specifying '--do-it' parameter.\n");
+ exit(1);
+ }
return importdb(dbimpl_name, filename, dump_filename);
}
if (display_mode & REMOVE) {
+ if (do_it == 0) {
+ fprintf(stderr, "PARAMETER ERROR! Trying to perform a destructive action (remove)\n"
+ " without specifying '--do-it' parameter.\n");
+ exit(1);
+ }
return removedb(dbimpl_name, filename);
}
- if (filename == NULL) {
- fprintf(stderr, "PARAMETER ERROR! 'filename' parameter is missing.\n");
- usage(argv[0]);
- }
-
if (display_mode & LISTDBS) {
dbi_dbslist_t *dbs = dblayer_list_dbs(dbimpl_name, filename);
if (dbs) {
diff --git a/man/man1/dbscan.1 b/man/man1/dbscan.1
index 810608371..dfb6e8351 100644
--- a/man/man1/dbscan.1
+++ b/man/man1/dbscan.1
@@ -31,50 +31,94 @@ Scans a Directory Server database index file and dumps the contents.
.\" respectively.
.SH OPTIONS
A summary of options is included below:
+.IP
+common options:
+.TP
+.B \fB\-A, \-\-ascii\fR
+dump as ascii data
+.TP
+.B \fB\-D, \-\-db\-type\fR <dbimpl>
+specify db type: bdb or mdb
.TP
-.B \fB\-f\fR <filename>
-specify db file
+.B \fB\-f, \-\-dbi\fR <filename>
+specify db instance
.TP
-.B \fB\-R\fR
+.B \fB\-R, \-\-raw\fR
dump as raw data
.TP
-.B \fB\-t\fR <size>
+.B \fB\-t, \-\-truncate\-entry\fR <size>
entry truncate size (bytes)
.IP
entry file options:
.TP
-.B \fB\-K\fR <entry_id>
+.B \fB\-K, \-\-entry\-id\fR <entry_id>
lookup only a specific entry id
+.IP
index file options:
.TP
-.B \fB\-k\fR <key>
+.B \fB\-G, \-\-id\-list\-min\-size\fR <n>
+only display index entries with more than <n> ids
+.TP
+.B \fB\-I, \-\-import\fR <file>
+Import database instance from file. Requires \-\-do\-it parameter
+WARNING! Only the id2entry and replication_changelog database instances
+may be imported by dbscan.
+.TP
+.B \fB\-k, \-\-key\fR <key>
lookup only a specific key
.TP
-.B \fB\-l\fR <size>
+.B \fB\-l, \-\-id\-list\-max\-size\fR <size>
max length of dumped id list
(default 4096; 40 bytes <= size <= 1048576 bytes)
.TP
-.B \fB\-G\fR <n>
-only display index entries with more than <n> ids
-.TP
-.B \fB\-n\fR
+.B \fB\-n, \-\-show\-id\-list\-lenghts\fR
display ID list lengths
.TP
-.B \fB\-r\fR
+.B \fB\-\-remove\fR
+remove a db instance. Requires \-\-do\-it parameter
+.TP
+.B \fB\-r, \-\-show\-id\-list\fR
display the contents of ID list
.TP
-.B \fB\-s\fR
+.B \fB\-S, \-\-stats\fR <dbhome>
+display statistics
+.TP
+.B \fB\-X, \-\-export\fR <file>
+Export database instance to file
+.IP
+other options:
+.TP
+.B \fB\-s, \-\-summary\fR
Summary of index counts
+.TP
+.B \fB\-L, \-\-list\fR <dbhome>
+List of database instances
+.TP
+.B \fB\-\-do\-it\fR
+confirmation required for actions that change the database contents
+.TP
+.B \fB\-h, \-\-help\fR
+display the usage
.IP
.SH USAGE
Sample usages:
.TP
+List the database instances
+.B
+dbscan -L /var/lib/dirsrv/slapd-supplier1/db
+.TP
Dump the entry file:
.B
dbscan \fB\-f\fR id2entry.db4
.TP
Display index keys in cn.db4:
-.B dbscan \fB\-f\fR cn.db4
+.B
+dbscan \fB\-f\fR cn.db4
+.TP
+Display index keys in cn on lmdb:
+.B
+dbscan \fB\-f\fR /var/lib/dirsrv/slapd\-supplier1/db/userroot/cn.db
+ (Note: Use \fBdbscan \-L db_home_dir\fR to get the db instance path)
.TP
Display index keys and the count of entries having the key in mail.db4:
.B
@@ -86,7 +130,7 @@ dbscan \fB\-r\fR \fB\-G\fR 20 \fB\-f\fR sn.db4
.TP
Display summary of objectclass.db4:
.B
-dbscan \fB\-f\fR objectclass.db4
+dbscan \fB\-s \-f\fR objectclass.db4
.br
.SH AUTHOR
dbscan was written by the 389 Project.
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index e87582d9e..368741a66 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -3039,14 +3039,17 @@ class DirSrv(SimpleLDAPObject, object):
return self._dbisupport
# check if -D and -L options are supported
try:
- cmd = ["%s/dbscan" % self.get_bin_dir(), "--help"]
+ cmd = ["%s/dbscan" % self.get_bin_dir(), "-h"]
self.log.debug("DEBUG: checking dbscan supported options %s" % cmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except subprocess.CalledProcessError:
pass
output, stderr = p.communicate()
- self.log.debug("is_dbi_supported output " + output.decode())
- if "-D <dbimpl>" in output.decode() and "-L <dbhome>" in output.decode():
+ output = output.decode()
+ self.log.debug("is_dbi_supported output " + output)
+ if "-D <dbimpl>" in output and "-L <dbhome>" in output:
+ self._dbisupport = True
+ elif "--db-type" in output and "--list" in output:
self._dbisupport = True
else:
self._dbisupport = False
diff --git a/src/lib389/lib389/cli_ctl/dblib.py b/src/lib389/lib389/cli_ctl/dblib.py
index e9269e340..82f09c70c 100644
--- a/src/lib389/lib389/cli_ctl/dblib.py
+++ b/src/lib389/lib389/cli_ctl/dblib.py
@@ -158,6 +158,14 @@ def run_dbscan(args):
return output
+def does_dbscan_need_do_it():
+ prefix = os.environ.get('PREFIX', "")
+ prog = f'{prefix}/bin/dbscan'
+ args = [ prog, '-h' ]
+ output = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ return '--do-it' in output.stdout
+
+
def export_changelog(be, dblib):
# Export backend changelog
try:
@@ -172,7 +180,10 @@ def import_changelog(be, dblib):
# import backend changelog
try:
cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname']
- run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name']])
+ if does_dbscan_need_do_it():
+ run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name'], '--do-it'])
+ else:
+ run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name']])
return True
except subprocess.CalledProcessError as e:
return False
--
2.48.0

View File

@ -1,83 +0,0 @@
From 9319d5b022918f14cacb00e3faef85a6ab730a26 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 27 Feb 2024 16:30:47 -0800
Subject: [PATCH] Issue 3527 - Support HAProxy and Instance on the same machine
configuration (#6107)
Description: Improve how we handle HAProxy connections to work better when
the DS and HAProxy are on the same machine.
Ensure the client and header destination IPs are checked against the trusted IP list.
Additionally, this change also allows a configuration where
HAProxy listens on a different subnet than the one used to forward the request.
Related: https://github.com/389ds/389-ds-base/issues/3527
Reviewed by: @progier389, @jchapma (Thanks!)
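
A logic-only sketch of the relaxed check (function and variable names are illustrative, not the server's C API):

    def haproxy_header_trusted(client_ip, header_dest_ip, trusted_ips):
        # The connecting client (HAProxy's own address, possibly loopback)
        # and the header's destination address must each match *some*
        # trusted IP; they no longer have to match the same entry.
        return client_ip in trusted_ips and header_dest_ip in trusted_ips

    # DS and HAProxy on the same machine: loopback client, routable destination
    assert haproxy_header_trusted("127.0.0.1", "192.0.2.10",
                                  ["127.0.0.1", "192.0.2.10"])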
---
ldap/servers/slapd/connection.c | 35 +++++++++++++++++++++++++--------
1 file changed, 27 insertions(+), 8 deletions(-)
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index d28a39bf7..10a8cc577 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1187,6 +1187,8 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
char str_ip[INET6_ADDRSTRLEN + 1] = {0};
char str_haproxy_ip[INET6_ADDRSTRLEN + 1] = {0};
char str_haproxy_destip[INET6_ADDRSTRLEN + 1] = {0};
+ int trusted_matches_ip_found = 0;
+ int trusted_matches_destip_found = 0;
struct berval **bvals = NULL;
int proxy_connection = 0;
@@ -1245,21 +1247,38 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
normalize_IPv4(conn->cin_addr, buf_ip, sizeof(buf_ip), str_ip, sizeof(str_ip));
normalize_IPv4(&pr_netaddr_dest, buf_haproxy_destip, sizeof(buf_haproxy_destip),
str_haproxy_destip, sizeof(str_haproxy_destip));
+ size_t ip_len = strlen(buf_ip);
+ size_t destip_len = strlen(buf_haproxy_destip);
/* Now, reset RC and set it to 0 only if a match is found */
haproxy_rc = -1;
- /* Allow only:
- * Trusted IP == Original Client IP == HAProxy Header Destination IP */
+ /*
+ * We need to allow a configuration where DS instance and HAProxy are on the same machine.
+ * In this case, we need to check if
+ * the HAProxy client IP (which will be a loopback address) matches one of the trusted IP addresses,
+ * while still checking that
+ * the HAProxy header destination IP address matches one of the trusted IP addresses.
+ * Additionally, this change also allows a configuration where
+ * HAProxy listens on a different subnet than the one used to forward the request.
+ */
for (size_t i = 0; bvals[i] != NULL; ++i) {
- if ((strlen(bvals[i]->bv_val) == strlen(buf_ip)) &&
- (strlen(bvals[i]->bv_val) == strlen(buf_haproxy_destip)) &&
- (strncasecmp(bvals[i]->bv_val, buf_ip, strlen(buf_ip)) == 0) &&
- (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, strlen(buf_haproxy_destip)) == 0)) {
- haproxy_rc = 0;
- break;
+ size_t bval_len = strlen(bvals[i]->bv_val);
+
+ /* Check if the Client IP (HAProxy's machine IP) address matches the trusted IP address */
+ if (!trusted_matches_ip_found) {
+ trusted_matches_ip_found = (bval_len == ip_len) && (strncasecmp(bvals[i]->bv_val, buf_ip, ip_len) == 0);
+ }
+ /* Check if the HAProxy header destination IP address matches the trusted IP address */
+ if (!trusted_matches_destip_found) {
+ trusted_matches_destip_found = (bval_len == destip_len) && (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, destip_len) == 0);
}
}
+
+ if (trusted_matches_ip_found && trusted_matches_destip_found) {
+ haproxy_rc = 0;
+ }
+
if (haproxy_rc == -1) {
slapi_log_err(SLAPI_LOG_CONNS, "connection_read_operation", "HAProxy header received from unknown source.\n");
disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_PROXY_UNKNOWN, EPROTO);
--
2.45.0

View File

@ -0,0 +1,70 @@
From de52853a3551f1d1876ea21b33a5242ad669fec1 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Tue, 4 Feb 2025 15:40:16 +0000
Subject: [PATCH] Issue 6566 - RI plugin failure to handle a modrdn for rename
of member of multiple groups (#6567)
Bug description:
With AM and RI plugins enabled, the rename of a user that is part of multiple groups
fails with a "value exists" error.
Fix description:
For a modrdn the RI plugin creates a new DN; before a modify is attempted, check
whether the new DN already exists in the attribute being updated.
Fixes: https://github.com/389ds/389-ds-base/issues/6566
Reviewed by: @progier389 , @tbordaz (Thank you)
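
The guard in a few lines of Python (a sketch of the idea only; the actual fix calls attr_value_find_wsi() on the attribute):

    def rename_in_attr(values, old_dn, new_dn):
        # Drop the old DN, then add the new one only if it is not already
        # there (it may have been added while processing another group the
        # renamed user belonged to).
        values = [v for v in values if v != old_dn]
        if new_dn not in values:
            values.append(new_dn)
        return values

    assert rename_in_attr(["uid=a,ou=people", "uid=new,ou=people"],
                          "uid=a,ou=people",
                          "uid=new,ou=people") == ["uid=new,ou=people"]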
---
ldap/servers/plugins/referint/referint.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
index 468fdc239..218863ea5 100644
--- a/ldap/servers/plugins/referint/referint.c
+++ b/ldap/servers/plugins/referint/referint.c
@@ -924,6 +924,7 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
{
Slapi_Mods *smods = NULL;
char *newDN = NULL;
+ struct berval bv = {0};
char **dnParts = NULL;
char *sval = NULL;
char *newvalue = NULL;
@@ -1026,22 +1027,30 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
}
/* else: normalize_rc < 0) Ignore the DN normalization error for now. */
+ bv.bv_val = newDN;
+ bv.bv_len = strlen(newDN);
p = PL_strstr(sval, slapi_sdn_get_ndn(origDN));
if (p == sval) {
/* (case 1) */
slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
-
+ /* Add only if the attr value does not exist */
+ if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) {
+ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
+ }
} else if (p) {
/* (case 2) */
slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
*p = '\0';
newvalue = slapi_ch_smprintf("%s%s", sval, newDN);
- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue);
+ /* Add only if the attr value does not exist */
+ if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) {
+ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue);
+ }
slapi_ch_free_string(&newvalue);
}
/* else: value does not include the modified DN. Ignore it. */
slapi_ch_free_string(&sval);
+ bv = (struct berval){0};
}
rc = _do_modify(mod_pb, entrySDN, slapi_mods_get_ldapmods_byref(smods));
if (rc) {
--
2.48.0

View File

@ -1,108 +0,0 @@
From 016a2b6bd3e27cbff36609824a75b020dfd24823 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 1 May 2024 15:01:33 +0100
Subject: [PATCH] CVE-2024-2199
---
.../tests/suites/password/password_test.py | 56 +++++++++++++++++++
ldap/servers/slapd/modify.c | 8 ++-
2 files changed, 62 insertions(+), 2 deletions(-)
diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py
index 38079476a..b3ff08904 100644
--- a/dirsrvtests/tests/suites/password/password_test.py
+++ b/dirsrvtests/tests/suites/password/password_test.py
@@ -65,6 +65,62 @@ def test_password_delete_specific_password(topology_st):
log.info('test_password_delete_specific_password: PASSED')
+def test_password_modify_non_utf8(topology_st):
+ """Attempt a modify of the userPassword attribute with
+ an invalid non utf8 value
+
+ :id: a31af9d5-d665-42b9-8d6e-fea3d0837d36
+ :setup: Standalone instance
+ :steps:
+ 1. Add a user if it doesn't exist and set its password
+ 2. Verify password with a bind
+ 3. Modify userPassword attr with invalid value
+ 4. Attempt a bind with invalid password value
+ 5. Verify original password with a bind
+ :expectedresults:
+ 1. The user with userPassword should be added successfully
+ 2. Operation should be successful
+ 3. Server returns ldap.UNWILLING_TO_PERFORM
+ 4. Server returns ldap.INVALID_CREDENTIALS
+ 5. Operation should be successful
+ """
+
+ log.info('Running test_password_modify_non_utf8...')
+
+ # Create user and set password
+ standalone = topology_st.standalone
+ users = UserAccounts(standalone, DEFAULT_SUFFIX)
+ if not users.exists(TEST_USER_PROPERTIES['uid'][0]):
+ user = users.create(properties=TEST_USER_PROPERTIES)
+ else:
+ user = users.get(TEST_USER_PROPERTIES['uid'][0])
+ user.set('userpassword', PASSWORD)
+
+ # Verify password
+ try:
+ user.bind(PASSWORD)
+ except ldap.LDAPError as e:
+ log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
+ assert False
+
+ # Modify userPassword with an invalid value
+ password = b'tes\x82t-password' # A non UTF-8 encoded password
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+ user.replace('userpassword', password)
+
+ # Verify a bind fails with invalid password
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ user.bind(password)
+
+ # Verify we can still bind with original password
+ try:
+ user.bind(PASSWORD)
+ except ldap.LDAPError as e:
+ log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
+ assert False
+
+ log.info('test_password_modify_non_utf8: PASSED')
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
index 5ca78539c..669bb104c 100644
--- a/ldap/servers/slapd/modify.c
+++ b/ldap/servers/slapd/modify.c
@@ -765,8 +765,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
* flagged - leave mod attributes alone */
if (!repl_op && !skip_modified_attrs && lastmod) {
modify_update_last_modified_attr(pb, &smods);
+ slapi_pblock_set(pb, SLAPI_MODIFY_MODS, slapi_mods_get_ldapmods_byref(&smods));
}
+
if (0 == slapi_mods_get_num_mods(&smods)) {
/* nothing to do - no mods - this is not an error - just
send back LDAP_SUCCESS */
@@ -933,8 +935,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
/* encode password */
if (pw_encodevals_ext(pb, sdn, va)) {
- slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s.\n", slapi_entry_get_dn_const(e));
- send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to store attribute \"userPassword\" correctly\n", 0, NULL);
+ slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s, "
+ "check value is utf8 string.\n", slapi_entry_get_dn_const(e));
+ send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to hash \"userPassword\" attribute, "
+ "check value is utf8 string.\n", 0, NULL);
valuearray_free(&va);
goto free_and_return;
}
--
2.45.0

View File

@ -0,0 +1,43 @@
From a634756784056270773d67747061e26152d85469 Mon Sep 17 00:00:00 2001
From: Masahiro Matsuya <mmatsuya@redhat.com>
Date: Wed, 5 Feb 2025 11:38:04 +0900
Subject: [PATCH] Issue 6258 - Mitigate race condition in paged_results_test.py
(#6433)
The regression test dirsrvtests/tests/suites/paged_results/paged_results_test.py::test_multi_suffix_search has a race condition causing it to fail due to multiple queries potentially writing their logs out of chronological order.
This failure is mitigated by sorting the retrieved access_log_lines by their "op" value. This ensures the log lines are in chronological order, as expected by the assertions at the end of test_multi_suffix_search().
Helps fix: #6258
Reviewed by: @droideck , @progier389 (Thanks!)
Co-authored-by: Anuar Beisembayev <111912342+abeisemb@users.noreply.github.com>
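
A standalone illustration of the sort key (the log lines are made up):

    import re

    lines = [
        "conn=1 op=5 RESULT err=0 nentries=2 pr_cookie=-1",
        "conn=1 op=3 RESULT err=0 nentries=5 pr_cookie=0",
    ]
    lines.sort(key=lambda x: int(re.search(r"op=(\d+) RESULT", x).group(1)))
    assert lines[0].startswith("conn=1 op=3")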
---
dirsrvtests/tests/suites/paged_results/paged_results_test.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
index eaf0e0da9..fca48db0f 100644
--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py
+++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
@@ -7,6 +7,7 @@
# --- END COPYRIGHT BLOCK ---
#
import socket
+import re
from random import sample, randrange
import pytest
@@ -1126,6 +1127,8 @@ def test_multi_suffix_search(topology_st, create_user, new_suffixes):
topology_st.standalone.restart(timeout=10)
access_log_lines = topology_st.standalone.ds_access_log.match('.*pr_cookie=.*')
+ # Sort access_log_lines by op number to mitigate race condition effects.
+ access_log_lines.sort(key=lambda x: int(re.search(r"op=(\d+) RESULT", x).group(1)))
pr_cookie_list = ([line.rsplit('=', 1)[-1] for line in access_log_lines])
pr_cookie_list = [int(pr_cookie) for pr_cookie in pr_cookie_list]
log.info('Assert that last pr_cookie == -1 and others pr_cookie == 0')
--
2.48.0

View File

@ -1,213 +0,0 @@
From d5bbe52fbe84a7d3b5938bf82d5c4af15061a8e2 Mon Sep 17 00:00:00 2001
From: Pierre Rogier <progier@redhat.com>
Date: Wed, 17 Apr 2024 18:18:04 +0200
Subject: [PATCH] CVE-2024-3657
---
.../tests/suites/filter/large_filter_test.py | 34 +++++-
ldap/servers/slapd/back-ldbm/index.c | 111 ++++++++++--------
2 files changed, 92 insertions(+), 53 deletions(-)
diff --git a/dirsrvtests/tests/suites/filter/large_filter_test.py b/dirsrvtests/tests/suites/filter/large_filter_test.py
index ecc7bf979..40526bb16 100644
--- a/dirsrvtests/tests/suites/filter/large_filter_test.py
+++ b/dirsrvtests/tests/suites/filter/large_filter_test.py
@@ -13,19 +13,29 @@ verify and testing Filter from a search
import os
import pytest
+import ldap
-from lib389._constants import PW_DM
+from lib389._constants import PW_DM, DEFAULT_SUFFIX, ErrorLog
from lib389.topologies import topology_st as topo
from lib389.idm.user import UserAccounts, UserAccount
from lib389.idm.account import Accounts
from lib389.backend import Backends
from lib389.idm.domain import Domain
+from lib389.utils import get_ldapurl_from_serverid
SUFFIX = 'dc=anuj,dc=com'
pytestmark = pytest.mark.tier1
+def open_new_ldapi_conn(dsinstance):
+ ldapurl, certdir = get_ldapurl_from_serverid(dsinstance)
+ assert 'ldapi://' in ldapurl
+ conn = ldap.initialize(ldapurl)
+ conn.sasl_interactive_bind_s("", ldap.sasl.external())
+ return conn
+
+
@pytest.fixture(scope="module")
def _create_entries(request, topo):
"""
@@ -160,6 +170,28 @@ def test_large_filter(topo, _create_entries, real_value):
assert len(Accounts(conn, SUFFIX).filter(real_value)) == 3
+def test_long_filter_value(topo):
+ """Exercise large eq filter with dn syntax attributes
+
+ :id: b069ef72-fcc3-11ee-981c-482ae39447e5
+ :setup: Standalone
+ :steps:
+ 1. Try to pass filter rules as per the condition.
+ :expectedresults:
+ 1. Pass
+ """
+ inst = topo.standalone
+ conn = open_new_ldapi_conn(inst.serverid)
+ inst.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE,ErrorLog.SEARCH_FILTER))
+ filter_value = "a\x1Edmin" * 1025
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
+ filter_value = "aAdmin" * 1025
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
+ filter_value = "*"
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
+ inst.config.loglevel(vals=(ErrorLog.DEFAULT,))
+
+
if __name__ == '__main__':
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s -v %s" % CURRENT_FILE)
diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c
index 410db23d1..30fa09ebb 100644
--- a/ldap/servers/slapd/back-ldbm/index.c
+++ b/ldap/servers/slapd/back-ldbm/index.c
@@ -71,6 +71,32 @@ typedef struct _index_buffer_handle index_buffer_handle;
#define INDEX_BUFFER_FLAG_SERIALIZE 1
#define INDEX_BUFFER_FLAG_STATS 2
+/*
+ * space needed to encode a byte:
+ * 0x00-0x1f and 0x7f-0xff require 3 bytes: \xx
+ * 0x22 and 0x5C require 2 bytes: \" and \\
+ * all others require 1 byte: c
+ */
+static char encode_size[] = {
+ /* 0x00 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0x10 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0x20 */ 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1,
+ /* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
+ /* 0x80 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0x90 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0xA0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0xB0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0xC0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0xD0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0xE0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0xF0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+};
+
+
/* Index buffering functions */
static int
@@ -799,65 +825,46 @@ index_add_mods(
/*
* Convert a 'struct berval' into a displayable ASCII string
+ * returns the printable string
*/
-
-#define SPECIAL(c) (c < 32 || c > 126 || c == '\\' || c == '"')
-
const char *
encode(const struct berval *data, char buf[BUFSIZ])
{
- char *s;
- char *last;
- if (data == NULL || data->bv_len == 0)
- return "";
- last = data->bv_val + data->bv_len - 1;
- for (s = data->bv_val; s < last; ++s) {
- if (SPECIAL(*s)) {
- char *first = data->bv_val;
- char *bufNext = buf;
- size_t bufSpace = BUFSIZ - 4;
- while (1) {
- /* printf ("%lu bytes ASCII\n", (unsigned long)(s - first)); */
- if (bufSpace < (size_t)(s - first))
- s = first + bufSpace - 1;
- if (s != first) {
- memcpy(bufNext, first, s - first);
- bufNext += (s - first);
- bufSpace -= (s - first);
- }
- do {
- if (bufSpace) {
- *bufNext++ = '\\';
- --bufSpace;
- }
- if (bufSpace < 2) {
- memcpy(bufNext, "..", 2);
- bufNext += 2;
- goto bail;
- }
- if (*s == '\\' || *s == '"') {
- *bufNext++ = *s;
- --bufSpace;
- } else {
- sprintf(bufNext, "%02x", (unsigned)*(unsigned char *)s);
- bufNext += 2;
- bufSpace -= 2;
- }
- } while (++s <= last && SPECIAL(*s));
- if (s > last)
- break;
- first = s;
- while (!SPECIAL(*s) && s <= last)
- ++s;
- }
- bail:
- *bufNext = '\0';
- /* printf ("%lu chars in buffer\n", (unsigned long)(bufNext - buf)); */
+ if (!data || !data->bv_val) {
+ strcpy(buf, "<NULL>");
+ return buf;
+ }
+ char *endbuff = &buf[BUFSIZ-4]; /* Reserve space to append "...\0" */
+ char *ptout = buf;
+ unsigned char *ptin = (unsigned char*) data->bv_val;
+ unsigned char *endptin = ptin+data->bv_len;
+
+ while (ptin < endptin) {
+ if (ptout >= endbuff) {
+ /*
+ * BUFSIZ(8K) > SLAPI_LOG_BUFSIZ(2K) so the error log message will be
+ * truncated anyway. So there is no real interrest to test if the original
+ * data contains no special characters and return it as is.
+ */
+ strcpy(endbuff, "...");
return buf;
}
+ switch (encode_size[*ptin]) {
+ case 1:
+ *ptout++ = *ptin++;
+ break;
+ case 2:
+ *ptout++ = '\\';
+ *ptout++ = *ptin++;
+ break;
+ case 3:
+ sprintf(ptout, "\\%02x", *ptin++);
+ ptout += 3;
+ break;
+ }
}
- /* printf ("%lu bytes, all ASCII\n", (unsigned long)(s - data->bv_val)); */
- return data->bv_val;
+ *ptout = 0;
+ return buf;
}
static const char *
--
2.45.0
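
For readers who want to poke at the new bounded escaping outside the server, a rough Python model of the rewritten encode() (BUFSIZ assumed to be 8K, as in the comment above):

    BUFSIZ = 8192

    def encode(data: bytes) -> str:
        out, used, limit = [], 0, BUFSIZ - 4   # reserve room for "..."
        for b in data:
            if b in (0x22, 0x5C):              # '"' and backslash take 2 bytes
                piece = "\\" + chr(b)
            elif 0x20 <= b <= 0x7E:            # printable, copied as-is
                piece = chr(b)
            else:                              # control/non-ASCII -> \hh
                piece = "\\%02x" % b
            if used + len(piece) >= limit:     # stop before overflowing
                return "".join(out) + "..."
            out.append(piece)
            used += len(piece)
        return "".join(out)

    assert encode(b'ab"c\x01') == 'ab\\"c\\01'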

View File

@ -0,0 +1,566 @@
From 769e71499880a0820424bf925c0f0fe793e11cc8 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Fri, 28 Jun 2024 18:56:49 +0200
Subject: [PATCH] Issue 6229 - After an initial failure, subsequent online
backups fail (#6230)
* Issue 6229 - After an initial failure, subsequent online backups will not work
Several issues related to backup task error handling:
- Backends stay busy after the failure
- Exit code is 0 in some cases
- Crash if failing to open the backup directory
And a more general one:
- lib389 Task DN collision
Solutions:
- Always reset the busy flags that have been set
- Ensure that 0 is not returned in the error case
- Avoid closing a NULL directory descriptor
- Use a timestamp with millisecond precision to create the task DN
Issue: #6229
Reviewed by: @droideck (Thanks!)
(cherry picked from commit 04a0b6ac776a1d588ec2e10ff651e5015078ad21)
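
The DN collision is easy to see with second-resolution names; a quick illustration of why switching to isoformat() helps:

    from datetime import datetime

    t = datetime.now()
    print(t.strftime("%Y_%m_%d_%H_%M_%S"))  # e.g. 2024_06_28_18_56_49: two
                                            # tasks in the same second get
                                            # the same name, hence the same DN
    print(t.isoformat())                    # e.g. 2024-06-28T18:56:49.123456:
                                            # sub-second precision keeps task
                                            # DNs unique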
---
ldap/servers/slapd/back-ldbm/archive.c | 45 +++++-----
.../slapd/back-ldbm/db-mdb/mdb_layer.c | 3 +
src/lib389/lib389/__init__.py | 10 +--
src/lib389/lib389/tasks.py | 82 +++++++++----------
4 files changed, 70 insertions(+), 70 deletions(-)
diff --git a/ldap/servers/slapd/back-ldbm/archive.c b/ldap/servers/slapd/back-ldbm/archive.c
index 0460a42f6..6658cc80a 100644
--- a/ldap/servers/slapd/back-ldbm/archive.c
+++ b/ldap/servers/slapd/back-ldbm/archive.c
@@ -16,6 +16,8 @@
#include "back-ldbm.h"
#include "dblayer.h"
+#define NO_OBJECT ((Object*)-1)
+
int
ldbm_temporary_close_all_instances(Slapi_PBlock *pb)
{
@@ -270,6 +272,7 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
int run_from_cmdline = 0;
Slapi_Task *task;
struct stat sbuf;
+ Object *last_busy_inst_obj = NO_OBJECT;
slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
slapi_pblock_get(pb, SLAPI_SEQ_VAL, &rawdirectory);
@@ -380,13 +383,12 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
/* to avoid conflict w/ import, do this check for commandline, as well */
{
- Object *inst_obj, *inst_obj2;
ldbm_instance *inst = NULL;
/* server is up -- mark all backends busy */
- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
- inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
- inst = (ldbm_instance *)object_get_data(inst_obj);
+ for (last_busy_inst_obj = objset_first_obj(li->li_instance_set); last_busy_inst_obj;
+ last_busy_inst_obj = objset_next_obj(li->li_instance_set, last_busy_inst_obj)) {
+ inst = (ldbm_instance *)object_get_data(last_busy_inst_obj);
/* check if an import/restore is already ongoing... */
if (instance_set_busy(inst) != 0 || dblayer_in_import(inst) != 0) {
@@ -400,20 +402,6 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
"another task and cannot be disturbed.",
inst->inst_name);
}
-
- /* painfully, we have to clear the BUSY flags on the
- * backends we'd already marked...
- */
- for (inst_obj2 = objset_first_obj(li->li_instance_set);
- inst_obj2 && (inst_obj2 != inst_obj);
- inst_obj2 = objset_next_obj(li->li_instance_set,
- inst_obj2)) {
- inst = (ldbm_instance *)object_get_data(inst_obj2);
- instance_set_not_busy(inst);
- }
- if (inst_obj2 && inst_obj2 != inst_obj)
- object_release(inst_obj2);
- object_release(inst_obj);
goto err;
}
}
@@ -427,18 +415,26 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
goto err;
}
- if (!run_from_cmdline) {
+err:
+ /* Clear all BUSY flags that have been previously set */
+ if (last_busy_inst_obj != NO_OBJECT) {
ldbm_instance *inst;
Object *inst_obj;
- /* none of these backends are busy anymore */
- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
+ for (inst_obj = objset_first_obj(li->li_instance_set);
+ inst_obj && (inst_obj != last_busy_inst_obj);
inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
inst = (ldbm_instance *)object_get_data(inst_obj);
instance_set_not_busy(inst);
}
+ if (last_busy_inst_obj != NULL) {
+ /* release last seen object for aborted objset_next_obj iterations */
+ if (inst_obj != NULL) {
+ object_release(inst_obj);
+ }
+ object_release(last_busy_inst_obj);
+ }
}
-err:
if (return_value) {
if (dir_bak) {
slapi_log_err(SLAPI_LOG_ERR,
@@ -727,7 +723,10 @@ ldbm_archive_config(char *bakdir, Slapi_Task *task)
}
error:
- PR_CloseDir(dirhandle);
+ if (NULL != dirhandle) {
+ PR_CloseDir(dirhandle);
+ dirhandle = NULL;
+ }
dse_backup_unlock();
slapi_ch_free_string(&backup_config_dir);
slapi_ch_free_string(&dse_file);
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
index 70a289bdb..de4161b0c 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
@@ -983,6 +983,9 @@ dbmdb_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task)
if (ldbm_archive_config(dest_dir, task) != 0) {
slapi_log_err(SLAPI_LOG_ERR, "dbmdb_backup",
"Backup of config files failed or is incomplete\n");
+ if (0 == return_value) {
+ return_value = -1;
+ }
}
goto bail;
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 368741a66..cb372c138 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -69,7 +69,7 @@ from lib389.utils import (
get_user_is_root)
from lib389.paths import Paths
from lib389.nss_ssl import NssSsl
-from lib389.tasks import BackupTask, RestoreTask
+from lib389.tasks import BackupTask, RestoreTask, Task
from lib389.dseldif import DSEldif
# mixin
@@ -1424,7 +1424,7 @@ class DirSrv(SimpleLDAPObject, object):
name, self.ds_paths.prefix)
# create the archive
- name = "backup_%s_%s.tar.gz" % (self.serverid, time.strftime("%m%d%Y_%H%M%S"))
+ name = "backup_%s_%s.tar.gz" % (self.serverid, Task.get_timestamp())
backup_file = os.path.join(backup_dir, name)
tar = tarfile.open(backup_file, "w:gz")
tar.extraction_filter = (lambda member, path: member)
@@ -2810,7 +2810,7 @@ class DirSrv(SimpleLDAPObject, object):
else:
# No output file specified. Use the default ldif location/name
cmd.append('-a')
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
+ tnow = Task.get_timestamp()
if bename:
ldifname = os.path.join(self.ds_paths.ldif_dir, "%s-%s-%s.ldif" % (self.serverid, bename, tnow))
else:
@@ -2881,7 +2881,7 @@ class DirSrv(SimpleLDAPObject, object):
if archive_dir is None:
# Use the instance name and date/time as the default backup name
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
+ tnow = Task.get_timestamp()
archive_dir = os.path.join(self.ds_paths.backup_dir, "%s-%s" % (self.serverid, tnow))
elif not archive_dir.startswith("/"):
# Relative path, append it to the bak directory
@@ -3506,7 +3506,7 @@ class DirSrv(SimpleLDAPObject, object):
if archive is None:
# Use the instance name and date/time as the default backup name
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
+ tnow = Task.get_timestamp()
if self.serverid is not None:
backup_dir_name = "%s-%s" % (self.serverid, tnow)
else:
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
index 6c2adb5b2..6bf302862 100644
--- a/src/lib389/lib389/tasks.py
+++ b/src/lib389/lib389/tasks.py
@@ -118,7 +118,7 @@ class Task(DSLdapObject):
return super(Task, self).create(rdn, properties, basedn)
@staticmethod
- def _get_task_date():
+ def get_timestamp():
"""Return a timestamp to use in naming new task entries."""
return datetime.now().isoformat()
@@ -132,7 +132,7 @@ class AutomemberRebuildMembershipTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'automember_rebuild_' + Task._get_task_date()
+ self.cn = 'automember_rebuild_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_REBUILD_TASK
super(AutomemberRebuildMembershipTask, self).__init__(instance, dn)
@@ -147,7 +147,7 @@ class AutomemberAbortRebuildTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'automember_abort_' + Task._get_task_date()
+ self.cn = 'automember_abort_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_ABORT_REBUILD_TASK
super(AutomemberAbortRebuildTask, self).__init__(instance, dn)
@@ -161,7 +161,7 @@ class FixupLinkedAttributesTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'fixup_linked_attrs_' + Task._get_task_date()
+ self.cn = 'fixup_linked_attrs_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_FIXUP_LINKED_ATTIBUTES
super(FixupLinkedAttributesTask, self).__init__(instance, dn)
@@ -175,7 +175,7 @@ class MemberUidFixupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'memberUid_fixup_' + Task._get_task_date()
+ self.cn = 'memberUid_fixup_' + Task.get_timestamp()
dn = f"cn={self.cn},cn=memberuid task,cn=tasks,cn=config"
super(MemberUidFixupTask, self).__init__(instance, dn)
@@ -190,7 +190,7 @@ class MemberOfFixupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'memberOf_fixup_' + Task._get_task_date()
+ self.cn = 'memberOf_fixup_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_MBO_TASK
super(MemberOfFixupTask, self).__init__(instance, dn)
@@ -205,7 +205,7 @@ class USNTombstoneCleanupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'usn_cleanup_' + Task._get_task_date()
+ self.cn = 'usn_cleanup_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=USN tombstone cleanup task," + DN_TASKS
super(USNTombstoneCleanupTask, self).__init__(instance, dn)
@@ -225,7 +225,7 @@ class csngenTestTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'csngenTest_' + Task._get_task_date()
+ self.cn = 'csngenTest_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=csngen_test," + DN_TASKS
super(csngenTestTask, self).__init__(instance, dn)
@@ -238,7 +238,7 @@ class EntryUUIDFixupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'entryuuid_fixup_' + Task._get_task_date()
+ self.cn = 'entryuuid_fixup_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_EUUID_TASK
super(EntryUUIDFixupTask, self).__init__(instance, dn)
self._must_attributes.extend(['basedn'])
@@ -252,7 +252,7 @@ class DBCompactTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'compact_db_' + Task._get_task_date()
+ self.cn = 'compact_db_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_COMPACTDB_TASK
super(DBCompactTask, self).__init__(instance, dn)
@@ -265,7 +265,7 @@ class SchemaReloadTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'schema_reload_' + Task._get_task_date()
+ self.cn = 'schema_reload_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=schema reload task," + DN_TASKS
super(SchemaReloadTask, self).__init__(instance, dn)
@@ -278,7 +278,7 @@ class SyntaxValidateTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'syntax_validate_' + Task._get_task_date()
+ self.cn = 'syntax_validate_' + Task.get_timestamp()
dn = f"cn={self.cn},cn=syntax validate,cn=tasks,cn=config"
super(SyntaxValidateTask, self).__init__(instance, dn)
@@ -295,7 +295,7 @@ class AbortCleanAllRUVTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'abortcleanallruv_' + Task._get_task_date()
+ self.cn = 'abortcleanallruv_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=abort cleanallruv," + DN_TASKS
super(AbortCleanAllRUVTask, self).__init__(instance, dn)
@@ -312,7 +312,7 @@ class CleanAllRUVTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'cleanallruv_' + Task._get_task_date()
+ self.cn = 'cleanallruv_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=cleanallruv," + DN_TASKS
self._properties = None
@@ -359,7 +359,7 @@ class ImportTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'import_' + Task._get_task_date()
+ self.cn = 'import_' + Task.get_timestamp()
dn = "cn=%s,%s" % (self.cn, DN_IMPORT_TASK)
self._properties = None
@@ -388,7 +388,7 @@ class ExportTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'export_' + Task._get_task_date()
+ self.cn = 'export_' + Task.get_timestamp()
dn = "cn=%s,%s" % (self.cn, DN_EXPORT_TASK)
self._properties = None
@@ -411,7 +411,7 @@ class BackupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'backup_' + Task._get_task_date()
+ self.cn = 'backup_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=backup," + DN_TASKS
self._properties = None
@@ -426,7 +426,7 @@ class RestoreTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'restore_' + Task._get_task_date()
+ self.cn = 'restore_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=restore," + DN_TASKS
self._properties = None
@@ -513,7 +513,7 @@ class Tasks(object):
raise ValueError("Import file (%s) does not exist" % input_file)
# Prepare the task entry
- cn = "import_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = "import_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_IMPORT_TASK)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -581,7 +581,7 @@ class Tasks(object):
raise ValueError("output_file is mandatory")
# Prepare the task entry
- cn = "export_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = "export_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_EXPORT_TASK)
entry = Entry(dn)
entry.update({
@@ -637,7 +637,7 @@ class Tasks(object):
raise ValueError("You must specify a backup directory.")
# build the task entry
- cn = "backup_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = "backup_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_BACKUP_TASK)
entry = Entry(dn)
entry.update({
@@ -694,7 +694,7 @@ class Tasks(object):
raise ValueError("Backup file (%s) does not exist" % backup_dir)
# build the task entry
- cn = "restore_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = "restore_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_RESTORE_TASK)
entry = Entry(dn)
entry.update({
@@ -789,7 +789,7 @@ class Tasks(object):
attrs.append(attr)
else:
attrs.append(attrname)
- cn = "index_vlv_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
+ cn = "index_vlv_%s" % (Task.get_timestamp())
dn = "cn=%s,%s" % (cn, DN_INDEX_TASK)
entry = Entry(dn)
entry.update({
@@ -803,7 +803,7 @@ class Tasks(object):
#
# Reindex all attributes - gather them first...
#
- cn = "index_all_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
+ cn = "index_all_%s" % (Task.get_timestamp())
dn = ('cn=%s,cn=ldbm database,cn=plugins,cn=config' % backend)
try:
indexes = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, '(objectclass=nsIndex)')
@@ -815,7 +815,7 @@ class Tasks(object):
#
# Reindex specific attributes
#
- cn = "index_attrs_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
+ cn = "index_attrs_%s" % (Task.get_timestamp())
if isinstance(attrname, (tuple, list)):
# Need to guarantee this is a list (and not a tuple)
for attr in attrname:
@@ -903,8 +903,7 @@ class Tasks(object):
suffix = ents[0].getValue(attr)
- cn = "fixupmemberof_" + time.strftime("%m%d%Y_%H%M%S",
- time.localtime())
+ cn = "fixupmemberof_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_MBO_TASK)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -965,8 +964,7 @@ class Tasks(object):
if len(ents) != 1:
raise ValueError("invalid backend name: %s" % bename)
- cn = "fixupTombstone_" + time.strftime("%m%d%Y_%H%M%S",
- time.localtime())
+ cn = "fixupTombstone_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_TOMB_FIXUP_TASK)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1019,7 +1017,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=automember rebuild membership,cn=tasks,cn=config' % cn)
entry = Entry(dn)
@@ -1077,7 +1075,7 @@ class Tasks(object):
if not ldif_out:
raise ValueError("Missing ldif_out")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=automember export updates,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1129,7 +1127,7 @@ class Tasks(object):
if not ldif_out or not ldif_in:
raise ValueError("Missing ldif_out and/or ldif_in")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=automember map updates,cn=tasks,cn=config' % cn)
entry = Entry(dn)
@@ -1175,7 +1173,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=fixup linked attributes,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1219,7 +1217,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=schema reload task,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1264,7 +1262,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=memberuid task,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1311,7 +1309,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=syntax validate,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1358,7 +1356,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=USN tombstone cleanup task,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1413,7 +1411,7 @@ class Tasks(object):
if not configfile:
raise ValueError("Missing required paramter: configfile")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=sysconfig reload,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1464,7 +1462,7 @@ class Tasks(object):
if not suffix:
raise ValueError("Missing required paramter: suffix")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=cleanallruv,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1516,7 +1514,7 @@ class Tasks(object):
if not suffix:
raise ValueError("Missing required paramter: suffix")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=abort cleanallruv,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1571,7 +1569,7 @@ class Tasks(object):
if not nsArchiveDir:
raise ValueError("Missing required paramter: nsArchiveDir")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=upgradedb,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1616,6 +1614,6 @@ class LDAPIMappingReloadTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'reload-' + Task._get_task_date()
+ self.cn = 'reload-' + Task.get_timestamp()
dn = f'cn={self.cn},cn=reload ldapi mappings,cn=tasks,cn=config'
super(LDAPIMappingReloadTask, self).__init__(instance, dn)
--
2.48.0
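
The staticmethod rename above makes the timestamp helper public so the Task subclasses and the legacy Tasks wrappers share one naming scheme. A minimal sketch of that pattern, with the class and the task DN reduced to illustrative stand-ins for the lib389 ones:

    from datetime import datetime

    class Task:
        @staticmethod
        def get_timestamp():
            """Return an ISO-8601 timestamp used to build unique task RDNs."""
            return datetime.now().isoformat()

    # Each task entry gets a unique, sortable name, e.g.
    # cn=export_2025-01-28T21:05:49.123456,cn=export,cn=tasks,cn=config
    cn = "export_" + Task.get_timestamp()
    print("cn=%s,cn=export,cn=tasks,cn=config" % cn)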

View File

@ -1,143 +0,0 @@
From 6e5f03d5872129963106024f53765234a282406c Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Fri, 16 Feb 2024 11:13:16 +0000
Subject: [PATCH] Issue 6096 - Improve connection timeout error logging (#6097)
Bug description: When a paged result search is run with a time limit,
and the time limit is exceeded, the server closes the connection with
a closed IO timeout (nsslapd-ioblocktimeout) - T2. This error message
is incorrect, as the connection was actually closed because the
specified time limit on a paged result search was exceeded.
Fix description: Correct the error message.
Relates: https://github.com/389ds/389-ds-base/issues/6096
Reviewed by: @tbordaz (Thank you)
---
ldap/admin/src/logconv.pl | 24 ++++++++++++++++++-
ldap/servers/slapd/daemon.c | 4 ++--
ldap/servers/slapd/disconnect_error_strings.h | 1 +
ldap/servers/slapd/disconnect_errors.h | 2 +-
4 files changed, 27 insertions(+), 4 deletions(-)
diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
index 7698c383a..2a933c4a3 100755
--- a/ldap/admin/src/logconv.pl
+++ b/ldap/admin/src/logconv.pl
@@ -267,7 +267,7 @@ my $optimeAvg = 0;
my %cipher = ();
my @removefiles = ();
-my @conncodes = qw(A1 B1 B4 T1 T2 B2 B3 R1 P1 P2 U1);
+my @conncodes = qw(A1 B1 B4 T1 T2 T3 B2 B3 R1 P1 P2 U1);
my %conn = ();
map {$conn{$_} = $_} @conncodes;
@@ -355,6 +355,7 @@ $connmsg{"B1"} = "Bad Ber Tag Encountered";
$connmsg{"B4"} = "Server failed to flush data (response) back to Client";
$connmsg{"T1"} = "Idle Timeout Exceeded";
$connmsg{"T2"} = "IO Block Timeout Exceeded or NTSSL Timeout";
+$connmsg{"T3"} = "Paged Search Time Limit Exceeded";
$connmsg{"B2"} = "Ber Too Big";
$connmsg{"B3"} = "Ber Peek";
$connmsg{"R1"} = "Revents";
@@ -1723,6 +1724,10 @@ if ($usage =~ /j/i || $verb eq "yes"){
print "\n $recCount. You have some coonections that are being closed by the ioblocktimeout setting. You may want to increase the ioblocktimeout.\n";
$recCount++;
}
+ if (defined($conncount->{"T3"}) and $conncount->{"T3"} > 0){
+ print "\n $recCount. You have some connections that are being closed because a paged result search limit has been exceeded. You may want to increase the search time limit.\n";
+ $recCount++;
+ }
# compare binds to unbinds, if the difference is more than 30% of the binds, then report an issue
if (($bindCount - $unbindCount) > ($bindCount*.3)){
print "\n $recCount. You have a significant difference between binds and unbinds. You may want to investigate this difference.\n";
@@ -2366,6 +2371,7 @@ sub parseLineNormal
$brokenPipeCount++;
if (m/- T1/){ $hashes->{rc}->{"T1"}++; }
elsif (m/- T2/){ $hashes->{rc}->{"T2"}++; }
+ elsif (m/- T3/){ $hashes->{rc}->{"T3"}++; }
elsif (m/- A1/){ $hashes->{rc}->{"A1"}++; }
elsif (m/- B1/){ $hashes->{rc}->{"B1"}++; }
elsif (m/- B4/){ $hashes->{rc}->{"B4"}++; }
@@ -2381,6 +2387,7 @@ sub parseLineNormal
$connResetByPeerCount++;
if (m/- T1/){ $hashes->{src}->{"T1"}++; }
elsif (m/- T2/){ $hashes->{src}->{"T2"}++; }
+ elsif (m/- T3/){ $hashes->{src}->{"T3"}++; }
elsif (m/- A1/){ $hashes->{src}->{"A1"}++; }
elsif (m/- B1/){ $hashes->{src}->{"B1"}++; }
elsif (m/- B4/){ $hashes->{src}->{"B4"}++; }
@@ -2396,6 +2403,7 @@ sub parseLineNormal
$resourceUnavailCount++;
if (m/- T1/){ $hashes->{rsrc}->{"T1"}++; }
elsif (m/- T2/){ $hashes->{rsrc}->{"T2"}++; }
+ elsif (m/- T3/){ $hashes->{rsrc}->{"T3"}++; }
elsif (m/- A1/){ $hashes->{rsrc}->{"A1"}++; }
elsif (m/- B1/){ $hashes->{rsrc}->{"B1"}++; }
elsif (m/- B4/){ $hashes->{rsrc}->{"B4"}++; }
@@ -2494,6 +2502,20 @@ sub parseLineNormal
}
}
}
+ if (m/- T3/){
+ if ($_ =~ /conn= *([0-9A-Z]+)/i) {
+ $exc = "no";
+ $ip = getIPfromConn($1, $serverRestartCount);
+ for (my $xxx = 0; $xxx < $#excludeIP; $xxx++){
+ if ($ip eq $excludeIP[$xxx]){$exc = "yes";}
+ }
+ if ($exc ne "yes"){
+ $hashes->{T3}->{$ip}++;
+ $hashes->{conncount}->{"T3"}++;
+ $connCodeCount++;
+ }
+ }
+ }
if (m/- B2/){
if ($_ =~ /conn= *([0-9A-Z]+)/i) {
$exc = "no";
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 5a48aa66f..bb80dae36 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1599,9 +1599,9 @@ setup_pr_read_pds(Connection_Table *ct)
int add_fd = 1;
/* check timeout for PAGED RESULTS */
if (pagedresults_is_timedout_nolock(c)) {
- /* Exceeded the timelimit; disconnect the client */
+ /* Exceeded the paged search timelimit; disconnect the client */
disconnect_server_nomutex(c, c->c_connid, -1,
- SLAPD_DISCONNECT_IO_TIMEOUT,
+ SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
0);
connection_table_move_connection_out_of_active_list(ct,
c);
diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index f7a31d728..c2d9e283b 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -27,6 +27,7 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
ER2(SLAPD_DISCONNECT_REVENTS, "R1")
ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
ER2(SLAPD_DISCONNECT_UNBIND, "U1")
ER2(SLAPD_DISCONNECT_POLL, "P2")
diff --git a/ldap/servers/slapd/disconnect_errors.h b/ldap/servers/slapd/disconnect_errors.h
index a0484f1c2..e118f674c 100644
--- a/ldap/servers/slapd/disconnect_errors.h
+++ b/ldap/servers/slapd/disconnect_errors.h
@@ -35,6 +35,6 @@
#define SLAPD_DISCONNECT_SASL_FAIL SLAPD_DISCONNECT_ERROR_BASE + 12
#define SLAPD_DISCONNECT_PROXY_INVALID_HEADER SLAPD_DISCONNECT_ERROR_BASE + 13
#define SLAPD_DISCONNECT_PROXY_UNKNOWN SLAPD_DISCONNECT_ERROR_BASE + 14
-
+#define SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT SLAPD_DISCONNECT_ERROR_BASE + 15
#endif /* __DISCONNECT_ERRORS_H_ */
--
2.45.0
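
The Perl changes above extend logconv.pl's per-code tallying to the new T3 disconnect reason. A compact Python sketch of that tallying pattern, assuming access-log lines carry a trailing "- T3" style marker (the log path and regex are illustrative, not taken from logconv.pl):

    import re
    from collections import Counter

    CONN_MSG = {
        "T1": "Idle Timeout Exceeded",
        "T2": "IO Block Timeout Exceeded or NTSSL Timeout",
        "T3": "Paged Search Time Limit Exceeded",
    }

    counts = Counter()
    with open("access") as logfile:          # assumed access log path
        for line in logfile:
            m = re.search(r"- (T[123])\b", line)
            if m:
                counts[m.group(1)] += 1      # tally the disconnect reason code

    for code, n in counts.most_common():
        print(f"{code} ({CONN_MSG[code]}): {n}")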

View File

@ -0,0 +1,165 @@
From b2511553590f0d9b41856d8baff5f3cd103dd46f Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 6 Feb 2025 18:25:36 +0100
Subject: [PATCH] Issue 6554 - During import of entries without nsUniqueId, a
supplier generates duplicate nsUniqueId (LMDB only) (#6582)
Bug description:
During an import, each entry is prepared (schema, operational
attributes, password encryption, ...) before the update of the
database and indexes starts.
One step of the preparation is to assign a value to the 'nsuniqueid'
operational attribute. 'nsuniqueid' must be unique.
In LMDB the preparation is done by multiple threads (workers).
In that case the 'nsuniqueid' values are generated in parallel and,
as they are time based, several values can be duplicated.
Fix description:
To prevent this, the routine dbmdb_import_generate_uniqueid
now synchronizes the workers.
fixes: #6554
Reviewed by: Pierre Rogier
---
.../tests/suites/import/import_test.py | 79 ++++++++++++++++++-
.../back-ldbm/db-mdb/mdb_import_threads.c | 11 +++
2 files changed, 89 insertions(+), 1 deletion(-)
diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py
index b7cba32fd..18caec633 100644
--- a/dirsrvtests/tests/suites/import/import_test.py
+++ b/dirsrvtests/tests/suites/import/import_test.py
@@ -14,11 +14,13 @@ import os
import pytest
import time
import glob
+import re
import logging
import subprocess
from datetime import datetime
from lib389.topologies import topology_st as topo
-from lib389._constants import DEFAULT_SUFFIX, TaskWarning
+from lib389.topologies import topology_m2 as topo_m2
+from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX, TaskWarning
from lib389.dbgen import dbgen_users
from lib389.tasks import ImportTask
from lib389.index import Indexes
@@ -690,6 +692,81 @@ def test_online_import_under_load(topo):
assert import_task.get_exit_code() == 0
+def test_duplicate_nsuniqueid(topo_m2, request):
+ """Test that after an offline import all
+ nsuniqueid are different
+
+ :id: a2541677-a288-4633-bacf-4050cc56016d
+ :setup: MMR with 2 suppliers
+ :steps:
+ 1. Stop the instance to do offline operations
+ 2. Generate a 5K users LDIF file
+ 3. Check that no uniqueid values are present in the generated file
+ 4. Import the generated LDIF
+ 5. Export the database
+ 6. Check that the exported LDIF contains more than 5K nsuniqueid values
+ 7. Check that there are no duplicate nsuniqueid values in the exported LDIF
+ :expectedresults:
+ 1. Should succeed
+ 2. Should succeed
+ 3. Should succeed
+ 4. Should succeed
+ 5. Should succeed
+ 6. Should succeed
+ 7. Should succeed
+ """
+ m1 = topo_m2.ms["supplier1"]
+
+ # Stop the instance
+ m1.stop()
+
+ # Generate a test ldif (5k entries)
+ log.info("Generating LDIF...")
+ ldif_dir = m1.get_ldif_dir()
+ import_ldif = ldif_dir + '/5k_users_import.ldif'
+ dbgen_users(m1, 5000, import_ldif, DEFAULT_SUFFIX)
+
+ # Check that the generated LDIF does not contain nsuniqueid
+ all_nsuniqueid = []
+ with open(import_ldif, 'r') as file:
+ for line in file:
+ if line.lower().startswith("nsuniqueid: "):
+ all_nsuniqueid.append(line.split(': ')[1])
+ log.info("import file contains " + str(len(all_nsuniqueid)) + " nsuniqueid")
+ assert len(all_nsuniqueid) == 0
+
+ # Import the "nsuniquied free" LDIF file
+ if not m1.ldif2db('userRoot', None, None, None, import_ldif):
+ assert False
+
+ # Export the DB that now should contain nsuniqueid
+ export_ldif = ldif_dir + '/5k_user_export.ldif'
+ log.info("export to file " + export_ldif)
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=export_ldif, encrypt=False)
+
+ # Check that the exported LDIF contains nsuniqueid
+ all_nsuniqueid = []
+ with open(export_ldif, 'r') as file:
+ for line in file:
+ if line.lower().startswith("nsuniqueid: "):
+ all_nsuniqueid.append(line.split(': ')[1])
+ log.info("export file " + export_ldif + " contains " + str(len(all_nsuniqueid)) + " nsuniqueid")
+ assert len(all_nsuniqueid) >= 5000
+
+ # Check that the nsuniqueid are unique
+ assert len(set(all_nsuniqueid)) == len(all_nsuniqueid)
+
+ def fin():
+ if os.path.exists(import_ldif):
+ os.remove(import_ldif)
+ if os.path.exists(export_ldif):
+ os.remove(export_ldif)
+ m1.start()
+
+ request.addfinalizer(fin)
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
index 707a110c5..0f445bb56 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
@@ -610,10 +610,20 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e)
{
const char *uniqueid = slapi_entry_get_uniqueid(e);
int rc = UID_SUCCESS;
+ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
if (!uniqueid && (job->uuid_gen_type != SLAPI_UNIQUEID_GENERATE_NONE)) {
char *newuniqueid;
+ /* With 'mdb' we have several workers generating nsuniqueid,
+ * so we need to serialize them to prevent generating duplicate values.
+ * From a performance point of view this only impacts import.
+ * The default value is SLAPI_UNIQUEID_GENERATE_TIME_BASED, so
+ * the only syscall is clock_gettime followed by string formatting,
+ * which should limit contention.
+ */
+ pthread_mutex_lock(&mutex);
+
/* generate id based on dn */
if (job->uuid_gen_type == SLAPI_UNIQUEID_GENERATE_NAME_BASED) {
char *dn = slapi_entry_get_dn(e);
@@ -624,6 +634,7 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e)
/* time based */
rc = slapi_uniqueIDGenerateString(&newuniqueid);
}
+ pthread_mutex_unlock(&mutex);
if (rc == UID_SUCCESS) {
slapi_entry_set_uniqueid(e, newuniqueid);
--
2.48.0
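
The C fix serializes the time-based generator behind a static pthread mutex. A Python model of the same idea, assuming a simplified nanosecond-clock ID (the real slapi_uniqueIDGenerateString produces a richer UUID):

    import threading
    import time

    _uid_lock = threading.Lock()
    _last_ns = 0

    def generate_time_based_id():
        """Time-based ID that stays unique across concurrent import workers."""
        global _last_ns
        with _uid_lock:              # mirrors the pthread_mutex in the C fix
            now = time.time_ns()
            if now <= _last_ns:      # clock did not advance between two calls
                now = _last_ns + 1   # bump so two workers can never collide
            _last_ns = now
            return "%016x" % now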

View File

@ -1,44 +0,0 @@
From a112394af3a20787755029804684d57a9c3ffa9a Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 21 Feb 2024 12:43:03 +0000
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap
(#6104)
Bug description: A recent addition to the connection disconnect error
messaging conflicts with how errormap.c maps error codes/strings.
Fix description: errormap expects error codes/strings to be in ascending
order. Moved the new error code to the bottom of the list.
Relates: https://github.com/389ds/389-ds-base/issues/6103
Reviewed by: @droideck. @progier389 (Thank you)
---
ldap/servers/slapd/disconnect_error_strings.h | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index c2d9e283b..f603a08ce 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -14,7 +14,8 @@
/* disconnect_error_strings.h
*
* Strings describing the errors used in logging the reason a connection
- * was closed.
+ * was closed. Ensure definitions are in the same order as the error codes
+ * defined in disconnect_errors.h
*/
#ifndef __DISCONNECT_ERROR_STRINGS_H_
#define __DISCONNECT_ERROR_STRINGS_H_
@@ -35,6 +36,6 @@ ER2(SLAPD_DISCONNECT_NTSSL_TIMEOUT, "T2")
ER2(SLAPD_DISCONNECT_SASL_FAIL, "S1")
ER2(SLAPD_DISCONNECT_PROXY_INVALID_HEADER, "P3")
ER2(SLAPD_DISCONNECT_PROXY_UNKNOWN, "P4")
-
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
#endif /* __DISCONNECT_ERROR_STRINGS_H_ */
--
2.45.0
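
The ascending-order requirement exists because errormap.c resolves a code through a search that assumes a sorted table. A hypothetical illustration of the constraint (the numeric codes here are invented; only the sortedness matters):

    import bisect

    # (code, tag) pairs kept sorted by code so binary search can find them
    TABLE = [(4, "B1"), (7, "B4"), (9, "T1"), (11, "T2"), (15, "T3")]

    def code_to_tag(code):
        keys = [c for c, _ in TABLE]
        i = bisect.bisect_left(keys, code)
        if i < len(keys) and keys[i] == code:
            return TABLE[i][1]
        return "unknown"

    # Inserting T3 out of order (e.g. code 15 ahead of 11) would make the
    # search miss entries, which is the breakage this patch avoids.
    print(code_to_tag(15))   # T3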

View File

@ -0,0 +1,38 @@
From 116b7cf21618ad7e717ae7f535709508a824f7d9 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Thu, 13 Feb 2025 16:37:43 +0100
Subject: [PATCH] Issue 6561 - TLS 1.2 stickiness in FIPS mode
Description:
TLS 1.3 has worked with NSS in FIPS mode for quite some time now,
so this restriction is no longer needed.
Fixes: https://github.com/389ds/389-ds-base/issues/6561
Reviewed by: @mreynolds389 (Thanks!)
---
ldap/servers/slapd/ssl.c | 8 --------
1 file changed, 8 deletions(-)
diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c
index 94259efe7..84a7fb004 100644
--- a/ldap/servers/slapd/ssl.c
+++ b/ldap/servers/slapd/ssl.c
@@ -1929,14 +1929,6 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
*/
sslStatus = SSL_VersionRangeGet(pr_sock, &slapdNSSVersions);
if (sslStatus == SECSuccess) {
- if (slapdNSSVersions.max > LDAP_OPT_X_TLS_PROTOCOL_TLS1_2 && fipsMode) {
- /*
- * FIPS & NSS currently only support a max version of TLS1.2
- * (although NSS advertises 1.3 as a max range in FIPS mode),
- * hopefully this code block can be removed soon...
- */
- slapdNSSVersions.max = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2;
- }
/* Reset request range */
sslStatus = SSL_VersionRangeSet(pr_sock, &slapdNSSVersions);
if (sslStatus == SECSuccess) {
--
2.48.1
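
For comparison only, the same "stop pinning the maximum version" change expressed with Python's ssl module rather than NSS; this is an analogy, not 389-ds code:

    import ssl

    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ctx.minimum_version = ssl.TLSVersion.TLSv1_2
    # No artificial cap at TLS 1.2: peers may now negotiate TLS 1.3,
    # which is what removing the FIPS special case in ssl.c achieves.
    ctx.maximum_version = ssl.TLSVersion.TLSv1_3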

View File

@ -1,30 +0,0 @@
From edd9abc8901604dde1d739d87ca2906734d53dd3 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Thu, 13 Jun 2024 13:35:09 +0200
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap
Description:
Remove duplicate SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT error code.
Fixes: https://github.com/389ds/389-ds-base/issues/6103
Reviewed by: @tbordaz (Thanks!)
---
ldap/servers/slapd/disconnect_error_strings.h | 1 -
1 file changed, 1 deletion(-)
diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index f603a08ce..d49cc79a2 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -28,7 +28,6 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
ER2(SLAPD_DISCONNECT_REVENTS, "R1")
ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
-ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
ER2(SLAPD_DISCONNECT_UNBIND, "U1")
ER2(SLAPD_DISCONNECT_POLL, "P2")
--
2.45.0

View File

@ -1,220 +0,0 @@
From 8cf981c00ae18d3efaeb10819282cd991621e9a2 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 22 May 2024 11:29:05 +0200
Subject: [PATCH] Issue 6172 - RFE: improve the performance of evaluation of
filter component when tested against a large valueset (like group members)
(#6173)
Bug description:
Before returning an entry (to a SRCH) the server checks that the entry matches the SRCH filter.
If a filter component (equality) is testing the value (ava) against a
large valueset (like uniquemember values), it takes a long time because
of the large number of values and required normalization of the values.
This can be improved by taking advantage of the sorted valueset. Sorted
valuesets were created to improve updates of large valuesets (groups) but
were, at that time, not used in the SRCH path.
Fix description:
In the case of an LDAP_FILTER_EQUALITY component, the server can
benefit from the sorted valuearray.
To limit the risk of regression, we use the sorted valuearray
only for DN syntax attributes, for which the sorted valueset
was designed.
With those two limitations, there is no need for a toggle and
the call to plugin_call_syntax_filter_ava can be replaced by
a call to slapi_valueset_find.
In both cases (sorted valueset and plugin_call_syntax_filter_ava) the ava
and the values are normalized.
In a sorted valueset, the values were normalized when their indexes were
inserted into the sorted array, so the comparison is done on normalized values.
In plugin_call_syntax_filter_ava, all values in the valuearray (of the
valueset) are normalized before comparison.
relates: #6172
Reviewed by: Pierre Rogier, Simon Pichugin (Big Thanks !!!)
---
.../tests/suites/filter/filter_test.py | 125 ++++++++++++++++++
ldap/servers/slapd/filterentry.c | 22 ++-
2 files changed, 146 insertions(+), 1 deletion(-)
diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py
index d6bfa5a3b..4baaf04a7 100644
--- a/dirsrvtests/tests/suites/filter/filter_test.py
+++ b/dirsrvtests/tests/suites/filter/filter_test.py
@@ -9,7 +9,11 @@
import logging
import pytest
+import time
+from lib389.dirsrv_log import DirsrvAccessLog
from lib389.tasks import *
+from lib389.backend import Backends, Backend
+from lib389.dbgen import dbgen_users, dbgen_groups
from lib389.topologies import topology_st
from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX
from lib389.utils import *
@@ -304,6 +308,127 @@ def test_extended_search(topology_st):
ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
assert len(ents) == 1
+def test_match_large_valueset(topology_st):
+ """Test that when returning a big number of entries
+ and that we need to match the filter from a large valueset
+ we get benefit to use the sorted valueset
+
+ :id: 7db5aa88-50e0-4c31-85dd-1d2072cb674c
+
+ :setup: Standalone instance
+
+ :steps:
+ 1. Create users and groups backends and tune them
+ 2. Generate a test ldif (2k users and 1K groups with all users)
+ 3. Import test ldif file using Offline import (ldif2db).
+ 4. Prime the 'groups' entrycache with a "fast" search
+ 5. Search the 'groups' with a difficult matching value
+ 6. check that etime from step 5 is less than a second
+
+ :expectedresults:
+ 1. Creating the users and groups backends should PASS
+ 2. Generate LDIF should PASS.
+ 3. Offline import should PASS.
+ 4. Priming should PASS.
+ 5. Performance search should PASS.
+ 6. Etime of performance search should PASS.
+ """
+
+ log.info('Running test_match_large_valueset...')
+ #
+ # Test online/offline LDIF imports
+ #
+ inst = topology_st.standalone
+ inst.start()
+ backends = Backends(inst)
+ users_suffix = "ou=users,%s" % DEFAULT_SUFFIX
+ users_backend = 'users'
+ users_ldif = 'users_import.ldif'
+ groups_suffix = "ou=groups,%s" % DEFAULT_SUFFIX
+ groups_backend = 'groups'
+ groups_ldif = 'groups_import.ldif'
+ groups_entrycache = '200000000'
+ users_number = 2000
+ groups_number = 1000
+
+
+ # For priming the cache we just want to be fast
+ # taking the first value in the valueset is good
+ # whether the valueset is sorted or not
+ priming_user_rdn = "user0001"
+
+ # For performance testing, it is important to use
+ # user1000 rather than user0001
+ # Because user0001 is the first value in the valueset
+ # whether we use the sorted valuearray or non sorted
+ # valuearray the performance will be similar.
+ # With middle value user1000, the performance boost of
+ # the sorted valuearray will make the difference.
+ perf_user_rdn = "user1000"
+
+ # Step 1. Prepare the backends and tune the groups entrycache
+ try:
+ be_users = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': users_suffix, 'name': users_backend})
+ be_groups = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': groups_suffix, 'name': groups_backend})
+
+ # set the entry cache to 200Mb as the 1K groups of 2K users require at least 170Mb
+ be_groups.replace('nsslapd-cachememsize', groups_entrycache)
+ except:
+ raise
+
+ # Step 2. Generate a test ldif (2k user entries)
+ log.info("Generating users LDIF...")
+ ldif_dir = inst.get_ldif_dir()
+ users_import_ldif = "%s/%s" % (ldif_dir, users_ldif)
+ groups_import_ldif = "%s/%s" % (ldif_dir, groups_ldif)
+ dbgen_users(inst, users_number, users_import_ldif, suffix=users_suffix, generic=True, parent=users_suffix)
+
+ # Generate a test ldif (1k groups with 2k members each) that fits in the 200Mb entry cache
+ props = {
+ "name": "group",
+ "suffix": groups_suffix,
+ "parent": groups_suffix,
+ "number": groups_number,
+ "numMembers": users_number,
+ "createMembers": False,
+ "memberParent": users_suffix,
+ "membershipAttr": "uniquemember",
+ }
+ dbgen_groups(inst, groups_import_ldif, props)
+
+ # Step 3. Do the both offline imports
+ inst.stop()
+ if not inst.ldif2db(users_backend, None, None, None, users_import_ldif):
+ log.fatal('test_basic_import_export: Offline users import failed')
+ assert False
+ if not inst.ldif2db(groups_backend, None, None, None, groups_import_ldif):
+ log.fatal('test_basic_import_export: Offline groups import failed')
+ assert False
+ inst.start()
+
+ # Step 4. first prime the cache
+ # Just request the 'DN'. We are interested in the matching time, not the transfer time
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (priming_user_rdn, users_suffix), ['dn'])
+ assert len(entries) == groups_number
+
+ # Step 5. Now do the real performance checking it should take less than a second
+ # Just request the 'DN'. We are interested in the matching time, not the transfer time
+ search_start = time.time()
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (perf_user_rdn, users_suffix), ['dn'])
+ duration = time.time() - search_start
+ log.info("Duration of the search was %f", duration)
+
+ # Step 6. Gather the etime from the access log
+ inst.stop()
+ access_log = DirsrvAccessLog(inst)
+ search_result = access_log.match(".*RESULT err=0 tag=101 nentries=%s.*" % groups_number)
+ log.info("Found patterns are %s", search_result[0])
+ log.info("Found patterns are %s", search_result[1])
+ etime = float(search_result[1].split('etime=')[1])
+ log.info("Duration of the search from access log was %f", etime)
+ assert len(entries) == groups_number
+ assert (etime < 1)
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c
index fd8fdda9f..cae5c7edc 100644
--- a/ldap/servers/slapd/filterentry.c
+++ b/ldap/servers/slapd/filterentry.c
@@ -296,7 +296,27 @@ test_ava_filter(
rc = -1;
for (; a != NULL; a = a->a_next) {
if (slapi_attr_type_cmp(ava->ava_type, a->a_type, SLAPI_TYPE_CMP_SUBTYPE) == 0) {
- rc = plugin_call_syntax_filter_ava(a, ftype, ava);
+ if ((ftype == LDAP_FILTER_EQUALITY) &&
+ (slapi_attr_is_dn_syntax_type(a->a_type))) {
+ /* This path is for a performance improvement */
+
+ /* In the case of an equality filter we can benefit from the
+ * sorted valuearray (from the valueset).
+ * This improvement is limited to DN syntax attributes for
+ * which the sorted valueset was designed.
+ */
+ Slapi_Value *sval = NULL;
+ sval = slapi_value_new_berval(&ava->ava_value);
+ if (slapi_valueset_find((const Slapi_Attr *)a, &a->a_present_values, sval)) {
+ rc = 0;
+ }
+ slapi_value_free(&sval);
+ } else {
+ /* When sorted valuearray optimization cannot be used
+ * let's filter the value according to its syntax
+ */
+ rc = plugin_call_syntax_filter_ava(a, ftype, ava);
+ }
if (rc == 0) {
break;
}
--
2.46.0
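
A small model of the shortcut this patch enables: membership in a sorted array of normalized values is a binary search, instead of normalizing and comparing every value. Lower-casing stands in for DN normalization, and none of these names are the slapd ones:

    import bisect

    def normalize(value):
        return value.lower()                 # stand-in for DN normalization

    class SortedValueSet:
        def __init__(self, values):
            self._sorted = sorted(normalize(v) for v in values)

        def find(self, ava):                 # O(log n) equality test
            key = normalize(ava)
            i = bisect.bisect_left(self._sorted, key)
            return i < len(self._sorted) and self._sorted[i] == key

    members = SortedValueSet(
        "uid=user%04d,ou=users,dc=example,dc=com" % i for i in range(2000)
    )
    print(members.find("UID=user1000,OU=Users,DC=example,DC=com"))   # True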

View File

@ -1,163 +0,0 @@
From 57051154bafaf50b83fc27dadbd89a49fd1c8c36 Mon Sep 17 00:00:00 2001
From: Pierre Rogier <progier@redhat.com>
Date: Fri, 14 Jun 2024 13:27:10 +0200
Subject: [PATCH] Security fix for CVE-2024-5953
Description:
A denial of service vulnerability was found in the 389 Directory Server.
This issue may allow an authenticated user to cause a server denial
of service while attempting to log in with a user with a malformed hash
in their password.
Fix Description:
To prevent a buffer overflow when a bind request is processed, the bind now
fails if the stored hash size is not coherent, without even attempting to
process the hashed password any further.
References:
- https://nvd.nist.gov/vuln/detail/CVE-2024-5953
- https://access.redhat.com/security/cve/CVE-2024-5953
- https://bugzilla.redhat.com/show_bug.cgi?id=2292104
---
.../tests/suites/password/regression_test.py | 54 ++++++++++++++++++-
ldap/servers/plugins/pwdstorage/md5_pwd.c | 9 +++-
ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c | 6 +++
3 files changed, 66 insertions(+), 3 deletions(-)
diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py
index 8f1facb6d..1fa581643 100644
--- a/dirsrvtests/tests/suites/password/regression_test.py
+++ b/dirsrvtests/tests/suites/password/regression_test.py
@@ -7,12 +7,14 @@
#
import pytest
import time
+import glob
+import base64
from lib389._constants import PASSWORD, DN_DM, DEFAULT_SUFFIX
from lib389._constants import SUFFIX, PASSWORD, DN_DM, DN_CONFIG, PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DEFAULT_CHANGELOG_DB
from lib389 import Entry
from lib389.topologies import topology_m1 as topo_supplier
-from lib389.idm.user import UserAccounts
-from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer
+from lib389.idm.user import UserAccounts, UserAccount
+from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer, ds_supports_new_changelog
from lib389.topologies import topology_st as topo
from lib389.idm.organizationalunit import OrganizationalUnits
@@ -39,6 +41,13 @@ TEST_PASSWORDS += ['CNpwtest1ZZZZ', 'ZZZZZCNpwtest1',
TEST_PASSWORDS2 = (
'CN12pwtest31', 'SN3pwtest231', 'UID1pwtest123', 'MAIL2pwtest12@redhat.com', '2GN1pwtest123', 'People123')
+SUPPORTED_SCHEMES = (
+ "{SHA}", "{SSHA}", "{SHA256}", "{SSHA256}",
+ "{SHA384}", "{SSHA384}", "{SHA512}", "{SSHA512}",
+ "{crypt}", "{NS-MTA-MD5}", "{clear}", "{MD5}",
+ "{SMD5}", "{PBKDF2_SHA256}", "{PBKDF2_SHA512}",
+ "{GOST_YESCRYPT}", "{PBKDF2-SHA256}", "{PBKDF2-SHA512}" )
+
def _check_unhashed_userpw(inst, user_dn, is_present=False):
"""Check if unhashed#user#password attribute is present or not in the changelog"""
unhashed_pwd_attribute = 'unhashed#user#password'
@@ -319,6 +328,47 @@ def test_unhashed_pw_switch(topo_supplier):
# Add debugging steps(if any)...
pass
+@pytest.mark.parametrize("scheme", SUPPORTED_SCHEMES )
+def test_long_hashed_password(topo, create_user, scheme):
+ """Check that hashed password with very long value does not cause trouble
+
+ :id: 252a1f76-114b-11ef-8a7a-482ae39447e5
+ :setup: Standalone instance
+ :parametrized: yes
+ :steps:
+ 1. Add a test user
+ 2. Set a long password with requested scheme
+ 3. Bind on that user using a wrong password
+ 4. Check that instance is still alive
+ 5. Remove the added user
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Should get ldap.INVALID_CREDENTIALS exception
+ 4. Success
+ 5. Success
+ """
+ inst = topo.standalone
+ inst.simple_bind_s(DN_DM, PASSWORD)
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ # Make sure that the server is started, as this test may crash it
+ inst.start()
+ # Add the test user (it may already exist if a previous test failed)
+ user2 = UserAccount(inst, dn='uid=test_user_1002,ou=People,dc=example,dc=com')
+ if not user2.exists():
+ user2 = users.create_test_user(uid=1002, gid=2002)
+ # Setting hashed password
+ passwd = 'A'*4000
+ hashed_passwd = scheme.encode('utf-8') + base64.b64encode(passwd.encode('utf-8'))
+ user2.replace('userpassword', hashed_passwd)
+ # Bind on that user using a wrong password
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ conn = user2.bind(PASSWORD)
+ # Check that instance is still alive
+ assert inst.status()
+ # Remove the added user
+ user2.delete()
+
if __name__ == '__main__':
# Run isolated
diff --git a/ldap/servers/plugins/pwdstorage/md5_pwd.c b/ldap/servers/plugins/pwdstorage/md5_pwd.c
index 1e2cf58e7..b9a48d5ca 100644
--- a/ldap/servers/plugins/pwdstorage/md5_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/md5_pwd.c
@@ -37,6 +37,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
unsigned char hash_out[MD5_HASH_LEN];
unsigned char b2a_out[MD5_HASH_LEN * 2]; /* conservative */
SECItem binary_item;
+ size_t dbpwd_len = strlen(dbpwd);
ctx = PK11_CreateDigestContext(SEC_OID_MD5);
if (ctx == NULL) {
@@ -45,6 +46,12 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
goto loser;
}
+ if (dbpwd_len >= sizeof b2a_out) {
+ slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
+ "The hashed password stored in the user entry is longer than any valid md5 hash");
+ goto loser;
+ }
+
/* create the hash */
PK11_DigestBegin(ctx);
PK11_DigestOp(ctx, (const unsigned char *)userpwd, strlen(userpwd));
@@ -57,7 +64,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
bver = NSSBase64_EncodeItem(NULL, (char *)b2a_out, sizeof b2a_out, &binary_item);
/* bver points to b2a_out upon success */
if (bver) {
- rc = slapi_ct_memcmp(bver, dbpwd, strlen(dbpwd));
+ rc = slapi_ct_memcmp(bver, dbpwd, dbpwd_len);
} else {
slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
"Could not base64 encode hashed value for password compare");
diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
index dcac4fcdd..82b8c9501 100644
--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
@@ -255,6 +255,12 @@ pbkdf2_sha256_pw_cmp(const char *userpwd, const char *dbpwd)
passItem.data = (unsigned char *)userpwd;
passItem.len = strlen(userpwd);
+ if (pwdstorage_base64_decode_len(dbpwd, dbpwd_len) > sizeof dbhash) {
+ /* Hashed value is too long and cannot match any value generated by pbkdf2_sha256_hash */
+ slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value. (hashed value is too long)\n");
+ return result;
+ }
+
/* Decode the DBpwd to bytes from b64 */
if (PL_Base64Decode(dbpwd, dbpwd_len, dbhash) == NULL) {
slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value\n");
--
2.46.0
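
Both hunks boil down to one cheap guard: reject a stored hash that is longer than anything the scheme can produce, before touching fixed-size buffers. A schematic Python version of the MD5 case (the bound and names are illustrative, not the plugin's API):

    import base64
    import hashlib
    import hmac

    MAX_MD5_B64 = 24   # base64 of a 16-byte MD5 digest is always 24 chars

    def md5_pw_cmp(userpwd, dbpwd_b64):
        """Return True when userpwd hashes to the stored base64 value."""
        if len(dbpwd_b64) > MAX_MD5_B64:
            # Longer than any valid hash: fail early instead of overflowing
            # a fixed-size buffer, which is what the C fix prevents.
            return False
        digest = hashlib.md5(userpwd.encode()).digest()
        expected = base64.b64encode(digest).decode()
        return hmac.compare_digest(expected, dbpwd_b64)   # constant-time compare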

View File

@ -1,178 +0,0 @@
From e8a5b1deef1b455aafecb71efc029d2407b1b06f Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 16 Jul 2024 08:32:21 -0700
Subject: [PATCH] Issue 4778 - Add COMPACT_CL5 task to dsconf replication
(#6260)
Description: In 1.4.3, the changelog is not part of a backend.
It can be compacted with nsds5task: COMPACT_CL5 as part of the replication entry.
Add the task as a compact-changelog command under the dsconf replication tool.
Add tests for the feature and fix old tests.
Related: https://github.com/389ds/389-ds-base/issues/4778
Reviewed by: @progier389 (Thanks!)
---
.../tests/suites/config/compact_test.py | 36 ++++++++++++++---
src/lib389/lib389/cli_conf/replication.py | 10 +++++
src/lib389/lib389/replica.py | 40 +++++++++++++++++++
3 files changed, 81 insertions(+), 5 deletions(-)
diff --git a/dirsrvtests/tests/suites/config/compact_test.py b/dirsrvtests/tests/suites/config/compact_test.py
index 317258d0e..31d98d10c 100644
--- a/dirsrvtests/tests/suites/config/compact_test.py
+++ b/dirsrvtests/tests/suites/config/compact_test.py
@@ -13,14 +13,14 @@ import time
import datetime
from lib389.tasks import DBCompactTask
from lib389.backend import DatabaseConfig
-from lib389.replica import Changelog5
+from lib389.replica import Changelog5, Replicas
from lib389.topologies import topology_m1 as topo
log = logging.getLogger(__name__)
def test_compact_db_task(topo):
- """Specify a test case purpose or name here
+ """Test compaction of database
:id: 1b3222ef-a336-4259-be21-6a52f76e1859
:setup: Standalone Instance
@@ -48,7 +48,7 @@ def test_compact_db_task(topo):
def test_compaction_interval_and_time(topo):
- """Specify a test case purpose or name here
+ """Test compaction interval and time for database and changelog
:id: f361bee9-d7e7-4569-9255-d7b60dd9d92e
:setup: Supplier Instance
@@ -95,10 +95,36 @@ def test_compaction_interval_and_time(topo):
# Check compaction occurred as expected
time.sleep(45)
- assert not inst.searchErrorsLog("Compacting databases")
+ assert not inst.searchErrorsLog("compacting replication changelogs")
time.sleep(90)
- assert inst.searchErrorsLog("Compacting databases")
+ assert inst.searchErrorsLog("compacting replication changelogs")
+ inst.deleteErrorLogs(restart=False)
+
+
+def test_compact_cl5_task(topo):
+ """Test compaction of changelog5 database
+
+ :id: aadfa9f7-73c0-463a-912c-0a29aa1f8167
+ :setup: Standalone Instance
+ :steps:
+ 1. Run compaction task
+ 2. Check errors log to show task was run
+ :expectedresults:
+ 1. Success
+ 2. Success
+ """
+ inst = topo.ms["supplier1"]
+
+ replicas = Replicas(inst)
+ replicas.compact_changelog(log=log)
+
+ # Check compaction occurred as expected, polling once per second instead of one long sleep
+ for _ in range(5):
+ time.sleep(1)
+ if inst.searchErrorsLog("compacting replication changelogs"):
+ break
+ assert inst.searchErrorsLog("compacting replication changelogs")
inst.deleteErrorLogs(restart=False)
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
index 352c0ee5b..ccc394255 100644
--- a/src/lib389/lib389/cli_conf/replication.py
+++ b/src/lib389/lib389/cli_conf/replication.py
@@ -1199,6 +1199,11 @@ def restore_cl_dir(inst, basedn, log, args):
replicas.restore_changelog(replica_roots=args.REPLICA_ROOTS, log=log)
+def compact_cl5(inst, basedn, log, args):
+ replicas = Replicas(inst)
+ replicas.compact_changelog(replica_roots=args.REPLICA_ROOTS, log=log)
+
+
def create_parser(subparsers):
############################################
@@ -1326,6 +1331,11 @@ def create_parser(subparsers):
help="Specify one replica root whose changelog you want to restore. "
"The replica root will be consumed from the LDIF file name if the option is omitted.")
+ compact_cl = repl_subcommands.add_parser('compact-changelog', help='Compact the changelog database')
+ compact_cl.set_defaults(func=compact_cl5)
+ compact_cl.add_argument('REPLICA_ROOTS', nargs="+",
+ help="Specify replica roots whose changelog you want to compact.")
+
restore_changelogdir = restore_subcommands.add_parser('from-changelogdir', help='Restore LDIF files from changelogdir.')
restore_changelogdir.set_defaults(func=restore_cl_dir)
restore_changelogdir.add_argument('REPLICA_ROOTS', nargs="+",
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index 94e1fdad5..1f321972d 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -1648,6 +1648,11 @@ class Replica(DSLdapObject):
"""
self.replace('nsds5task', 'ldif2cl')
+ def begin_task_compact_cl5(self):
+ """Begin COMPACT_CL5 task
+ """
+ self.replace('nsds5task', 'COMPACT_CL5')
+
def get_suffix(self):
"""Return the suffix
"""
@@ -1829,6 +1834,41 @@ class Replicas(DSLdapObjects):
log.error(f"Changelog LDIF for '{repl_root}' was not found")
continue
+ def compact_changelog(self, replica_roots=[], log=None):
+ """Compact Directory Server replication changelog
+
+ :param replica_roots: Replica suffixes that need to be processed
+ :type replica_roots: list of str
+ :param log: The logger object
+ :type log: logger
+ """
+
+ if log is None:
+ log = self._log
+
+ # Check if the changelog entry exists
+ try:
+ cl = Changelog5(self._instance)
+ cl.get_attr_val_utf8_l("nsslapd-changelogdir")
+ except ldap.NO_SUCH_OBJECT:
+ raise ValueError("Changelog entry was not found. Probably, the replication is not enabled on this instance")
+
+ # Get all the replicas on the server if --replica-roots option is not specified
+ repl_roots = []
+ if not replica_roots:
+ for replica in self.list():
+ repl_roots.append(replica.get_attr_val_utf8("nsDS5ReplicaRoot"))
+ else:
+ for repl_root in replica_roots:
+ repl_roots.append(repl_root)
+
+ # Dump the changelog for the replica
+
+ # Dump the changelog for the replica
+ for repl_root in repl_roots:
+ replica = self.get(repl_root)
+ replica.begin_task_compact_cl5()
+
class BootstrapReplicationManager(DSLdapObject):
"""A Replication Manager credential for bootstrapping the repl process.
--
2.47.0
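
A usage sketch for the pieces added above; `inst` is assumed to be an already-open lib389 DirSrv connection, and the comment shows the new dsconf subcommand this patch registers:

    import logging
    from lib389.replica import Replicas

    log = logging.getLogger(__name__)

    def compact_replication_changelog(inst, suffixes):
        # New API from this patch: trigger the COMPACT_CL5 task per replica
        Replicas(inst).compact_changelog(replica_roots=suffixes, log=log)

    # CLI equivalent added under dsconf:
    #   dsconf <instance> replication compact-changelog dc=example,dc=com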

View File

@ -1,55 +0,0 @@
From d1cd9a5675e2953b7c8034ebb87a434cdd3ce0c3 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 2 Dec 2024 17:18:32 +0100
Subject: [PATCH] Issue 6417 - If an entry RDN is identical to the suffix, then
Entryrdn gets broken during a reindex (#6418)
Bug description:
During a reindex, the entryrdn index is built at the end from
each entry in the suffix.
If one entry has a RDN that is identical to the suffix DN,
then entryrdn_lookup_dn may erroneously return the suffix DN
as the DN of the entry.
Fix description:
When the lookup entry has no parent (because index is under
work) the loop lookup the entry using the RDN.
If this RDN matches the suffix DN, then it exits from the loop
with the suffix DN.
Before exiting it checks that the original lookup entryID
is equal to suffix entryID. If it does not match
the function fails and then the DN from the entry will be
built from id2enty
fixes: #6417
Reviewed by: Pierre Rogier, Simon Pichugin (Thanks !!!)
---
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
index 5797dd779..83b041192 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
@@ -1224,7 +1224,16 @@ entryrdn_lookup_dn(backend *be,
}
goto bail;
}
- maybesuffix = 1;
+ if (workid == 1) {
+ /* The loop (workid) iterates from the starting 'id'
+ * up to the suffix ID (i.e. '1').
+ * A corner case (#6417) is if an entry, on the path
+ * 'id' -> suffix, has the same RDN as the suffix.
+ * To avoid erroneously believing the loop has hit the suffix,
+ * we need to check that 'workid' is '1' (the suffix)
+ */
+ maybesuffix = 1;
+ }
} else {
_entryrdn_cursor_print_error("entryrdn_lookup_dn",
key.data, data.size, data.ulen, rc);
--
2.48.0
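
A toy model of the corner case: id 1 is the suffix, and entry 3 happens to share the suffix's RDN. Only workid == 1 may terminate the walk, which is exactly the guard the patch adds (the ids and RDNs here are invented):

    # id -> RDN plus child -> parent links, as entryrdn conceptually stores them
    RDNS = {1: "dc=example", 2: "ou=people", 3: "dc=example"}
    PARENT = {3: 2, 2: 1}

    def entry_dn(entry_id):
        parts, workid = [], entry_id
        while True:
            parts.append(RDNS[workid])
            if workid == 1:       # only id 1 is really the suffix; a mere
                break             # RDN match (entry 3) must not stop the walk
            workid = PARENT[workid]
        return ",".join(parts)

    print(entry_dn(3))   # dc=example,ou=people,dc=example - not just dc=example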

View File

@ -1,267 +0,0 @@
From 9b2fc77a36156ea987dcea6e2043f8e4c4a6b259 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 18 Jun 2024 14:21:07 +0200
Subject: [PATCH] Issue 6224 - id2entry - Could not open id2entry err 0 - at
startup when having sub-suffixes (#6225)
Problem: 'id2entry - Could not open id2entry err 0' is logged at startup when there are sub-suffixes.
Reason: The slapi_exist_referral internal search accesses a backend that is not yet started.
Solution: Limit the internal search to a single backend.
Issue: #6224
Reviewed by: @droideck Thanks!
(cherry picked from commit 796f703021e961fdd8cbc53b4ad4e20258af0e96)
---
.../tests/suites/ds_logs/ds_logs_test.py | 1 +
.../suites/mapping_tree/regression_test.py | 161 +++++++++++++++++-
ldap/servers/slapd/backend.c | 7 +-
3 files changed, 159 insertions(+), 10 deletions(-)
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
index 812936c62..84a9c6ec8 100644
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
@@ -1222,6 +1222,7 @@ def test_referral_check(topology_st, request):
request.addfinalizer(fin)
+<<<<<<< HEAD
def test_referral_subsuffix(topology_st, request):
"""Test the results of an inverted parent suffix definition in the configuration.
diff --git a/dirsrvtests/tests/suites/mapping_tree/regression_test.py b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
index 99d4a1d5f..689ff9f59 100644
--- a/dirsrvtests/tests/suites/mapping_tree/regression_test.py
+++ b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
@@ -11,10 +11,14 @@ import ldap
import logging
import os
import pytest
+import time
from lib389.backend import Backends, Backend
+from lib389._constants import HOST_STANDALONE, PORT_STANDALONE, DN_DM, PW_DM
from lib389.dbgen import dbgen_users
from lib389.mappingTree import MappingTrees
from lib389.topologies import topology_st
+from lib389.referral import Referrals, Referral
+
try:
from lib389.backend import BackendSuffixView
@@ -31,14 +35,26 @@ else:
logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
+PARENT_SUFFIX = "dc=parent"
+CHILD1_SUFFIX = f"dc=child1,{PARENT_SUFFIX}"
+CHILD2_SUFFIX = f"dc=child2,{PARENT_SUFFIX}"
+
+PARENT_REFERRAL_DN = f"cn=ref,ou=People,{PARENT_SUFFIX}"
+CHILD1_REFERRAL_DN = f"cn=ref,ou=people,{CHILD1_SUFFIX}"
+CHILD2_REFERRAL_DN = f"cn=ref,ou=people,{CHILD2_SUFFIX}"
+
+REFERRAL_CHECK_PEDIOD = 7
+
+
+
BESTRUCT = [
- { "bename" : "parent", "suffix": "dc=parent" },
- { "bename" : "child1", "suffix": "dc=child1,dc=parent" },
- { "bename" : "child2", "suffix": "dc=child2,dc=parent" },
+ { "bename" : "parent", "suffix": PARENT_SUFFIX },
+ { "bename" : "child1", "suffix": CHILD1_SUFFIX },
+ { "bename" : "child2", "suffix": CHILD2_SUFFIX },
]
-@pytest.fixture(scope="function")
+@pytest.fixture(scope="module")
def topo(topology_st, request):
bes = []
@@ -50,6 +66,9 @@ def topo(topology_st, request):
request.addfinalizer(fin)
inst = topology_st.standalone
+ # Reduce nsslapd-referral-check-period to accelerate test
+ topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK_PEDIOD))
+
ldif_files = {}
for d in BESTRUCT:
bename = d['bename']
@@ -76,14 +95,13 @@ def topo(topology_st, request):
inst.start()
return topology_st
-# Parameters for test_change_repl_passwd
-EXPECTED_ENTRIES = (("dc=parent", 39), ("dc=child1,dc=parent", 13), ("dc=child2,dc=parent", 13))
+# Parameters for test_sub_suffixes
@pytest.mark.parametrize(
"orphan_param",
[
- pytest.param( ( True, { "dc=parent": 2, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-true" ),
- pytest.param( ( False, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-false" ),
- pytest.param( ( None, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="no-orphan" ),
+ pytest.param( ( True, { PARENT_SUFFIX: 2, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="orphan-is-true" ),
+ pytest.param( ( False, { PARENT_SUFFIX: 3, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="orphan-is-false" ),
+ pytest.param( ( None, { PARENT_SUFFIX: 3, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="no-orphan" ),
],
)
@@ -128,3 +146,128 @@ def test_sub_suffixes(topo, orphan_param):
log.info('Test PASSED')
+def test_one_level_search_on_sub_suffixes(topo):
+ """ Perform one level scoped search accross suffix and sub-suffix
+
+ :id: 92f3139e-280e-11ef-a989-482ae39447e5
+ :feature: mapping-tree
+ :setup: Standalone instance with 3 additional backends:
+ dc=parent, dc=child1,dc=parent, dc=child2,dc=parent
+ :steps:
+ 1. Perform a ONE LEVEL search on dc=parent
+ 2. Check that all expected entries have been returned
+ 3. Check that only the expected entries have been returned
+ :expectedresults:
+ 1. Success
+ 2. each expected dn should be in the result set
+ 3. Number of returned entries should be the same as the number of expected entries
+ """
+ expected_dns = ( 'dc=child1,dc=parent',
+ 'dc=child2,dc=parent',
+ 'ou=accounting,dc=parent',
+ 'ou=product development,dc=parent',
+ 'ou=product testing,dc=parent',
+ 'ou=human resources,dc=parent',
+ 'ou=payroll,dc=parent',
+ 'ou=people,dc=parent',
+ 'ou=groups,dc=parent', )
+ entries = topo.standalone.search_s("dc=parent", ldap.SCOPE_ONELEVEL, "(objectClass=*)",
+ attrlist=("dc","ou"), escapehatch='i am sure')
+ log.info(f'one level search on dc=parent returned the following entries: {entries}')
+ dns = [ entry.dn for entry in entries ]
+ for dn in expected_dns:
+ assert dn in dns
+ assert len(entries) == len(expected_dns)
+
+
+def test_sub_suffixes_errlog(topo):
+ """ check the entries found on suffix/sub-suffix
+ used int
+
+ :id: 1db9d52e-28de-11ef-b286-482ae39447e5
+ :feature: mapping-tree
+ :setup: Standalone instance with 3 additional backends:
+ dc=parent, dc=child1,dc=parent, dc=child2,dc=parent
+ :steps:
+ 1. Check that id2entry error message is not in the error log.
+ :expectedresults:
+ 1. Success
+ """
+ inst = topo.standalone
+ assert not inst.searchErrorsLog('id2entry - Could not open id2entry err 0')
+
+
+# Parameters for test_referral_subsuffix:
+# a tuple pair containing:
+# - list of referral dn that must be created
+# - dict of searches basedn: expected_number_of_referrals
+@pytest.mark.parametrize(
+ "parameters",
+ [
+ pytest.param( ((PARENT_REFERRAL_DN, CHILD1_REFERRAL_DN), {PARENT_SUFFIX: 2, CHILD1_SUFFIX:1, CHILD2_SUFFIX:0}), id="Both"),
+ pytest.param( ((PARENT_REFERRAL_DN,), {PARENT_SUFFIX: 1, CHILD1_SUFFIX:0, CHILD2_SUFFIX:0}) , id="Parent"),
+ pytest.param( ((CHILD1_REFERRAL_DN,), {PARENT_SUFFIX: 1, CHILD1_SUFFIX:1, CHILD2_SUFFIX:0}) , id="Child"),
+ pytest.param( ((), {PARENT_SUFFIX: 0, CHILD1_SUFFIX:0, CHILD2_SUFFIX:0}), id="None"),
+ ])
+
+def test_referral_subsuffix(topo, request, parameters):
+ """Test the results of an inverted parent suffix definition in the configuration.
+
+ For more details see:
+ https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html
+
+ :id: 4e111a22-2a5d-11ef-a890-482ae39447e5
+ :feature: referrals
+ :setup: Standalone instance with 3 additional backends:
+ dc=parent, dc=child1,dc=parent, dc=child2,dc=parent
+
+ :parametrized: yes
+ :steps:
+ refs, searches = parameters
+
+ 1. Create the referrals according to the current parameter
+ 2. Wait enough time so they get detected
+ 3. For each search base dn, in the current parameter, perform the two following steps
+ 4. In 3. loop: Perform a search with provided base dn
+ 5. In 3. loop: Check that the number of returned referrals is the expected one.
+
+ :expectedresults:
+ All steps should succeed
+ """
+ inst = topo.standalone
+
+ def fin():
+ log.info('Deleting all referrals')
+ for ref in Referrals(inst, PARENT_SUFFIX).list():
+ ref.delete()
+
+ # Set cleanup callback
+ if DEBUGGING:
+ request.addfinalizer(fin)
+
+ # Remove all referrals
+ fin()
+ # Add requested referrals
+ for dn in parameters[0]:
+ refs = Referral(inst, dn=dn)
+ refs.create(basedn=dn, properties={ 'cn': 'ref', 'ref': f'ldap://remote/{dn}'})
+ # Wait until the internal search detects the referrals
+ time.sleep(REFERRAL_CHECK_PEDIOD + 1)
+ # Open a test connection
+ ldc = ldap.initialize(f"ldap://{HOST_STANDALONE}:{PORT_STANDALONE}")
+ ldc.set_option(ldap.OPT_REFERRALS,0)
+ ldc.simple_bind_s(DN_DM,PW_DM)
+
+ # For each search base dn:
+ for basedn,nbref in parameters[1].items():
+ log.info(f"Referrals are: {parameters[0]}")
+ # Perform a search with provided base dn
+ result = ldc.search_s(basedn, ldap.SCOPE_SUBTREE, filterstr="(ou=People)")
+ found_dns = [ dn for dn,entry in result if dn is not None ]
+ found_refs = [ entry for dn,entry in result if dn is None ]
+ log.info(f"Search on {basedn} returned {found_dns} and {found_refs}")
+ # Check that the number of returned referrals is the expected one.
+ log.info(f"Search returned {len(found_refs)} referrals. {nbref} are expected.")
+ assert len(found_refs) == nbref
+ ldc.unbind()
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
index 498f683b1..f86b0b9b6 100644
--- a/ldap/servers/slapd/backend.c
+++ b/ldap/servers/slapd/backend.c
@@ -230,12 +230,17 @@ slapi_exist_referral(Slapi_Backend *be)
/* search for ("smart") referral entries */
search_pb = slapi_pblock_new();
- server_ctrls = (LDAPControl **) slapi_ch_calloc(2, sizeof (LDAPControl *));
+ server_ctrls = (LDAPControl **) slapi_ch_calloc(3, sizeof (LDAPControl *));
server_ctrls[0] = (LDAPControl *) slapi_ch_malloc(sizeof (LDAPControl));
server_ctrls[0]->ldctl_oid = slapi_ch_strdup(LDAP_CONTROL_MANAGEDSAIT);
server_ctrls[0]->ldctl_value.bv_val = NULL;
server_ctrls[0]->ldctl_value.bv_len = 0;
server_ctrls[0]->ldctl_iscritical = '\0';
+ server_ctrls[1] = (LDAPControl *) slapi_ch_malloc(sizeof (LDAPControl));
+ server_ctrls[1]->ldctl_oid = slapi_ch_strdup(MTN_CONTROL_USE_ONE_BACKEND_EXT_OID);
+ server_ctrls[1]->ldctl_value.bv_val = NULL;
+ server_ctrls[1]->ldctl_value.bv_len = 0;
+ server_ctrls[1]->ldctl_iscritical = '\0';
slapi_search_internal_set_pb(search_pb, suffix, LDAP_SCOPE_SUBTREE,
filter, NULL, 0, server_ctrls, NULL,
(void *) plugin_get_default_component_id(), 0);
--
2.48.0
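As an aside on the backend.c hunk above: the controls array is allocated with
calloc(n + 1, ...) so that a trailing NULL pointer terminates it, which is how
the internal-search API finds the end of the list; growing the allocation from
2 to 3 slots keeps that terminator intact once the second control is added.
A minimal standalone sketch of the pattern (the Ctl struct and the second OID
are simplified, hypothetical stand-ins, not the server's types):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified stand-in for the real LDAPControl type. */
    typedef struct {
        char *ldctl_oid;
    } Ctl;

    int main(void)
    {
        size_t n = 2;
        /* calloc zeroes the extra slot, so the terminator is already NULL */
        Ctl **ctrls = calloc(n + 1, sizeof(Ctl *));

        ctrls[0] = calloc(1, sizeof(Ctl));
        ctrls[0]->ldctl_oid = strdup("2.16.840.1.113730.3.4.2"); /* ManageDsaIT */
        ctrls[1] = calloc(1, sizeof(Ctl));
        ctrls[1]->ldctl_oid = strdup("1.2.3.4"); /* hypothetical second control */

        for (Ctl **c = ctrls; *c != NULL; c++)  /* the NULL slot stops the walk */
            printf("control oid: %s\n", (*c)->ldctl_oid);

        for (Ctl **c = ctrls; *c != NULL; c++) {
            free((*c)->ldctl_oid);
            free(*c);
        }
        free(ctrls);
        return 0;
    }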

View File

@ -1,32 +0,0 @@
From ab06b3cebbe0287ef557c0307ca2ee86fe8cb761 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Thu, 21 Nov 2024 16:26:02 +0100
Subject: [PATCH] Issue 6224 - Fix merge issue in 389-ds-base-2.1 for
ds_logs_test.py (#6414)
Fix a merge issue from a cherry-pick onto the 389-ds-base-2.1 and 389-ds-base-1.4.3 branches
Issue: #6224
Reviewed by: @mreynolds389
(cherry picked from commit 2b541c64b8317209e4dafa4f82918d714039907c)
---
dirsrvtests/tests/suites/ds_logs/ds_logs_test.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
index 84a9c6ec8..812936c62 100644
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
@@ -1222,7 +1222,6 @@ def test_referral_check(topology_st, request):
request.addfinalizer(fin)
-<<<<<<< HEAD
def test_referral_subsuffix(topology_st, request):
"""Test the results of an inverted parent suffix definition in the configuration.
--
2.48.0

View File

@ -1,214 +0,0 @@
From 3fe2cf7cdedcdf5cafb59867e52a1fbe4a643571 Mon Sep 17 00:00:00 2001
From: Masahiro Matsuya <mmatsuya@redhat.com>
Date: Fri, 20 Dec 2024 22:37:15 +0900
Subject: [PATCH] Issue 6224 - Remove test_referral_subsuffix from
ds_logs_test.py (#6456)
Bug Description:
The test_referral_subsuffix test was removed from the main branch and from
some branches for higher versions, but it was not removed from
389-ds-base-1.4.3 and 389-ds-base-2.1. The test no longer works with the fix
for Issue 6224, because the newly added control limits internal searches to a
single backend. The test should be removed.
Fix Description:
Remove the test from ds_logs_test.py.
relates: https://github.com/389ds/389-ds-base/issues/6224
---
.../tests/suites/ds_logs/ds_logs_test.py | 177 ------------------
1 file changed, 177 deletions(-)
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
index 812936c62..84d721756 100644
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
@@ -1222,183 +1222,6 @@ def test_referral_check(topology_st, request):
request.addfinalizer(fin)
-def test_referral_subsuffix(topology_st, request):
- """Test the results of an inverted parent suffix definition in the configuration.
-
- For more details see:
- https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html
-
- :id: 4faf210a-4fde-4e4f-8834-865bdc8f4d37
- :setup: Standalone instance
- :steps:
- 1. First create two Backends, without mapping trees.
- 2. create the mapping trees for these backends
- 3. reduce nsslapd-referral-check-period to accelerate test
- 4. Remove error log file
- 5. Create a referral entry on parent suffix
- 6. Check that the server detected the referral
- 7. Delete the referral entry
- 8. Check that the server detected the deletion of the referral
- 9. Remove error log file
- 10. Create a referral entry on child suffix
- 11. Check that the server detected the referral on both parent and child suffixes
- 12. Delete the referral entry
- 13. Check that the server detected the deletion of the referral on both parent and child suffixes
- 14. Remove error log file
- 15. Create a referral entry on parent suffix
- 16. Check that the server detected the referral on both parent and child suffixes
- 17. Delete the child referral entry
- 18. Check that the server detected the deletion of the referral on child suffix but not on parent suffix
- 19. Delete the parent referral entry
- 20. Check that the server detected the deletion of the referral parent suffix
-
- :expectedresults:
- all steps succeeds
- """
- inst = topology_st.standalone
- # Step 1 First create two Backends, without mapping trees.
- PARENT_SUFFIX='dc=parent,dc=com'
- CHILD_SUFFIX='dc=child,%s' % PARENT_SUFFIX
- be1 = create_backend(inst, 'Parent', PARENT_SUFFIX)
- be2 = create_backend(inst, 'Child', CHILD_SUFFIX)
- # Step 2 create the mapping trees for these backends
- mts = MappingTrees(inst)
- mt1 = mts.create(properties={
- 'cn': PARENT_SUFFIX,
- 'nsslapd-state': 'backend',
- 'nsslapd-backend': 'Parent',
- })
- mt2 = mts.create(properties={
- 'cn': CHILD_SUFFIX,
- 'nsslapd-state': 'backend',
- 'nsslapd-backend': 'Child',
- 'nsslapd-parent-suffix': PARENT_SUFFIX,
- })
-
- dc_ex = Domain(inst, dn=PARENT_SUFFIX)
- assert dc_ex.exists()
-
- dc_st = Domain(inst, dn=CHILD_SUFFIX)
- assert dc_st.exists()
-
- # Step 3 reduce nsslapd-referral-check-period to accelerate test
- # requires a restart done on step 4
- REFERRAL_CHECK=7
- topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK))
-
- # Check that if we create a referral at parent level
- # - referral is detected at parent backend
- # - referral is not detected at child backend
-
- # Step 3 Remove error log file
- topology_st.standalone.stop()
- lpath = topology_st.standalone.ds_error_log._get_log_path()
- os.unlink(lpath)
- topology_st.standalone.start()
-
- # Step 4 Create a referral entry on parent suffix
- rs_parent = Referrals(topology_st.standalone, PARENT_SUFFIX)
-
- referral_entry_parent = rs_parent.create(properties={
- 'cn': 'testref',
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
- })
-
- # Step 5 Check that the server detected the referral
- time.sleep(REFERRAL_CHECK + 1)
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
-
- # Step 6 Delete the referral entry
- referral_entry_parent.delete()
-
- # Step 7 Check that the server detected the deletion of the referral
- time.sleep(REFERRAL_CHECK + 1)
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
-
- # Check that if we create a referral at child level
- # - referral is detected at parent backend
- # - referral is detected at child backend
-
- # Step 8 Remove error log file
- topology_st.standalone.stop()
- lpath = topology_st.standalone.ds_error_log._get_log_path()
- os.unlink(lpath)
- topology_st.standalone.start()
-
- # Step 9 Create a referral entry on child suffix
- rs_child = Referrals(topology_st.standalone, CHILD_SUFFIX)
- referral_entry_child = rs_child.create(properties={
- 'cn': 'testref',
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
- })
-
- # Step 10 Check that the server detected the referral on both parent and child suffixes
- time.sleep(REFERRAL_CHECK + 1)
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
-
- # Step 11 Delete the referral entry
- referral_entry_child.delete()
-
- # Step 12 Check that the server detected the deletion of the referral on both parent and child suffixes
- time.sleep(REFERRAL_CHECK + 1)
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
-
- # Check that if we create a referral at child level and parent level
- # - referral is detected at parent backend
- # - referral is detected at child backend
-
- # Step 13 Remove error log file
- topology_st.standalone.stop()
- lpath = topology_st.standalone.ds_error_log._get_log_path()
- os.unlink(lpath)
- topology_st.standalone.start()
-
- # Step 14 Create a referral entry on parent suffix
- # Create a referral entry on child suffix
- referral_entry_parent = rs_parent.create(properties={
- 'cn': 'testref',
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
- })
- referral_entry_child = rs_child.create(properties={
- 'cn': 'testref',
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
- })
-
- # Step 15 Check that the server detected the referral on both parent and child suffixes
- time.sleep(REFERRAL_CHECK + 1)
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
-
- # Step 16 Delete the child referral entry
- referral_entry_child.delete()
-
- # Step 17 Check that the server detected the deletion of the referral on child suffix but not on parent suffix
- time.sleep(REFERRAL_CHECK + 1)
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
-
- # Step 18 Delete the parent referral entry
- referral_entry_parent.delete()
-
- # Step 19 Check that the server detected the deletion of the referral parent suffix
- time.sleep(REFERRAL_CHECK + 1)
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
-
- def fin():
- log.info('Deleting referral')
- try:
- referral_entry_parent.delete()
- referral.entry_child.delete()
- except:
- pass
-
- request.addfinalizer(fin)
def test_missing_backend_suffix(topology_st, request):
"""Test that the server does not crash if a backend has no suffix
--
2.48.0

View File

@ -1,90 +0,0 @@
From 4121ffe7a44fbacf513758661e71e483eb11ee3c Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 6 Jan 2025 14:00:39 +0100
Subject: [PATCH] Issue 6417 - (2nd) If an entry RDN is identical to the
suffix, then Entryrdn gets broken during a reindex (#6460)
Bug description:
The primary fix has a flaw as it assumes that the
suffix ID is '1'.
If the RUV entry is the first entry of the database
the server loops indefinitely
Fix description:
Read the suffix ID from the entryrdn index
fixes: #6417
Reviewed by: Pierre Rogier (also reviewed the first fix)
---
.../suites/replication/regression_m2_test.py | 9 +++++++++
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 19 ++++++++++++++++++-
2 files changed, 27 insertions(+), 1 deletion(-)
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
index abac46ada..72d4b9f89 100644
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
@@ -1010,6 +1010,15 @@ def test_online_reinit_may_hang(topo_with_sigkill):
"""
M1 = topo_with_sigkill.ms["supplier1"]
M2 = topo_with_sigkill.ms["supplier2"]
+
+ # The RFE 5367 (when enabled) retrieves the DN
+ # from the dncache. This hides an issue
+ # with the primary fix for 6417.
+ # We need to disable the RFE to verify that the primary
+ # fix works properly.
+ if ds_is_newer('2.3.1'):
+ M1.config.replace('nsslapd-return-original-entrydn', 'off')
+
M1.stop()
ldif_file = '%s/supplier1.ldif' % M1.get_ldif_dir()
M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
index 83b041192..1bbb6252a 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
@@ -1115,6 +1115,7 @@ entryrdn_lookup_dn(backend *be,
rdn_elem *elem = NULL;
int maybesuffix = 0;
int db_retry = 0;
+ ID suffix_id = 1;
slapi_log_err(SLAPI_LOG_TRACE, "entryrdn_lookup_dn",
"--> entryrdn_lookup_dn\n");
@@ -1175,6 +1176,22 @@ entryrdn_lookup_dn(backend *be,
/* Setting the bulk fetch buffer */
data.flags = DB_DBT_MALLOC;
+ /* Just in case the suffix ID is not '1' retrieve it from the database */
+ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
+ dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
+ rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
+ if (rc) {
+ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
+ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
+ slapi_sdn_get_ndn(be->be_suffix),
+ suffix_id);
+ } else {
+ elem = (rdn_elem *)data.data;
+ suffix_id = id_stored_to_internal(elem->rdn_elem_id);
+ }
+ dblayer_value_free(be, &data);
+ dblayer_value_free(be, &key);
+
do {
/* Setting up a key for the node to get its parent */
slapi_ch_free_string(&keybuf);
@@ -1224,7 +1241,7 @@ entryrdn_lookup_dn(backend *be,
}
goto bail;
}
- if (workid == 1) {
+ if (workid == suffix_id) {
/* The loop (workid) iterates from the starting 'id'
* up to the suffix ID (i.e. '1').
* A corner case (#6417) is if an entry, on the path
--
2.48.0
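The loop-termination change in the patch above is easier to see in isolation:
entryrdn_lookup_dn rebuilds a DN by walking parent IDs upward, and the walk
must stop at the suffix's actual ID rather than at a hard-coded '1'. A toy,
self-contained sketch of that idea (the parent table and the IDs are invented
for illustration, not the real entryrdn format):

    #include <stdio.h>

    typedef unsigned int ID;

    /* Invented stand-in for the entryrdn parent links:
     * parent_of[i] is the ID of entry i's parent. */
    static const ID parent_of[] = {0, 1, 5, 2, 3};

    int main(void)
    {
        ID suffix_id = 5;  /* read from the index, not assumed to be 1 */
        ID workid = 4;     /* entry whose DN is being rebuilt */

        /* Comparing against a hard-coded 1 would never terminate
         * when the suffix happens to have a different ID. */
        while (workid != suffix_id) {
            printf("visiting id %u\n", workid);
            workid = parent_of[workid];
        }
        printf("reached the suffix, id %u\n", workid);
        return 0;
    }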

View File

@ -1,40 +0,0 @@
From 1ffcc9aa9a397180fe35283ee61b164471d073fb Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Tue, 7 Jan 2025 10:01:51 +0100
Subject: [PATCH] Issue 6417 - (2nd) fix typo
---
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
index 1bbb6252a..e2b8273a2 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
@@ -1178,8 +1178,10 @@ entryrdn_lookup_dn(backend *be,
/* Just in case the suffix ID is not '1' retrieve it from the database */
keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
- dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
- rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
+ key.data = keybuf;
+ key.size = key.ulen = strlen(keybuf) + 1;
+ key.flags = DB_DBT_USERMEM;
+ rc = cursor->c_get(cursor, &key, &data, DB_SET);
if (rc) {
slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
"Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
@@ -1189,8 +1191,8 @@ entryrdn_lookup_dn(backend *be,
elem = (rdn_elem *)data.data;
suffix_id = id_stored_to_internal(elem->rdn_elem_id);
}
- dblayer_value_free(be, &data);
- dblayer_value_free(be, &key);
+ slapi_ch_free(&data.data);
+ slapi_ch_free_string(&keybuf);
do {
/* Setting up a key for the node to get its parent */
--
2.48.0

View File

@ -1,75 +0,0 @@
From 9e1284122a929fe14633a2aa6e2de4d72891f98f Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Mon, 13 Jan 2025 17:41:18 +0100
Subject: [PATCH] Issue 6417 - (3rd) If an entry RDN is identical to the
suffix, then Entryrdn gets broken during a reindex (#6480)
Bug description:
The previous fix had a flaw.
If entryrdn_lookup_dn is called with an undefined suffix,
the lookup of the suffix triggers a crash.
This can occur, for example, during an internal search of a
nonexistent map (view plugin).
The issue exists in all releases but is hidden since 2.3.
Fix description:
Test that the suffix is defined before looking it up.
fixes: #6417
Reviewed by: Pierre Rogier (Thanks!)
---
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 36 +++++++++++---------
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
index e2b8273a2..01c77156f 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
@@ -1176,23 +1176,27 @@ entryrdn_lookup_dn(backend *be,
/* Setting the bulk fetch buffer */
data.flags = DB_DBT_MALLOC;
- /* Just in case the suffix ID is not '1' retrieve it from the database */
- keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
- key.data = keybuf;
- key.size = key.ulen = strlen(keybuf) + 1;
- key.flags = DB_DBT_USERMEM;
- rc = cursor->c_get(cursor, &key, &data, DB_SET);
- if (rc) {
- slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
- "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
- slapi_sdn_get_ndn(be->be_suffix),
- suffix_id);
- } else {
- elem = (rdn_elem *)data.data;
- suffix_id = id_stored_to_internal(elem->rdn_elem_id);
+ /* Just in case the suffix ID is not '1' retrieve it from the database
+ * if the suffix is not defined suffix_id remains '1'
+ */
+ if (be->be_suffix) {
+ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
+ key.data = keybuf;
+ key.size = key.ulen = strlen(keybuf) + 1;
+ key.flags = DB_DBT_USERMEM;
+ rc = cursor->c_get(cursor, &key, &data, DB_SET);
+ if (rc) {
+ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
+ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
+ slapi_sdn_get_ndn(be->be_suffix),
+ suffix_id);
+ } else {
+ elem = (rdn_elem *) data.data;
+ suffix_id = id_stored_to_internal(elem->rdn_elem_id);
+ }
+ slapi_ch_free(&data.data);
+ slapi_ch_free_string(&keybuf);
}
- slapi_ch_free(&data.data);
- slapi_ch_free_string(&keybuf);
do {
/* Setting up a key for the node to get its parent */
--
2.48.0

View File

@ -1,297 +0,0 @@
From d2f9dd82e3610ee9b73feea981c680c03bb21394 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 16 Jan 2025 08:42:53 -0500
Subject: [PATCH] Issue 6509 - Race condition with Paged Result searches
Description:
There is a race condition with Paged Result searches when a new operation comes
in while a paged search is finishing. This triggers an invalid timeout error
and closes the connection with a T3 code.
The problem is that we do not use the "PagedResult lock" when checking the
connection's paged result data for a timeout event. This causes the paged
result timeout value to change unexpectedly and trigger a false timeout when a
new operation arrives.
Now we check the timeout without the connection lock; if it has expired, it
could be a race condition and a false positive, so we take the lock and test
the timeout again. This also prevents non-paged result searches from
getting held up by the lock when it's not necessary.
This also fixes some memory leaks that occur when an error happens.
Relates: https://github.com/389ds/389-ds-base/issues/6509
Reviewed by: tbordaz & proger (Thanks!!)
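The locking pattern described above -- an unlocked fast-path check, then a
trylock and a re-check under the lock before acting -- can be sketched on its
own. This is a minimal illustration with invented names, not the server code:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t pr_mutex = PTHREAD_MUTEX_INITIALIZER;
    static time_t pr_deadline;  /* refreshed by worker threads under pr_mutex */

    static int timed_out(void) { return time(NULL) >= pr_deadline; }

    static void poll_connection(void)
    {
        if (!timed_out())
            return;                          /* cheap unlocked fast path */
        /* A positive unlocked check may race with a worker that is just
         * refreshing the deadline, so confirm it under the lock. */
        if (pthread_mutex_trylock(&pr_mutex) == EBUSY)
            return;                          /* busy: recheck on the next pass */
        if (timed_out())
            printf("disconnect: paged search time limit exceeded\n");
        pthread_mutex_unlock(&pr_mutex);
    }

    int main(void)
    {
        pr_deadline = time(NULL) - 1;  /* already expired, for demonstration */
        poll_connection();
        return 0;
    }

Skipping on EBUSY instead of blocking is what keeps the listener thread from
stalling behind a worker, at the cost of deferring the timeout check to the
next poll pass.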
---
ldap/servers/slapd/daemon.c | 61 ++++++++++++++++++-------------
ldap/servers/slapd/opshared.c | 58 ++++++++++++++---------------
ldap/servers/slapd/pagedresults.c | 9 +++++
ldap/servers/slapd/slap.h | 2 +-
4 files changed, 75 insertions(+), 55 deletions(-)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index bb80dae36..13dfe250d 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1578,7 +1578,29 @@ setup_pr_read_pds(Connection_Table *ct)
if (c->c_state == CONN_STATE_FREE) {
connection_table_move_connection_out_of_active_list(ct, c);
} else {
- /* we try to acquire the connection mutex, if it is already
+ /* Check for a timeout for PAGED RESULTS */
+ if (pagedresults_is_timedout_nolock(c)) {
+ /*
+ * There could be a race condition so lets try again with the
+ * right lock
+ */
+ pthread_mutex_t *pr_mutex = pageresult_lock_get_addr(c);
+ if (pthread_mutex_trylock(pr_mutex) == EBUSY) {
+ c = next;
+ continue;
+ }
+ if (pagedresults_is_timedout_nolock(c)) {
+ pthread_mutex_unlock(pr_mutex);
+ disconnect_server(c, c->c_connid, -1,
+ SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
+ 0);
+ } else {
+ pthread_mutex_unlock(pr_mutex);
+ }
+ }
+
+ /*
+ * we try to acquire the connection mutex, if it is already
* acquired by another thread, don't wait
*/
if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
@@ -1586,35 +1608,24 @@ setup_pr_read_pds(Connection_Table *ct)
continue;
}
if (c->c_flags & CONN_FLAG_CLOSING) {
- /* A worker thread has marked that this connection
- * should be closed by calling disconnect_server.
- * move this connection out of the active list
- * the last thread to use the connection will close it
+ /*
+ * A worker thread, or paged result timeout, has marked that
+ * this connection should be closed by calling
+ * disconnect_server(). Move this connection out of the active
+ * list then the last thread to use the connection will close
+ * it.
*/
connection_table_move_connection_out_of_active_list(ct, c);
} else if (c->c_sd == SLAPD_INVALID_SOCKET) {
connection_table_move_connection_out_of_active_list(ct, c);
} else if (c->c_prfd != NULL) {
if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
- int add_fd = 1;
- /* check timeout for PAGED RESULTS */
- if (pagedresults_is_timedout_nolock(c)) {
- /* Exceeded the paged search timelimit; disconnect the client */
- disconnect_server_nomutex(c, c->c_connid, -1,
- SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
- 0);
- connection_table_move_connection_out_of_active_list(ct,
- c);
- add_fd = 0; /* do not poll on this fd */
- }
- if (add_fd) {
- ct->fd[count].fd = c->c_prfd;
- ct->fd[count].in_flags = SLAPD_POLL_FLAGS;
- /* slot i of the connection table is mapped to slot
- * count of the fds array */
- c->c_fdi = count;
- count++;
- }
+ ct->fd[listnum][count].fd = c->c_prfd;
+ ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
+ /* slot i of the connection table is mapped to slot
+ * count of the fds array */
+ c->c_fdi = count;
+ count++;
} else {
if (c->c_threadnumber >= c->c_max_threads_per_conn) {
c->c_maxthreadsblocked++;
@@ -1675,7 +1686,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused
continue;
}
- /* Try to get connection mutex, if not available just skip the connection and
+ /* Try to get connection mutex, if not available just skip the connection and
* process other connections events. May generates cpu load for listening thread
* if connection mutex is held for a long time
*/
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 7ab4117cd..a29eed052 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -250,7 +250,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
char *errtext = NULL;
int nentries, pnentries;
int flag_search_base_found = 0;
- int flag_no_such_object = 0;
+ bool flag_no_such_object = false;
int flag_referral = 0;
int flag_psearch = 0;
int err_code = LDAP_SUCCESS;
@@ -315,7 +315,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
rc = -1;
goto free_and_return_nolock;
}
-
+
/* Set the time we actually started the operation */
slapi_operation_set_time_started(operation);
@@ -798,11 +798,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
}
/* subtree searches :
- * if the search was started above the backend suffix
- * - temporarily set the SLAPI_SEARCH_TARGET_SDN to the
- * base of the node so that we don't get a NO SUCH OBJECT error
- * - do not change the scope
- */
+ * if the search was started above the backend suffix
+ * - temporarily set the SLAPI_SEARCH_TARGET_SDN to the
+ * base of the node so that we don't get a NO SUCH OBJECT error
+ * - do not change the scope
+ */
if (scope == LDAP_SCOPE_SUBTREE) {
if (slapi_sdn_issuffix(be_suffix, basesdn)) {
if (free_sdn) {
@@ -825,53 +825,53 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
switch (rc) {
case 1:
/* if the backend returned LDAP_NO_SUCH_OBJECT for a SEARCH request,
- * it will not have sent back a result - otherwise, it will have
- * sent a result */
+ * it will not have sent back a result - otherwise, it will have
+ * sent a result */
rc = SLAPI_FAIL_GENERAL;
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
if (err == LDAP_NO_SUCH_OBJECT) {
/* may be the object exist somewhere else
- * wait the end of the loop to send back this error
- */
- flag_no_such_object = 1;
+ * wait the end of the loop to send back this error
+ */
+ flag_no_such_object = true;
} else {
/* err something other than LDAP_NO_SUCH_OBJECT, so the backend will
- * have sent the result -
- * Set a flag here so we don't return another result. */
+ * have sent the result -
+ * Set a flag here so we don't return another result. */
sent_result = 1;
}
- /* fall through */
+ /* fall through */
case -1: /* an error occurred */
+ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
/* PAGED RESULTS */
if (op_is_pagedresults(operation)) {
/* cleanup the slot */
pthread_mutex_lock(pagedresults_mutex);
+ if (err != LDAP_NO_SUCH_OBJECT && !flag_no_such_object) {
+ /* Free the results if not "no_such_object" */
+ void *sr = NULL;
+ slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
+ be->be_search_results_release(&sr);
+ }
pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1);
pthread_mutex_unlock(pagedresults_mutex);
}
- if (1 == flag_no_such_object) {
- break;
- }
- slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
- if (err == LDAP_NO_SUCH_OBJECT) {
- /* may be the object exist somewhere else
- * wait the end of the loop to send back this error
- */
- flag_no_such_object = 1;
+
+ if (err == LDAP_NO_SUCH_OBJECT || flag_no_such_object) {
+ /* Maybe the object exists somewhere else, wait to the end
+ * of the loop to send back this error */
+ flag_no_such_object = true;
break;
} else {
- /* for error other than LDAP_NO_SUCH_OBJECT
- * the error has already been sent
- * stop the search here
- */
+ /* For error other than LDAP_NO_SUCH_OBJECT the error has
+ * already been sent stop the search here */
cache_return_target_entry(pb, be, operation);
goto free_and_return;
}
/* when rc == SLAPI_FAIL_DISKFULL this case is executed */
-
case SLAPI_FAIL_DISKFULL:
operation_out_of_disk_space();
cache_return_target_entry(pb, be, operation);
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
index db87e486e..4aa1fa3e5 100644
--- a/ldap/servers/slapd/pagedresults.c
+++ b/ldap/servers/slapd/pagedresults.c
@@ -121,12 +121,15 @@ pagedresults_parse_control_value(Slapi_PBlock *pb,
if (ber_scanf(ber, "{io}", pagesize, &cookie) == LBER_ERROR) {
slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
"<= corrupted control value\n");
+ ber_free(ber, 1);
return LDAP_PROTOCOL_ERROR;
}
if (!maxreqs) {
slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
"Simple paged results requests per conn exceeded the limit: %d\n",
maxreqs);
+ ber_free(ber, 1);
+ slapi_ch_free_string(&cookie.bv_val);
return LDAP_UNWILLING_TO_PERFORM;
}
@@ -376,6 +379,10 @@ pagedresults_free_one_msgid(Connection *conn, ber_int_t msgid, pthread_mutex_t *
}
prp->pr_flags |= CONN_FLAG_PAGEDRESULTS_ABANDONED;
prp->pr_flags &= ~CONN_FLAG_PAGEDRESULTS_PROCESSING;
+ if (conn->c_pagedresults.prl_count > 0) {
+ _pr_cleanup_one_slot(prp);
+ conn->c_pagedresults.prl_count--;
+ }
rc = 0;
break;
}
@@ -940,7 +947,9 @@ pagedresults_is_timedout_nolock(Connection *conn)
return 1;
}
}
+
slapi_log_err(SLAPI_LOG_TRACE, "<-- pagedresults_is_timedout", "<= false 2\n");
+
return 0;
}
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 072f6f962..469874fd1 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -74,7 +74,7 @@ static char ptokPBE[34] = "Internal (Software) Token ";
#include <sys/stat.h>
#include <sys/socket.h>
#include <netinet/in.h>
-
+#include <stdbool.h>
#include <time.h> /* For timespec definitions */
/* Provides our int types and platform specific requirements. */
--
2.48.0

View File

@ -1,29 +0,0 @@
From 27cd055197bc3cae458a1f86621aa5410c66dd2c Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 20 Jan 2025 15:51:24 -0500
Subject: [PATCH] Issue 6509 - Fix cherry pick issue (race condition in Paged
results)
Relates: https://github.com/389ds/389-ds-base/issues/6509
---
ldap/servers/slapd/daemon.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 13dfe250d..57e07e5f5 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1620,8 +1620,8 @@ setup_pr_read_pds(Connection_Table *ct)
connection_table_move_connection_out_of_active_list(ct, c);
} else if (c->c_prfd != NULL) {
if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
- ct->fd[listnum][count].fd = c->c_prfd;
- ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
+ ct->fd[count].fd = c->c_prfd;
+ ct->fd[count].in_flags = SLAPD_POLL_FLAGS;
/* slot i of the connection table is mapped to slot
* count of the fds array */
c->c_fdi = count;
--
2.48.0

View File

@ -0,0 +1,3 @@
#Type Name ID GECOS Home directory Shell
g dirsrv 389
u dirsrv 389:389 "user for 389-ds-base" /usr/share/dirsrv/ /sbin/nologin

View File

@ -1,933 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "addr2line"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
dependencies = [
"gimli",
]
[[package]]
name = "adler"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "ahash"
version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd"
dependencies = [
"getrandom",
"once_cell",
"version_check",
]
[[package]]
name = "ansi_term"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
dependencies = [
"winapi",
]
[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
"hermit-abi",
"libc",
"winapi",
]
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "backtrace"
version = "0.3.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"
dependencies = [
"addr2line",
"cc",
"cfg-if",
"libc",
"miniz_oxide",
"object",
"rustc-demangle",
]
[[package]]
name = "base64"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07"
[[package]]
name = "byteorder"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "cbindgen"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
dependencies = [
"clap",
"log",
"proc-macro2",
"quote",
"serde",
"serde_json",
"syn 1.0.109",
"tempfile",
"toml",
]
[[package]]
name = "cc"
version = "1.0.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
dependencies = [
"jobserver",
"libc",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "clap"
version = "2.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
dependencies = [
"ansi_term",
"atty",
"bitflags 1.3.2",
"strsim",
"textwrap",
"unicode-width",
"vec_map",
]
[[package]]
name = "concread"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcc9816f5ac93ebd51c37f7f9a6bf2b40dfcd42978ad2aea5d542016e9244cf6"
dependencies = [
"ahash",
"crossbeam",
"crossbeam-epoch",
"crossbeam-utils",
"lru",
"parking_lot",
"rand",
"smallvec",
"tokio",
]
[[package]]
name = "crossbeam"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-epoch",
"crossbeam-queue",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-queue"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"
[[package]]
name = "entryuuid"
version = "0.1.0"
dependencies = [
"cc",
"libc",
"paste",
"slapi_r_plugin",
"uuid",
]
[[package]]
name = "entryuuid_syntax"
version = "0.1.0"
dependencies = [
"cc",
"libc",
"paste",
"slapi_r_plugin",
"uuid",
]
[[package]]
name = "errno"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245"
dependencies = [
"libc",
"windows-sys",
]
[[package]]
name = "fastrand"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5"
[[package]]
name = "fernet"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
dependencies = [
"base64",
"byteorder",
"getrandom",
"openssl",
"zeroize",
]
[[package]]
name = "foreign-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
dependencies = [
"foreign-types-shared",
]
[[package]]
name = "foreign-types-shared"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "getrandom"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "gimli"
version = "0.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
[[package]]
name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
dependencies = [
"ahash",
]
[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]
[[package]]
name = "instant"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
dependencies = [
"cfg-if",
]
[[package]]
name = "itoa"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
[[package]]
name = "jobserver"
version = "0.1.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d"
dependencies = [
"libc",
]
[[package]]
name = "libc"
version = "0.2.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7"
[[package]]
name = "librnsslapd"
version = "0.1.0"
dependencies = [
"cbindgen",
"libc",
"slapd",
]
[[package]]
name = "librslapd"
version = "0.1.0"
dependencies = [
"cbindgen",
"concread",
"libc",
"slapd",
]
[[package]]
name = "linux-raw-sys"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456"
[[package]]
name = "lock_api"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "lru"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a"
dependencies = [
"hashbrown",
]
[[package]]
name = "memchr"
version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"
[[package]]
name = "miniz_oxide"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
dependencies = [
"adler",
]
[[package]]
name = "object"
version = "0.32.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441"
dependencies = [
"memchr",
]
[[package]]
name = "once_cell"
version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "openssl"
version = "0.10.62"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671"
dependencies = [
"bitflags 2.4.1",
"cfg-if",
"foreign-types",
"libc",
"once_cell",
"openssl-macros",
"openssl-sys",
]
[[package]]
name = "openssl-macros"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.48",
]
[[package]]
name = "openssl-sys"
version = "0.9.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7"
dependencies = [
"cc",
"libc",
"pkg-config",
"vcpkg",
]
[[package]]
name = "parking_lot"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
dependencies = [
"instant",
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
dependencies = [
"cfg-if",
"instant",
"libc",
"redox_syscall 0.2.16",
"smallvec",
"winapi",
]
[[package]]
name = "paste"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
dependencies = [
"paste-impl",
"proc-macro-hack",
]
[[package]]
name = "paste-impl"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
dependencies = [
"proc-macro-hack",
]
[[package]]
name = "pin-project-lite"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
[[package]]
name = "pkg-config"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a"
[[package]]
name = "ppv-lite86"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "proc-macro-hack"
version = "0.5.20+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
[[package]]
name = "proc-macro2"
version = "1.0.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pwdchan"
version = "0.1.0"
dependencies = [
"base64",
"cc",
"libc",
"openssl",
"paste",
"slapi_r_plugin",
"uuid",
]
[[package]]
name = "quote"
version = "1.0.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom",
]
[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
"bitflags 1.3.2",
]
[[package]]
name = "redox_syscall"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"
dependencies = [
"bitflags 1.3.2",
]
[[package]]
name = "rsds"
version = "0.1.0"
[[package]]
name = "rustc-demangle"
version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
[[package]]
name = "rustix"
version = "0.38.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca"
dependencies = [
"bitflags 2.4.1",
"errno",
"libc",
"linux-raw-sys",
"windows-sys",
]
[[package]]
name = "ryu"
version = "1.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c"
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
version = "1.0.195"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.195"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.48",
]
[[package]]
name = "serde_json"
version = "1.0.111"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "slapd"
version = "0.1.0"
dependencies = [
"fernet",
]
[[package]]
name = "slapi_r_plugin"
version = "0.1.0"
dependencies = [
"libc",
"paste",
"uuid",
]
[[package]]
name = "smallvec"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2593d31f82ead8df961d8bd23a64c2ccf2eb5dd34b0a34bfb4dd54011c72009e"
[[package]]
name = "strsim"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.48"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tempfile"
version = "3.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa"
dependencies = [
"cfg-if",
"fastrand",
"redox_syscall 0.4.1",
"rustix",
"windows-sys",
]
[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = [
"unicode-width",
]
[[package]]
name = "tokio"
version = "1.35.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104"
dependencies = [
"backtrace",
"pin-project-lite",
"tokio-macros",
]
[[package]]
name = "tokio-macros"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.48",
]
[[package]]
name = "toml"
version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234"
dependencies = [
"serde",
]
[[package]]
name = "unicode-ident"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "unicode-width"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85"
[[package]]
name = "uuid"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
dependencies = [
"getrandom",
]
[[package]]
name = "vcpkg"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
[[package]]
name = "vec_map"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
[[package]]
name = "windows_i686_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
[[package]]
name = "windows_i686_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
[[package]]
name = "zeroize"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d"
dependencies = [
"zeroize_derive",
]
[[package]]
name = "zeroize_derive"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.48",
]

File diff suppressed because it is too large