Bump version to 3.0.6-3

- Resolves: RHEL-5141 - [RFE] For each request, a ldap client can assign an identifier to be added in the logs
- Resolves: RHEL-77948 - ns-slapd crashes with data directory ≥ 2 days old [rhel-10]
- Resolves: RHEL-78342 - During import of entries without nsUniqueId, a supplier generates duplicate nsUniqueId (LMDB only)
Viktor Ashirov 2025-02-12 16:13:44 +01:00
parent 265edbfe35
commit 6a59456f24
14 changed files with 3858 additions and 238 deletions


@@ -1,237 +0,0 @@
From a251914c8defce11c3f8496406af8dec6cf50c4b Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Fri, 6 Sep 2024 18:07:17 +0200
Subject: [PATCH] Issue 6316 - lmdb reindex is broken if index type is
specified (#6318)
While reindexing through a task or an offline reindex, if the attribute name contains the index type (for example :eq,pres),
then the attribute is not reindexed. The problem occurs only when lmdb is used; things work fine with bdb.
Solution: strip the index type during reindex, as is done in the bdb case.
In any case, the reindex design requires that for a given attribute all the configured index types be rebuilt.
Issue: #6316
Reviewed by: @tbordaz, @droideck (Thanks!)
---
.../tests/suites/indexes/regression_test.py | 141 +++++++++++++++++-
.../slapd/back-ldbm/db-mdb/mdb_import.c | 10 +-
2 files changed, 147 insertions(+), 4 deletions(-)
diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
index 51f88017d..e98ca6172 100644
--- a/dirsrvtests/tests/suites/indexes/regression_test.py
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
@@ -10,6 +10,9 @@ import time
import os
import pytest
import ldap
+import logging
+import glob
+import re
from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX
from lib389.backend import Backend, Backends, DatabaseConfig
from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate
@@ -31,6 +34,8 @@ SUFFIX2 = 'dc=example2,dc=com'
BENAME2 = 'be2'
DEBUGGING = os.getenv("DEBUGGING", default=False)
+logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
@pytest.fixture(scope="function")
@@ -83,6 +88,7 @@ def add_a_group_with_users(request, topo):
'cn': USER_NAME,
'uidNumber': f'{num}',
'gidNumber': f'{num}',
+ 'description': f'Description for {USER_NAME}',
'homeDirectory': f'/home/{USER_NAME}'
})
users_list.append(user)
@@ -95,9 +101,10 @@ def add_a_group_with_users(request, topo):
# If the server crashed, start it again to do the cleanup
if not topo.standalone.status():
topo.standalone.start()
- for user in users_list:
- user.delete()
- group.delete()
+ if not DEBUGGING:
+ for user in users_list:
+ user.delete()
+ group.delete()
request.addfinalizer(fin)
@@ -124,6 +131,38 @@ def set_small_idlistscanlimit(request, topo):
request.addfinalizer(fin)
+
+@pytest.fixture(scope="function")
+def set_description_index(request, topo, add_a_group_with_users):
+ """
+ Set some description values and description index without reindexing.
+ """
+ inst = topo.standalone
+ backends = Backends(inst)
+ backend = backends.get(DEFAULT_BENAME)
+ indexes = backend.get_indexes()
+ attr = 'description'
+
+ def fin(always=False):
+ if always or not DEBUGGING:
+ try:
+ idx = indexes.get(attr)
+ idx.delete()
+ except ldap.NO_SUCH_OBJECT:
+ pass
+
+ request.addfinalizer(fin)
+ fin(always=True)
+ index = indexes.create(properties={
+ 'cn': attr,
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': ['eq', 'pres', 'sub']
+ })
+ # Restart needed with lmdb (to open the dbi handle)
+ inst.restart()
+ return (indexes, attr)
+
+
#unstable or unstatus tests, skipped for now
@pytest.mark.flaky(max_runs=2, min_passes=1)
@pytest.mark.skipif(ds_is_older("1.4.4.4"), reason="Not implemented")
@@ -346,6 +385,102 @@ def test_task_status(topo):
assert reindex_task.get_exit_code() == 0
+def count_keys(inst, bename, attr, prefix=''):
+ indexfile = os.path.join(inst.dbdir, bename, attr + '.db')
+    # (bdb may append a version number to the .db suffix, so accept that too)
+ for f in glob.glob(f'{indexfile}*'):
+ indexfile = f
+
+ inst.stop()
+ output = inst.dbscan(None, None, args=['-f', indexfile, '-A'], stopping=False).decode()
+ inst.start()
+ count = 0
+ regexp = f'^KEY: {re.escape(prefix)}'
+ for match in re.finditer(regexp, output, flags=re.MULTILINE):
+ count += 1
+ log.info(f"count_keys found {count} keys starting with '{prefix}' in {indexfile}")
+ return count
+
+
+def test_reindex_task_with_type(topo, set_description_index):
+ """Check that reindex task works as expected when index type is specified.
+
+ :id: 0c7f2fda-69f6-11ef-9eb8-083a88554478
+ :setup: Standalone instance
+ - with 100 users having description attribute
+ - with description:eq,pres,sub index entry but not yet reindexed
+ :steps:
+ 1. Set description in suffix entry
+ 2. Count number of equality keys in description index
+ 3. Start a Reindex task on description:eq,pres and wait for completion
+ 4. Check the task status and exit code
+ 5. Count the equality, presence and substring keys in description index
+ 6. Start a Reindex task on description and wait for completion
+ 7. Check the task status and exit code
+ 8. Count the equality, presence and substring keys in description index
+
+ :expectedresults:
+ 1. Success
+ 2. Should be either no key (bdb) or a single one (lmdb)
+ 3. Success
+ 4. Success
+ 5. Should have: more equality keys than in step 2
+ one presence key
+           some substring keys
+ 6. Success
+ 7. Success
+        8. Should have the same counts as in step 5
+ """
+ (indexes, attr) = set_description_index
+ inst = topo.standalone
+ if not inst.is_dbi_supported():
+ pytest.skip('This test requires that dbscan supports -A option')
+ # modify indexed value
+ Domain(inst, DEFAULT_SUFFIX).replace(attr, f'test_before_reindex')
+
+ keys1 = count_keys(inst, DEFAULT_BENAME, attr, prefix='=')
+ assert keys1 <= 1
+
+ tasks = Tasks(topo.standalone)
+    # completed reindex tasks MUST have a status because freeipa checks it.
+
+ # Reindex attr with eq,pres types
+ log.info(f'Reindex {attr} with eq,pres types')
+ tasks.reindex(
+ suffix=DEFAULT_SUFFIX,
+ attrname=f'{attr}:eq,pres',
+ args={TASK_WAIT: True}
+ )
+ reindex_task = Task(topo.standalone, tasks.dn)
+ assert reindex_task.status()
+ assert reindex_task.get_exit_code() == 0
+
+ keys2e = count_keys(inst, DEFAULT_BENAME, attr, prefix='=')
+ keys2p = count_keys(inst, DEFAULT_BENAME, attr, prefix='+')
+ keys2s = count_keys(inst, DEFAULT_BENAME, attr, prefix='*')
+ assert keys2e > keys1
+ assert keys2p > 0
+ assert keys2s > 0
+
+ # Reindex attr without types
+ log.info(f'Reindex {attr} without types')
+ tasks.reindex(
+ suffix=DEFAULT_SUFFIX,
+ attrname=attr,
+ args={TASK_WAIT: True}
+ )
+ reindex_task = Task(topo.standalone, tasks.dn)
+ assert reindex_task.status()
+ assert reindex_task.get_exit_code() == 0
+
+ keys3e = count_keys(inst, DEFAULT_BENAME, attr, prefix='=')
+ keys3p = count_keys(inst, DEFAULT_BENAME, attr, prefix='+')
+ keys3s = count_keys(inst, DEFAULT_BENAME, attr, prefix='*')
+ assert keys3e == keys2e
+ assert keys3p == keys2p
+ assert keys3s == keys2s
+
+
def test_task_and_be(topo, add_backend_and_ldif_50K_users):
"""Check that backend is writable after finishing a tasks
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c
index cfd9de268..5f8e36cdc 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c
@@ -1150,6 +1150,8 @@ process_db2index_attrs(Slapi_PBlock *pb, ImportCtx_t *ctx)
* TBD
*/
char **attrs = NULL;
+ char *attrname = NULL;
+ char *pt = NULL;
int i;
slapi_pblock_get(pb, SLAPI_DB2INDEX_ATTRS, &attrs);
@@ -1157,7 +1159,13 @@ process_db2index_attrs(Slapi_PBlock *pb, ImportCtx_t *ctx)
for (i = 0; attrs && attrs[i]; i++) {
switch (attrs[i][0]) {
case 't': /* attribute type to index */
- slapi_ch_array_add(&ctx->indexAttrs, slapi_ch_strdup(attrs[i] + 1));
+ attrname = slapi_ch_strdup(attrs[i] + 1);
+ /* Strip index type */
+ pt = strchr(attrname, ':');
+ if (pt != NULL) {
+ *pt = '\0';
+ }
+ slapi_ch_array_add(&ctx->indexAttrs, attrname);
break;
case 'T': /* VLV Search to index */
slapi_ch_array_add(&ctx->indexVlvs, get_vlv_dbname(attrs[i] + 1));
--
2.46.0
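
The essence of the fix is the same normalization bdb already performs: strip any ':type' suffix from the attribute spec before reindexing, since reindex always rebuilds every configured index type for an attribute. A minimal Python sketch of that normalization (hypothetical helper, not part of the patch):

    def strip_index_type(attr_spec):
        """Normalize a reindex spec such as 'description:eq,pres' to the
        bare attribute name; reindex rebuilds all configured types anyway."""
        return attr_spec.split(':', 1)[0]

    assert strip_index_type('description:eq,pres') == 'description'
    assert strip_index_type('description') == 'description'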


@@ -0,0 +1,53 @@
From fc7f5aa01e245c7c2e35b01d171dbd5a6dc75db4 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Sat, 25 Jan 2025 13:54:33 +0100
Subject: [PATCH] Issue 6544 - logconv.py: python3-magic conflicts with
python3-file-magic
Bug Description:
python3-magic and python3-file-magic can't be installed simultaneously,
and python3-magic is not packaged for EL10.
Fix Description:
Use python3-file-magic instead.
Issue identified and fix suggested by Adam Williamson.
Fixes: https://github.com/389ds/389-ds-base/issues/6544
Reviewed by: @mreynolds389 (Thanks!)
---
ldap/admin/src/logconv.py | 3 +--
rpm/389-ds-base.spec.in | 2 +-
2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/ldap/admin/src/logconv.py b/ldap/admin/src/logconv.py
index 566f9af38..2fb5bb8c1 100755
--- a/ldap/admin/src/logconv.py
+++ b/ldap/admin/src/logconv.py
@@ -1798,8 +1798,7 @@ class logAnalyser:
return None
try:
- mime = magic.Magic(mime=True)
- filetype = mime.from_file(filepath)
+ filetype = magic.detect_from_filename(filepath).mime_type
# List of supported compression types
compressed_mime_types = [
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index 3146b9186..3c6e95938 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -298,7 +298,7 @@ Requires: json-c
# Log compression
Requires: zlib-devel
# logconv.py, MIME type
-Requires: python-magic
+Requires: python3-file-magic
# Picks up our systemd deps.
%{?systemd_requires}
--
2.48.0
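
Both packages install a module named magic but with different APIs, which is the root of the conflict. A small compatibility sketch that works with either binding, assuming only the two call styles shown in the diff:

    import magic

    def mime_type_of(filepath):
        # file-magic (python3-file-magic) exposes detect_from_filename()
        if hasattr(magic, 'detect_from_filename'):
            return magic.detect_from_filename(filepath).mime_type
        # python-magic exposes the Magic class with mime=True
        return magic.Magic(mime=True).from_file(filepath)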


@@ -0,0 +1,311 @@
From 1aabba9b17f99eb1a460be3305aad4b7099b9fe6 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Wed, 13 Nov 2024 15:31:35 +0100
Subject: [PATCH] Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work
properly (#6400)
* Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work properly
Several issues:
- After restarting the server, nsslapd-mdb-max-dbs may not be high enough to add a new backend because the value computation is wrong.
- dbscan fails to open the database if nsslapd-mdb-max-dbs has been increased.
- dbscan crashes when closing the database (typically when using -S).
Fixes:
- When starting the instance, the nsslapd-mdb-max-dbs parameter is increased to ensure that a new backend may be added.
- When the dse.ldif path is not specified, the db environment is now opened using the INFO.mdb data instead of the default values.
- Synchronization between thread closure and database context destruction is hardened.
Issue: #6374
Reviewed by: @tbordaz , @vashirov (Thanks!)
(cherry picked from commit 56cd3389da608a3f6eeee58d20dffbcd286a8033)
---
.../tests/suites/config/config_test.py | 86 +++++++++++++++++++
ldap/servers/slapd/back-ldbm/back-ldbm.h | 2 +
.../slapd/back-ldbm/db-mdb/mdb_config.c | 17 ++--
.../back-ldbm/db-mdb/mdb_import_threads.c | 9 +-
.../slapd/back-ldbm/db-mdb/mdb_instance.c | 8 ++
ldap/servers/slapd/back-ldbm/dbimpl.c | 2 +-
ldap/servers/slapd/back-ldbm/import.c | 14 ++-
7 files changed, 128 insertions(+), 10 deletions(-)
diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index c3e26eed4..08544594f 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -17,6 +17,7 @@ from lib389.topologies import topology_m2, topology_st as topo
from lib389.utils import *
from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX, DEFAULT_BENAME
from lib389._mapped_object import DSLdapObjects
+from lib389.agreement import Agreements
from lib389.cli_base import FakeArgs
from lib389.cli_conf.backend import db_config_set
from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
@@ -27,6 +28,8 @@ from lib389.cos import CosPointerDefinitions, CosTemplates
from lib389.backend import Backends, DatabaseConfig
from lib389.monitor import MonitorLDBM, Monitor
from lib389.plugins import ReferentialIntegrityPlugin
+from lib389.replica import BootstrapReplicationManager, Replicas
+from lib389.passwd import password_generate
pytestmark = pytest.mark.tier0
@@ -36,6 +39,8 @@ PSTACK_CMD = '/usr/bin/pstack'
logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+
@pytest.fixture(scope="module")
def big_file():
TEMP_BIG_FILE = ''
@@ -813,6 +818,87 @@ def test_numlisteners_limit(topo):
assert numlisteners[0] == '4'
+def bootstrap_replication(inst_from, inst_to, creds):
+ manager = BootstrapReplicationManager(inst_to)
+ rdn_val = 'replication manager'
+ if manager.exists():
+ manager.delete()
+ manager.create(properties={
+ 'cn': rdn_val,
+ 'uid': rdn_val,
+ 'userPassword': creds
+ })
+ for replica in Replicas(inst_to).list():
+ replica.remove_all('nsDS5ReplicaBindDNGroup')
+ replica.replace('nsDS5ReplicaBindDN', manager.dn)
+ for agmt in Agreements(inst_from).list():
+ agmt.replace('nsDS5ReplicaBindDN', manager.dn)
+ agmt.replace('nsDS5ReplicaCredentials', creds)
+
+
+@pytest.mark.skipif(get_default_db_lib() != "mdb", reason="This test requires lmdb")
+def test_lmdb_autotuned_maxdbs(topology_m2, request):
+ """Verify that after restart, nsslapd-mdb-max-dbs is large enough to add a new backend.
+
+ :id: 0272d432-9080-11ef-8f40-482ae39447e5
+ :setup: Two suppliers configuration
+ :steps:
+ 1. loop 20 times
+ 3. In 1 loop: restart instance
+        2. In 1 loop: restart instance
+ 4. In 1 loop: check that instance is still alive
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ s1 = topology_m2.ms["supplier1"]
+ s2 = topology_m2.ms["supplier2"]
+
+ backends = Backends(s1)
+ db_config = DatabaseConfig(s1)
+ # Generate the teardown finalizer
+ belist = []
+ creds=password_generate()
+ bootstrap_replication(s2, s1, creds)
+ bootstrap_replication(s1, s2, creds)
+
+ def fin():
+ s1.start()
+ for be in belist:
+ be.delete()
+
+ if not DEBUGGING:
+ request.addfinalizer(fin)
+
+ # 1. Set autotuning (off-line to be able to decrease the value)
+ s1.stop()
+ dse_ldif = DSEldif(s1)
+ dse_ldif.replace(db_config.dn, 'nsslapd-mdb-max-dbs', '0')
+ os.remove(f'{s1.dbdir}/data.mdb')
+ s1.start()
+
+ # 2. Reinitialize the db:
+ log.info("Bulk import...")
+ agmt = Agreements(s2).list()[0]
+ agmt.begin_reinit()
+ (done, error) = agmt.wait_reinit()
+    log.info(f'Bulk import result is ({done}, {error})')
+ assert done is True
+ assert error is False
+
+ # 3. loop 20 times
+ for idx in range(20):
+ s1.restart()
+ log.info(f'Adding backend test{idx}')
+ belist.append(backends.create(properties={'cn': f'test{idx}',
+ 'nsslapd-suffix': f'dc=test{idx}'}))
+ assert s1.status()
+
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index 8fea63e35..35d0ece04 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -896,4 +896,6 @@ typedef struct _back_search_result_set
((L)->size == (R)->size && !memcmp((L)->data, (R)->data, (L)->size))
typedef int backend_implement_init_fn(struct ldbminfo *li, config_info *config_array);
+
+pthread_mutex_t *get_import_ctx_mutex();
#endif /* _back_ldbm_h_ */
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
index 351f54037..1f7b71442 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
@@ -83,7 +83,7 @@ dbmdb_compute_limits(struct ldbminfo *li)
uint64_t total_space = 0;
uint64_t avail_space = 0;
uint64_t cur_dbsize = 0;
- int nbchangelogs = 0;
+ int nbvlvs = 0;
int nbsuffixes = 0;
int nbindexes = 0;
int nbagmt = 0;
@@ -99,8 +99,8 @@ dbmdb_compute_limits(struct ldbminfo *li)
* But some tunable may be autotuned.
*/
if (dbmdb_count_config_entries("(objectClass=nsMappingTree)", &nbsuffixes) ||
- dbmdb_count_config_entries("(objectClass=nsIndex)", &nbsuffixes) ||
- dbmdb_count_config_entries("(&(objectClass=nsds5Replica)(nsDS5Flags=1))", &nbchangelogs) ||
+ dbmdb_count_config_entries("(objectClass=nsIndex)", &nbindexes) ||
+ dbmdb_count_config_entries("(objectClass=vlvIndex)", &nbvlvs) ||
dbmdb_count_config_entries("(objectClass=nsds5replicationagreement)", &nbagmt)) {
/* error message is already logged */
return 1;
@@ -120,8 +120,15 @@ dbmdb_compute_limits(struct ldbminfo *li)
info->pagesize = sysconf(_SC_PAGE_SIZE);
limits->min_readers = config_get_threadnumber() + nbagmt + DBMDB_READERS_MARGIN;
- /* Default indexes are counted in "nbindexes" so we should always have enough resource to add 1 new suffix */
- limits->min_dbs = nbsuffixes + nbindexes + nbchangelogs + DBMDB_DBS_MARGIN;
+ /*
+ * For each suffix there are 4 databases instances:
+ * long-entryrdn, replication_changelog, id2entry and ancestorid
+ * then the indexes and the vlv and vlv cache
+ *
+ * Default indexes are counted in "nbindexes" so we should always have enough
+ * resource to add 1 new suffix
+ */
+ limits->min_dbs = 4*nbsuffixes + nbindexes + 2*nbvlvs + DBMDB_DBS_MARGIN;
total_space = ((uint64_t)(buf.f_blocks)) * ((uint64_t)(buf.f_bsize));
avail_space = ((uint64_t)(buf.f_bavail)) * ((uint64_t)(buf.f_bsize));
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
index 8c879da31..707a110c5 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
@@ -4312,9 +4312,12 @@ dbmdb_import_init_writer(ImportJob *job, ImportRole_t role)
void
dbmdb_free_import_ctx(ImportJob *job)
{
- if (job->writer_ctx) {
- ImportCtx_t *ctx = job->writer_ctx;
- job->writer_ctx = NULL;
+ ImportCtx_t *ctx = NULL;
+ pthread_mutex_lock(get_import_ctx_mutex());
+ ctx = job->writer_ctx;
+ job->writer_ctx = NULL;
+ pthread_mutex_unlock(get_import_ctx_mutex());
+ if (ctx) {
pthread_mutex_destroy(&ctx->workerq.mutex);
pthread_cond_destroy(&ctx->workerq.cv);
slapi_ch_free((void**)&ctx->workerq.slots);
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
index 6386ecf06..05f1e348d 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
@@ -287,6 +287,13 @@ int add_dbi(dbi_open_ctx_t *octx, backend *be, const char *fname, int flags)
slapi_ch_free((void**)&treekey.dbname);
return octx->rc;
}
+ if (treekey.dbi >= ctx->dsecfg.max_dbs) {
+ octx->rc = MDB_DBS_FULL;
+ slapi_log_err(SLAPI_LOG_ERR, "add_dbi", "Failed to open database instance %s slots: %d/%d. Error is %d: %s.\n",
+ treekey.dbname, treekey.dbi, ctx->dsecfg.max_dbs, octx->rc, mdb_strerror(octx->rc));
+ slapi_ch_free((void**)&treekey.dbname);
+ return octx->rc;
+ }
if (octx->ai && octx->ai->ai_key_cmp_fn) {
octx->rc = dbmdb_update_dbi_cmp_fn(ctx, &treekey, octx->ai->ai_key_cmp_fn, octx->txn);
if (octx->rc) {
@@ -689,6 +696,7 @@ int dbmdb_make_env(dbmdb_ctx_t *ctx, int readOnly, mdb_mode_t mode)
rc = dbmdb_write_infofile(ctx);
} else {
/* No Config ==> read it from info file */
+ ctx->dsecfg = ctx->startcfg;
}
if (rc) {
return rc;
diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
index 86df986bd..f3bf68a9f 100644
--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
@@ -505,7 +505,7 @@ int dblayer_show_statistics(const char *dbimpl_name, const char *dbhome, FILE *f
li->li_plugin = be->be_database;
li->li_plugin->plg_name = (char*) "back-ldbm-dbimpl";
li->li_plugin->plg_libpath = (char*) "libback-ldbm";
- li->li_directory = (char*)dbhome;
+ li->li_directory = get_li_directory(dbhome);
/* Initialize database plugin */
rc = dbimpl_setup(li, dbimpl_name);
diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
index 2bb8cb581..30ec462fa 100644
--- a/ldap/servers/slapd/back-ldbm/import.c
+++ b/ldap/servers/slapd/back-ldbm/import.c
@@ -27,6 +27,9 @@
#define NEED_DN_NORM_SP -25
#define NEED_DN_NORM_BT -26
+/* Protect against import context destruction */
+static pthread_mutex_t import_ctx_mutex = PTHREAD_MUTEX_INITIALIZER;
+
/********** routines to manipulate the entry fifo **********/
@@ -143,6 +146,14 @@ ldbm_back_wire_import(Slapi_PBlock *pb)
/* Threads management */
+/* Return the mutex that protects against import context destruction */
+pthread_mutex_t *
+get_import_ctx_mutex()
+{
+ return &import_ctx_mutex;
+}
+
+
/* tell all the threads to abort */
void
import_abort_all(ImportJob *job, int wait_for_them)
@@ -151,7 +162,7 @@ import_abort_all(ImportJob *job, int wait_for_them)
/* tell all the worker threads to abort */
job->flags |= FLAG_ABORT;
-
+ pthread_mutex_lock(&import_ctx_mutex);
for (worker = job->worker_list; worker; worker = worker->next)
worker->command = ABORT;
@@ -167,6 +178,7 @@ import_abort_all(ImportJob *job, int wait_for_them)
}
}
}
+ pthread_mutex_unlock(&import_ctx_mutex);
}
--
2.48.0
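
The corrected autotuning boils down to the formula added in mdb_config.c: four database instances per suffix, two per VLV index (the index plus its cache), one per plain index, plus a safety margin. A sketch of the computation (the DBMDB_DBS_MARGIN value here is illustrative):

    DBMDB_DBS_MARGIN = 10  # illustrative; the real constant lives in the C code

    def min_dbs(nbsuffixes, nbindexes, nbvlvs):
        # 4 per suffix: long-entryrdn, replication_changelog, id2entry, ancestorid
        # 2 per VLV index: the index itself and its cache
        return 4 * nbsuffixes + nbindexes + 2 * nbvlvs + DBMDB_DBS_MARGIN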


@@ -0,0 +1,72 @@
From 6b80ba631161219093267e8e4c885bfc392d3d61 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Fri, 6 Sep 2024 14:45:06 +0200
Subject: [PATCH] Issue 6090 - Fix dbscan options and man pages (#6315)
* Issue 6090 - Fix dbscan options and man pages
The dbscan -d option is dangerously confusing: it removes a database instance, while in db_stat it identifies the database
(cf. issue #5609).
This fix implements long options in dbscan, renames -d to --remove, and requires a new --do-it option for actions that change the database content.
The fix should also align both the usage text and the dbscan man page with the new set of options.
Issue: #6090
Reviewed by: @tbordaz, @droideck (Thanks!)
(cherry picked from commit 25e1d16887ebd299dfe0088080b9ee0deec1e41f)
---
ldap/servers/slapd/back-ldbm/dbimpl.c | 5 ++++-
src/lib389/lib389/cli_ctl/dblib.py | 13 ++++++++++++-
2 files changed, 16 insertions(+), 2 deletions(-)
diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
index f3bf68a9f..83662df8c 100644
--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
@@ -481,7 +481,10 @@ int dblayer_private_close(Slapi_Backend **be, dbi_env_t **env, dbi_db_t **db)
slapi_ch_free_string(&li->li_directory);
slapi_ch_free((void**)&li->li_dblayer_private);
slapi_ch_free((void**)&li->li_dblayer_config);
- ldbm_config_destroy(li);
+ if (dblayer_is_lmdb(*be)) {
+    /* Calling ldbm_config_destroy generates use-after-free and double free in the bdb case */
+ ldbm_config_destroy(li);
+ }
slapi_ch_free((void**)&(*be)->be_database);
slapi_ch_free((void**)&(*be)->be_instance_info);
slapi_ch_free((void**)be);
diff --git a/src/lib389/lib389/cli_ctl/dblib.py b/src/lib389/lib389/cli_ctl/dblib.py
index 053a72d61..318ae5ae9 100644
--- a/src/lib389/lib389/cli_ctl/dblib.py
+++ b/src/lib389/lib389/cli_ctl/dblib.py
@@ -199,6 +199,14 @@ def run_dbscan(args):
return output
+def does_dbscan_need_do_it():
+ prefix = os.environ.get('PREFIX', "")
+ prog = f'{prefix}/bin/dbscan'
+ args = [ prog, '-h' ]
+ output = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ return '--do-it' in output.stdout
+
+
def export_changelog(be, dblib):
# Export backend changelog
if not be['has_changelog']:
@@ -217,7 +225,10 @@ def import_changelog(be, dblib):
try:
cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname']
_log.info(f"Importing changelog {cl5dbname} from {be['cl5name']}")
- run_dbscan(['-D', dblib, '-f', cl5dbname, '--import', be['cl5name'], '--do-it'])
+ if does_dbscan_need_do_it():
+ run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name'], '--do-it'])
+ else:
+ run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name']])
return True
except subprocess.CalledProcessError as e:
return False
--
2.48.0
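
The does_dbscan_need_do_it() helper shows a reusable pattern: probe the tool's help output for a flag before passing it, so lib389 keeps working against dbscan builds that predate --do-it. A hedged generalization of that helper:

    import os
    import subprocess

    def dbscan_supports(option):
        # Probe `dbscan -h` for the option, as does_dbscan_need_do_it()
        # does for '--do-it'
        prefix = os.environ.get('PREFIX', "")
        out = subprocess.run([f'{prefix}/bin/dbscan', '-h'], encoding='utf-8',
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        return option in out.stdout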


@@ -0,0 +1,146 @@
From dc8032856d51c382e266eea72f66284e70a0e40c Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 31 Jan 2025 08:54:27 -0500
Subject: [PATCH] Issue 6489 - After log rotation refresh the FD pointer
Description:
When flushing a log buffer, we get an FD for the log prior to checking if the
log should be rotated. If the log is rotated, that FD reference is now
invalid, and it needs to be refreshed before proceeding.
Relates: https://github.com/389ds/389-ds-base/issues/6489
Reviewed by: tbordaz(Thanks!)
---
.../suites/logging/log_flush_rotation_test.py | 81 +++++++++++++++++++
ldap/servers/slapd/log.c | 18 +++++
2 files changed, 99 insertions(+)
create mode 100644 dirsrvtests/tests/suites/logging/log_flush_rotation_test.py
diff --git a/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py b/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py
new file mode 100644
index 000000000..b33a622e1
--- /dev/null
+++ b/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py
@@ -0,0 +1,81 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2025 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import os
+import logging
+import time
+import pytest
+from lib389._constants import DEFAULT_SUFFIX, PW_DM
+from lib389.tasks import ImportTask
+from lib389.idm.user import UserAccounts
+from lib389.topologies import topology_st as topo
+
+
+log = logging.getLogger(__name__)
+
+
+def test_log_flush_and_rotation_crash(topo):
+ """Make sure server does not crash whening flushing a buffer and rotating
+ the log at the same time
+
+ :id: d4b0af2f-48b2-45f5-ae8b-f06f692c3133
+ :setup: Standalone Instance
+ :steps:
+ 1. Enable all logs
+ 2. Enable log buffering for all logs
+ 3. Set rotation time unit to 1 minute
+ 4. Make sure server is still running after 1 minute
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ inst = topo.standalone
+
+ # Enable logging and buffering
+ inst.config.set("nsslapd-auditlog-logging-enabled", "on")
+ inst.config.set("nsslapd-accesslog-logbuffering", "on")
+ inst.config.set("nsslapd-auditlog-logbuffering", "on")
+ inst.config.set("nsslapd-errorlog-logbuffering", "on")
+ inst.config.set("nsslapd-securitylog-logbuffering", "on")
+
+ # Set rotation policy to trigger rotation asap
+ inst.config.set("nsslapd-accesslog-logrotationtimeunit", "minute")
+ inst.config.set("nsslapd-auditlog-logrotationtimeunit", "minute")
+ inst.config.set("nsslapd-errorlog-logrotationtimeunit", "minute")
+ inst.config.set("nsslapd-securitylog-logrotationtimeunit", "minute")
+
+ #
+ # Performs ops to populate all the logs
+ #
+ # Access & audit log
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
+ user = users.create_test_user()
+ user.set("userPassword", PW_DM)
+ # Security log
+ user.bind(PW_DM)
+ # Error log
+ import_task = ImportTask(inst)
+ import_task.import_suffix_from_ldif(ldiffile="/not/here",
+ suffix=DEFAULT_SUFFIX)
+
+ # Wait a minute and make sure the server did not crash
+ log.info("Sleep until logs are flushed and rotated")
+ time.sleep(61)
+
+ assert inst.status()
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main(["-s", CURRENT_FILE])
+
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 76f2b6768..7e2c980a4 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -6746,6 +6746,23 @@ log_refresh_state(int32_t log_type)
return 0;
}
}
+static LOGFD
+log_refresh_fd(int32_t log_type)
+{
+ switch (log_type) {
+ case SLAPD_ACCESS_LOG:
+ return loginfo.log_access_fdes;
+ case SLAPD_SECURITY_LOG:
+ return loginfo.log_security_fdes;
+ case SLAPD_AUDIT_LOG:
+ return loginfo.log_audit_fdes;
+ case SLAPD_AUDITFAIL_LOG:
+ return loginfo.log_auditfail_fdes;
+ case SLAPD_ERROR_LOG:
+ return loginfo.log_error_fdes;
+ }
+ return NULL;
+}
/* this function assumes the lock is already acquired */
/* if sync_now is non-zero, data is flushed to physical storage */
@@ -6857,6 +6874,7 @@ log_flush_buffer(LogBufferInfo *lbi, int log_type, int sync_now, int locked)
rotationtime_secs);
}
log_state = log_refresh_state(log_type);
+ fd = log_refresh_fd(log_type);
}
if (log_state & LOGGING_NEED_TITLE) {
--
2.48.0
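
The crash pattern being fixed is generic: a flusher caches a file handle, rotation closes and reopens the file, and the flush then writes through the stale handle. A minimal Python analogue of the fix (illustrative only; the server operates on NSPR LOGFDs, not Python file objects):

    class BufferedLog:
        def __init__(self, path):
            self.path = path
            self.fd = open(path, 'a')
            self.buf = []

        def rotate(self):
            self.fd.close()              # any cached handle is now invalid
            self.fd = open(self.path, 'a')

        def flush(self, rotate_now=False):
            fd = self.fd                 # handle fetched before the rotation check
            if rotate_now:
                self.rotate()
                fd = self.fd             # the fix: refresh the handle, as
                                         # log_refresh_fd() does after rotation
            fd.write(''.join(self.buf))
            self.buf.clear()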


@@ -0,0 +1,236 @@
From 90460bfa66fb77118967927963572f69e097c4eb Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 29 Jan 2025 17:41:55 +0000
Subject: [PATCH] Issue 6436 - MOD on a large group slow if substring index is
present (#6437)
Bug Description: If the substring index is configured for the group
membership attribute (member or uniqueMember), the removal of a
member from a large static group is pretty slow.
Fix Description: A proper solution to this issue would be to introduce
a new index to track membership attributes. In the interim,
we add a check to healthcheck to inform the user of the implications
of this configuration.
Fixes: https://github.com/389ds/389-ds-base/issues/6436
Reviewed by: @Firstyear, @tbordaz, @droideck (Thanks)
---
.../suites/healthcheck/health_config_test.py | 89 ++++++++++++++++++-
src/lib389/lib389/lint.py | 15 ++++
src/lib389/lib389/plugins.py | 37 +++++++-
3 files changed, 137 insertions(+), 4 deletions(-)
diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
index e1e5398ab..f09bc8bb8 100644
--- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py
+++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
@@ -167,6 +167,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
standalone = topology_st.standalone
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
log.info('Enable RI plugin')
plugin = ReferentialIntegrityPlugin(standalone)
@@ -188,7 +189,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
def test_healthcheck_MO_plugin_missing_indexes(topology_st):
- """Check if HealthCheck returns DSMOLE0002 code
+ """Check if HealthCheck returns DSMOLE0001 code
:id: 236b0ec2-13da-48fb-b65a-db7406d56d5d
:setup: Standalone instance
@@ -203,8 +204,8 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
:expectedresults:
1. Success
2. Success
- 3. Healthcheck reports DSMOLE0002 code and related details
- 4. Healthcheck reports DSMOLE0002 code and related details
+ 3. Healthcheck reports DSMOLE0001 code and related details
+ 4. Healthcheck reports DSMOLE0001 code and related details
5. Success
6. Healthcheck reports no issue found
7. Healthcheck reports no issue found
@@ -214,6 +215,7 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
MO_GROUP_ATTR = 'creatorsname'
standalone = topology_st.standalone
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
log.info('Enable MO plugin')
plugin = MemberOfPlugin(standalone)
@@ -236,6 +238,87 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
standalone.restart()
+def test_healthcheck_MO_plugin_substring_index(topology_st):
+ """Check if HealthCheck returns DSMOLE0002 code when the
+    member or uniquemember attribute index contains a substring index type
+
+ :id: 10954811-24ac-4886-8183-e30892f8e02d
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Configure the instance with MO Plugin
+ 3. Change index type to substring for member attribute
+ 4. Use HealthCheck without --json option
+ 5. Use HealthCheck with --json option
+ 6. Change index type back to equality for member attribute
+ 7. Use HealthCheck without --json option
+ 8. Use HealthCheck with --json option
+ 9. Change index type to substring for uniquemember attribute
+ 10. Use HealthCheck without --json option
+ 11. Use HealthCheck with --json option
+ 12. Change index type back to equality for uniquemember attribute
+ 13. Use HealthCheck without --json option
+ 14. Use HealthCheck with --json option
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Healthcheck reports DSMOLE0002 code and related details
+ 5. Healthcheck reports DSMOLE0002 code and related details
+ 6. Success
+ 7. Healthcheck reports no issue found
+ 8. Healthcheck reports no issue found
+ 9. Success
+ 10. Healthcheck reports DSMOLE0002 code and related details
+ 11. Healthcheck reports DSMOLE0002 code and related details
+ 12. Success
+ 13. Healthcheck reports no issue found
+ 14. Healthcheck reports no issue found
+ """
+
+ RET_CODE = 'DSMOLE0002'
+ MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
+ UNIQUE_MEMBER_DN = 'cn=uniquemember,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
+
+ standalone = topology_st.standalone
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
+
+ log.info('Enable MO plugin')
+ plugin = MemberOfPlugin(standalone)
+ plugin.disable()
+ plugin.enable()
+
+ log.info('Change the index type of the member attribute index to substring')
+ index = Index(topology_st.standalone, MEMBER_DN)
+ index.replace('nsIndexType', 'sub')
+
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE)
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE)
+
+ log.info('Set the index type of the member attribute index back to eq')
+ index.replace('nsIndexType', 'eq')
+
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT)
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
+
+ log.info('Change the index type of the uniquemember attribute index to substring')
+ index = Index(topology_st.standalone, UNIQUE_MEMBER_DN)
+ index.replace('nsIndexType', 'sub')
+
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE)
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE)
+
+ log.info('Set the index type of the uniquemember attribute index back to eq')
+ index.replace('nsIndexType', 'eq')
+
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT)
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
+
+ # Restart the instance after changing the plugin to avoid breaking the other tests
+ standalone.restart()
+
+
@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented")
def test_healthcheck_virtual_attr_incorrectly_indexed(topology_st):
"""Check if HealthCheck returns DSVIRTLE0001 code
diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
index d0747f0f4..460bf64fc 100644
--- a/src/lib389/lib389/lint.py
+++ b/src/lib389/lib389/lint.py
@@ -270,6 +270,21 @@ database after adding the missing index type. Here is an example using dsconf:
"""
}
+DSMOLE0002 = {
+ 'dsle': 'DSMOLE0002',
+ 'severity': 'LOW',
+    'description': 'Removal of a member can be slow',
+ 'items': ['cn=memberof plugin,cn=plugins,cn=config', ],
+    'detail': """If the substring index is configured for a membership attribute, the removal of a member
+from a large group can be slow.
+
+""",
+ 'fix': """If not required, you can remove the substring index type using dsconf:
+
+ # dsconf slapd-YOUR_INSTANCE backend index set --attr=ATTR BACKEND --del-type=sub
+"""
+}
+
# Disk Space check. Note - PARTITION is replaced by the calling function
DSDSLE0001 = {
'dsle': 'DSDSLE0001',
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
index 67af93a14..31bbfa502 100644
--- a/src/lib389/lib389/plugins.py
+++ b/src/lib389/lib389/plugins.py
@@ -12,7 +12,7 @@ import copy
import os.path
from lib389 import tasks
from lib389._mapped_object import DSLdapObjects, DSLdapObject
-from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001
+from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001, DSMOLE0002
from lib389.utils import ensure_str, ensure_list_bytes
from lib389.schema import Schema
from lib389._constants import (
@@ -827,6 +827,41 @@ class MemberOfPlugin(Plugin):
report['check'] = f'memberof:attr_indexes'
yield report
+ def _lint_member_substring_index(self):
+ if self.status():
+ from lib389.backend import Backends
+ backends = Backends(self._instance).list()
+ membership_attrs = ['member', 'uniquemember']
+ container = self.get_attr_val_utf8_l("nsslapd-plugincontainerscope")
+ for backend in backends:
+ suffix = backend.get_attr_val_utf8_l('nsslapd-suffix')
+ if suffix == "cn=changelog":
+ # Always skip retro changelog
+ continue
+ if container is not None:
+ # Check if this backend is in the scope
+ if not container.endswith(suffix):
+ # skip this backend that is not in the scope
+ continue
+ indexes = backend.get_indexes()
+ for attr in membership_attrs:
+ report = copy.deepcopy(DSMOLE0002)
+ try:
+ index = indexes.get(attr)
+ types = index.get_attr_vals_utf8_l("nsIndexType")
+ if "sub" in types:
+ report['detail'] = report['detail'].replace('ATTR', attr)
+ report['detail'] = report['detail'].replace('BACKEND', suffix)
+ report['fix'] = report['fix'].replace('ATTR', attr)
+ report['fix'] = report['fix'].replace('BACKEND', suffix)
+ report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
+ report['items'].append(suffix)
+ report['items'].append(attr)
+ report['check'] = f'attr:substring_index'
+ yield report
+ except KeyError:
+ continue
+
def get_attr(self):
"""Get memberofattr attribute"""
--
2.48.0
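
Once merged, the new lint rule can be exercised straight from lib389; a usage sketch, assuming inst is an already-connected DirSrv instance with the MemberOf plugin enabled:

    from lib389.plugins import MemberOfPlugin

    plugin = MemberOfPlugin(inst)
    # _lint_member_substring_index() yields one DSMOLE0002 report per
    # membership attribute that carries a substring index
    for report in plugin._lint_member_substring_index():
        print(report['check'], report['items'])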


@@ -0,0 +1,70 @@
From dcb6298db5bfef4b2541f7c52682d153b424bfa7 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Tue, 4 Feb 2025 15:40:16 +0000
Subject: [PATCH] Issue 6566 - RI plugin failure to handle a modrdn for rename
of member of multiple groups (#6567)
Bug description:
With AM and RI plugins enabled, the rename of a user that is part of multiple groups
fails with a "value exists" error.
Fix description:
For a modrdn, the RI plugin creates a new DN; before a modify is attempted, check
whether the new DN already exists in the attribute being updated.
Fixes: https://github.com/389ds/389-ds-base/issues/6566
Reviewed by: @progier389 , @tbordaz (Thank you)
---
ldap/servers/plugins/referint/referint.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
index 468fdc239..218863ea5 100644
--- a/ldap/servers/plugins/referint/referint.c
+++ b/ldap/servers/plugins/referint/referint.c
@@ -924,6 +924,7 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
{
Slapi_Mods *smods = NULL;
char *newDN = NULL;
+ struct berval bv = {0};
char **dnParts = NULL;
char *sval = NULL;
char *newvalue = NULL;
@@ -1026,22 +1027,30 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
}
/* else: normalize_rc < 0) Ignore the DN normalization error for now. */
+ bv.bv_val = newDN;
+ bv.bv_len = strlen(newDN);
p = PL_strstr(sval, slapi_sdn_get_ndn(origDN));
if (p == sval) {
/* (case 1) */
slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
-
+ /* Add only if the attr value does not exist */
+ if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) {
+ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
+ }
} else if (p) {
/* (case 2) */
slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
*p = '\0';
newvalue = slapi_ch_smprintf("%s%s", sval, newDN);
- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue);
+ /* Add only if the attr value does not exist */
+ if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) {
+ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue);
+ }
slapi_ch_free_string(&newvalue);
}
/* else: value does not include the modified DN. Ignore it. */
slapi_ch_free_string(&sval);
+ bv = (struct berval){0};
}
rc = _do_modify(mod_pb, entrySDN, slapi_mods_get_ldapmods_byref(smods));
if (rc) {
--
2.48.0
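
The failing scenario can be reproduced from lib389: put one user into two groups, then rename the user so the RI plugin rewrites both member values. A hedged reproduction sketch (group and user names are illustrative, and inst is assumed to be a DirSrv instance with the RI plugin enabled):

    from lib389.idm.group import Groups
    from lib389.idm.user import UserAccounts
    from lib389._constants import DEFAULT_SUFFIX

    users = UserAccounts(inst, DEFAULT_SUFFIX)
    user = users.create_test_user()
    groups = Groups(inst, DEFAULT_SUFFIX)
    for name in ('group_a', 'group_b'):
        group = groups.create(properties={'cn': name})
        group.add_member(user.dn)

    # Before the fix, the RI-driven rewrite of 'member' could fail
    # with "value exists" when the user is renamed:
    user.rename('uid=renamed_user')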


@@ -0,0 +1,43 @@
From be57ea839934c29b3f4db450a65281aa30a72caf Mon Sep 17 00:00:00 2001
From: Masahiro Matsuya <mmatsuya@redhat.com>
Date: Wed, 5 Feb 2025 11:38:28 +0900
Subject: [PATCH] Issue 6258 - Mitigate race condition in paged_results_test.py
(#6433)
The regression test dirsrvtests/tests/suites/paged_results/paged_results_test.py::test_multi_suffix_search has a race condition causing it to fail due to multiple queries potentially writing their logs out of chronological order.
This failure is mitigated by sorting the retrieved access_log_lines by their "op" value. This ensures the log lines are in chronological order, as expected by the assertions at the end of test_multi_suffix_search().
Helps fix: #6258
Reviewed by: @droideck , @progier389 (Thanks!)
Co-authored-by: Anuar Beisembayev <111912342+abeisemb@users.noreply.github.com>
---
dirsrvtests/tests/suites/paged_results/paged_results_test.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
index eaf0e0da9..fca48db0f 100644
--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py
+++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
@@ -7,6 +7,7 @@
# --- END COPYRIGHT BLOCK ---
#
import socket
+import re
from random import sample, randrange
import pytest
@@ -1126,6 +1127,8 @@ def test_multi_suffix_search(topology_st, create_user, new_suffixes):
topology_st.standalone.restart(timeout=10)
access_log_lines = topology_st.standalone.ds_access_log.match('.*pr_cookie=.*')
+ # Sort access_log_lines by op number to mitigate race condition effects.
+ access_log_lines.sort(key=lambda x: int(re.search(r"op=(\d+) RESULT", x).group(1)))
pr_cookie_list = ([line.rsplit('=', 1)[-1] for line in access_log_lines])
pr_cookie_list = [int(pr_cookie) for pr_cookie in pr_cookie_list]
log.info('Assert that last pr_cookie == -1 and others pr_cookie == 0')
--
2.48.0
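
The sort key relies on every matched line carrying an 'op=<n> RESULT' token, so lines are reordered by operation number regardless of the order in which they were flushed. A tiny demonstration with fabricated log lines:

    import re

    lines = [
        '... conn=1 op=5 RESULT err=0 ... pr_cookie=-1',
        '... conn=1 op=3 RESULT err=0 ... pr_cookie=0',
        '... conn=1 op=4 RESULT err=0 ... pr_cookie=0',
    ]
    lines.sort(key=lambda x: int(re.search(r"op=(\d+) RESULT", x).group(1)))
    assert [line.rsplit('=', 1)[-1] for line in lines] == ['0', '0', '-1']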


@@ -0,0 +1,566 @@
From 8e3a484f88fc9f9a3fcdfdd685d4ad2ed3cbe5d9 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Fri, 28 Jun 2024 18:56:49 +0200
Subject: [PATCH] Issue 6229 - After an initial failure, subsequent online
backups fail (#6230)
* Issue 6229 - After an initial failure, subsequent online backups will not work
Several issues related to backup task error handling:
- Backends stay busy after the failure.
- Exit code is 0 in some cases.
- Crash if failing to open the backup directory.
And a more general one:
- lib389 Task DN collision.
Solutions:
- Always reset the busy flags that have been set.
- Ensure that 0 is not returned in the error case.
- Avoid closing a NULL directory descriptor.
- Use a timestamp with millisecond precision to create the task DN.
Issue: #6229
Reviewed by: @droideck (Thanks!)
(cherry picked from commit 04a0b6ac776a1d588ec2e10ff651e5015078ad21)
---
ldap/servers/slapd/back-ldbm/archive.c | 45 +++++-----
.../slapd/back-ldbm/db-mdb/mdb_layer.c | 3 +
src/lib389/lib389/__init__.py | 10 +--
src/lib389/lib389/tasks.py | 82 +++++++++----------
4 files changed, 70 insertions(+), 70 deletions(-)
diff --git a/ldap/servers/slapd/back-ldbm/archive.c b/ldap/servers/slapd/back-ldbm/archive.c
index 0460a42f6..6658cc80a 100644
--- a/ldap/servers/slapd/back-ldbm/archive.c
+++ b/ldap/servers/slapd/back-ldbm/archive.c
@@ -16,6 +16,8 @@
#include "back-ldbm.h"
#include "dblayer.h"
+#define NO_OBJECT ((Object*)-1)
+
int
ldbm_temporary_close_all_instances(Slapi_PBlock *pb)
{
@@ -270,6 +272,7 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
int run_from_cmdline = 0;
Slapi_Task *task;
struct stat sbuf;
+ Object *last_busy_inst_obj = NO_OBJECT;
slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
slapi_pblock_get(pb, SLAPI_SEQ_VAL, &rawdirectory);
@@ -380,13 +383,12 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
/* to avoid conflict w/ import, do this check for commandline, as well */
{
- Object *inst_obj, *inst_obj2;
ldbm_instance *inst = NULL;
/* server is up -- mark all backends busy */
- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
- inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
- inst = (ldbm_instance *)object_get_data(inst_obj);
+ for (last_busy_inst_obj = objset_first_obj(li->li_instance_set); last_busy_inst_obj;
+ last_busy_inst_obj = objset_next_obj(li->li_instance_set, last_busy_inst_obj)) {
+ inst = (ldbm_instance *)object_get_data(last_busy_inst_obj);
/* check if an import/restore is already ongoing... */
if (instance_set_busy(inst) != 0 || dblayer_in_import(inst) != 0) {
@@ -400,20 +402,6 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
"another task and cannot be disturbed.",
inst->inst_name);
}
-
- /* painfully, we have to clear the BUSY flags on the
- * backends we'd already marked...
- */
- for (inst_obj2 = objset_first_obj(li->li_instance_set);
- inst_obj2 && (inst_obj2 != inst_obj);
- inst_obj2 = objset_next_obj(li->li_instance_set,
- inst_obj2)) {
- inst = (ldbm_instance *)object_get_data(inst_obj2);
- instance_set_not_busy(inst);
- }
- if (inst_obj2 && inst_obj2 != inst_obj)
- object_release(inst_obj2);
- object_release(inst_obj);
goto err;
}
}
@@ -427,18 +415,26 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
goto err;
}
- if (!run_from_cmdline) {
+err:
+ /* Clear all BUSY flags that have been previously set */
+ if (last_busy_inst_obj != NO_OBJECT) {
ldbm_instance *inst;
Object *inst_obj;
- /* none of these backends are busy anymore */
- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
+ for (inst_obj = objset_first_obj(li->li_instance_set);
+ inst_obj && (inst_obj != last_busy_inst_obj);
inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
inst = (ldbm_instance *)object_get_data(inst_obj);
instance_set_not_busy(inst);
}
+ if (last_busy_inst_obj != NULL) {
+ /* release last seen object for aborted objset_next_obj iterations */
+ if (inst_obj != NULL) {
+ object_release(inst_obj);
+ }
+ object_release(last_busy_inst_obj);
+ }
}
-err:
if (return_value) {
if (dir_bak) {
slapi_log_err(SLAPI_LOG_ERR,
@@ -727,7 +723,10 @@ ldbm_archive_config(char *bakdir, Slapi_Task *task)
}
error:
- PR_CloseDir(dirhandle);
+ if (NULL != dirhandle) {
+ PR_CloseDir(dirhandle);
+ dirhandle = NULL;
+ }
dse_backup_unlock();
slapi_ch_free_string(&backup_config_dir);
slapi_ch_free_string(&dse_file);
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
index 4a7beedeb..3ecc47170 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
@@ -983,6 +983,9 @@ dbmdb_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task)
if (ldbm_archive_config(dest_dir, task) != 0) {
slapi_log_err(SLAPI_LOG_ERR, "dbmdb_backup",
"Backup of config files failed or is incomplete\n");
+ if (0 == return_value) {
+ return_value = -1;
+ }
}
goto bail;
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 368741a66..cb372c138 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -69,7 +69,7 @@ from lib389.utils import (
get_user_is_root)
from lib389.paths import Paths
from lib389.nss_ssl import NssSsl
-from lib389.tasks import BackupTask, RestoreTask
+from lib389.tasks import BackupTask, RestoreTask, Task
from lib389.dseldif import DSEldif
# mixin
@@ -1424,7 +1424,7 @@ class DirSrv(SimpleLDAPObject, object):
name, self.ds_paths.prefix)
# create the archive
- name = "backup_%s_%s.tar.gz" % (self.serverid, time.strftime("%m%d%Y_%H%M%S"))
+ name = "backup_%s_%s.tar.gz" % (self.serverid, Task.get_timestamp())
backup_file = os.path.join(backup_dir, name)
tar = tarfile.open(backup_file, "w:gz")
tar.extraction_filter = (lambda member, path: member)
@@ -2810,7 +2810,7 @@ class DirSrv(SimpleLDAPObject, object):
else:
# No output file specified. Use the default ldif location/name
cmd.append('-a')
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
+ tnow = Task.get_timestamp()
if bename:
ldifname = os.path.join(self.ds_paths.ldif_dir, "%s-%s-%s.ldif" % (self.serverid, bename, tnow))
else:
@@ -2881,7 +2881,7 @@ class DirSrv(SimpleLDAPObject, object):
if archive_dir is None:
# Use the instance name and date/time as the default backup name
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
+ tnow = Task.get_timestamp()
archive_dir = os.path.join(self.ds_paths.backup_dir, "%s-%s" % (self.serverid, tnow))
elif not archive_dir.startswith("/"):
# Relative path, append it to the bak directory
@@ -3506,7 +3506,7 @@ class DirSrv(SimpleLDAPObject, object):
if archive is None:
# Use the instance name and date/time as the default backup name
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
+ tnow = Task.get_timestamp()
if self.serverid is not None:
backup_dir_name = "%s-%s" % (self.serverid, tnow)
else:
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
index 6c2adb5b2..6bf302862 100644
--- a/src/lib389/lib389/tasks.py
+++ b/src/lib389/lib389/tasks.py
@@ -118,7 +118,7 @@ class Task(DSLdapObject):
return super(Task, self).create(rdn, properties, basedn)
@staticmethod
- def _get_task_date():
+ def get_timestamp():
"""Return a timestamp to use in naming new task entries."""
return datetime.now().isoformat()
@@ -132,7 +132,7 @@ class AutomemberRebuildMembershipTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'automember_rebuild_' + Task._get_task_date()
+ self.cn = 'automember_rebuild_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_REBUILD_TASK
super(AutomemberRebuildMembershipTask, self).__init__(instance, dn)
@@ -147,7 +147,7 @@ class AutomemberAbortRebuildTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'automember_abort_' + Task._get_task_date()
+ self.cn = 'automember_abort_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_ABORT_REBUILD_TASK
super(AutomemberAbortRebuildTask, self).__init__(instance, dn)
@@ -161,7 +161,7 @@ class FixupLinkedAttributesTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'fixup_linked_attrs_' + Task._get_task_date()
+ self.cn = 'fixup_linked_attrs_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_FIXUP_LINKED_ATTIBUTES
super(FixupLinkedAttributesTask, self).__init__(instance, dn)
@@ -175,7 +175,7 @@ class MemberUidFixupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'memberUid_fixup_' + Task._get_task_date()
+ self.cn = 'memberUid_fixup_' + Task.get_timestamp()
dn = f"cn={self.cn},cn=memberuid task,cn=tasks,cn=config"
super(MemberUidFixupTask, self).__init__(instance, dn)
@@ -190,7 +190,7 @@ class MemberOfFixupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'memberOf_fixup_' + Task._get_task_date()
+ self.cn = 'memberOf_fixup_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_MBO_TASK
super(MemberOfFixupTask, self).__init__(instance, dn)
@@ -205,7 +205,7 @@ class USNTombstoneCleanupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'usn_cleanup_' + Task._get_task_date()
+ self.cn = 'usn_cleanup_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=USN tombstone cleanup task," + DN_TASKS
super(USNTombstoneCleanupTask, self).__init__(instance, dn)
@@ -225,7 +225,7 @@ class csngenTestTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'csngenTest_' + Task._get_task_date()
+ self.cn = 'csngenTest_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=csngen_test," + DN_TASKS
super(csngenTestTask, self).__init__(instance, dn)
@@ -238,7 +238,7 @@ class EntryUUIDFixupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'entryuuid_fixup_' + Task._get_task_date()
+ self.cn = 'entryuuid_fixup_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_EUUID_TASK
super(EntryUUIDFixupTask, self).__init__(instance, dn)
self._must_attributes.extend(['basedn'])
@@ -252,7 +252,7 @@ class DBCompactTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'compact_db_' + Task._get_task_date()
+ self.cn = 'compact_db_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_COMPACTDB_TASK
super(DBCompactTask, self).__init__(instance, dn)
@@ -265,7 +265,7 @@ class SchemaReloadTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'schema_reload_' + Task._get_task_date()
+ self.cn = 'schema_reload_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=schema reload task," + DN_TASKS
super(SchemaReloadTask, self).__init__(instance, dn)
@@ -278,7 +278,7 @@ class SyntaxValidateTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'syntax_validate_' + Task._get_task_date()
+ self.cn = 'syntax_validate_' + Task.get_timestamp()
dn = f"cn={self.cn},cn=syntax validate,cn=tasks,cn=config"
super(SyntaxValidateTask, self).__init__(instance, dn)
@@ -295,7 +295,7 @@ class AbortCleanAllRUVTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'abortcleanallruv_' + Task._get_task_date()
+ self.cn = 'abortcleanallruv_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=abort cleanallruv," + DN_TASKS
super(AbortCleanAllRUVTask, self).__init__(instance, dn)
@@ -312,7 +312,7 @@ class CleanAllRUVTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'cleanallruv_' + Task._get_task_date()
+ self.cn = 'cleanallruv_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=cleanallruv," + DN_TASKS
self._properties = None
@@ -359,7 +359,7 @@ class ImportTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'import_' + Task._get_task_date()
+ self.cn = 'import_' + Task.get_timestamp()
dn = "cn=%s,%s" % (self.cn, DN_IMPORT_TASK)
self._properties = None
@@ -388,7 +388,7 @@ class ExportTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'export_' + Task._get_task_date()
+ self.cn = 'export_' + Task.get_timestamp()
dn = "cn=%s,%s" % (self.cn, DN_EXPORT_TASK)
self._properties = None
@@ -411,7 +411,7 @@ class BackupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'backup_' + Task._get_task_date()
+ self.cn = 'backup_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=backup," + DN_TASKS
self._properties = None
@@ -426,7 +426,7 @@ class RestoreTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'restore_' + Task._get_task_date()
+ self.cn = 'restore_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=restore," + DN_TASKS
self._properties = None
@@ -513,7 +513,7 @@ class Tasks(object):
raise ValueError("Import file (%s) does not exist" % input_file)
# Prepare the task entry
- cn = "import_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = "import_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_IMPORT_TASK)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -581,7 +581,7 @@ class Tasks(object):
raise ValueError("output_file is mandatory")
# Prepare the task entry
- cn = "export_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = "export_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_EXPORT_TASK)
entry = Entry(dn)
entry.update({
@@ -637,7 +637,7 @@ class Tasks(object):
raise ValueError("You must specify a backup directory.")
# build the task entry
- cn = "backup_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = "backup_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_BACKUP_TASK)
entry = Entry(dn)
entry.update({
@@ -694,7 +694,7 @@ class Tasks(object):
raise ValueError("Backup file (%s) does not exist" % backup_dir)
# build the task entry
- cn = "restore_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = "restore_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_RESTORE_TASK)
entry = Entry(dn)
entry.update({
@@ -789,7 +789,7 @@ class Tasks(object):
attrs.append(attr)
else:
attrs.append(attrname)
- cn = "index_vlv_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
+ cn = "index_vlv_%s" % (Task.get_timestamp())
dn = "cn=%s,%s" % (cn, DN_INDEX_TASK)
entry = Entry(dn)
entry.update({
@@ -803,7 +803,7 @@ class Tasks(object):
#
# Reindex all attributes - gather them first...
#
- cn = "index_all_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
+ cn = "index_all_%s" % (Task.get_timestamp())
dn = ('cn=%s,cn=ldbm database,cn=plugins,cn=config' % backend)
try:
indexes = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, '(objectclass=nsIndex)')
@@ -815,7 +815,7 @@ class Tasks(object):
#
# Reindex specific attributes
#
- cn = "index_attrs_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
+ cn = "index_attrs_%s" % (Task.get_timestamp())
if isinstance(attrname, (tuple, list)):
# Need to guarantee this is a list (and not a tuple)
for attr in attrname:
@@ -903,8 +903,7 @@ class Tasks(object):
suffix = ents[0].getValue(attr)
- cn = "fixupmemberof_" + time.strftime("%m%d%Y_%H%M%S",
- time.localtime())
+ cn = "fixupmemberof_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_MBO_TASK)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -965,8 +964,7 @@ class Tasks(object):
if len(ents) != 1:
raise ValueError("invalid backend name: %s" % bename)
- cn = "fixupTombstone_" + time.strftime("%m%d%Y_%H%M%S",
- time.localtime())
+ cn = "fixupTombstone_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_TOMB_FIXUP_TASK)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1019,7 +1017,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=automember rebuild membership,cn=tasks,cn=config' % cn)
entry = Entry(dn)
@@ -1077,7 +1075,7 @@ class Tasks(object):
if not ldif_out:
raise ValueError("Missing ldif_out")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=automember export updates,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1129,7 +1127,7 @@ class Tasks(object):
if not ldif_out or not ldif_in:
raise ValueError("Missing ldif_out and/or ldif_in")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=automember map updates,cn=tasks,cn=config' % cn)
entry = Entry(dn)
@@ -1175,7 +1173,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=fixup linked attributes,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1219,7 +1217,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=schema reload task,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1264,7 +1262,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=memberuid task,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1311,7 +1309,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=syntax validate,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1358,7 +1356,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=USN tombstone cleanup task,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1413,7 +1411,7 @@ class Tasks(object):
if not configfile:
raise ValueError("Missing required paramter: configfile")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=sysconfig reload,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1464,7 +1462,7 @@ class Tasks(object):
if not suffix:
raise ValueError("Missing required paramter: suffix")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=cleanallruv,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1516,7 +1514,7 @@ class Tasks(object):
if not suffix:
raise ValueError("Missing required paramter: suffix")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=abort cleanallruv,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1571,7 +1569,7 @@ class Tasks(object):
if not nsArchiveDir:
raise ValueError("Missing required paramter: nsArchiveDir")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=upgradedb,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1616,6 +1614,6 @@ class LDAPIMappingReloadTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'reload-' + Task._get_task_date()
+ self.cn = 'reload-' + Task.get_timestamp()
dn = f'cn={self.cn},cn=reload ldapi mappings,cn=tasks,cn=config'
super(LDAPIMappingReloadTask, self).__init__(instance, dn)
--
2.48.0

@@ -0,0 +1,165 @@
From 2b1b2db90c9d337166fa28e313f60828cd43de09 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 6 Feb 2025 18:25:36 +0100
Subject: [PATCH] Issue 6554 - During import of entries without nsUniqueId, a
supplier generates duplicate nsUniqueId (LMDB only) (#6582)
Bug description:
During an import each entry is prepared (schema, operational
attributes, password encryption, ...) before the update of the
database and indexes starts.
One step of that preparation assigns a value to the 'nsuniqueid'
operational attribute, which must be unique.
With LMDB the preparation is done by multiple threads (workers),
so the 'nsuniqueid' values are generated in parallel and, because
generation is time based, several values can be duplicated.
Fix description:
To prevent this, the routine dbmdb_import_generate_uniqueid
now serializes the workers.
fixes: #6554
Reviewed by: Pierre Rogier
---
.../tests/suites/import/import_test.py | 79 ++++++++++++++++++-
.../back-ldbm/db-mdb/mdb_import_threads.c | 11 +++
2 files changed, 89 insertions(+), 1 deletion(-)
diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py
index dbd921924..54d304753 100644
--- a/dirsrvtests/tests/suites/import/import_test.py
+++ b/dirsrvtests/tests/suites/import/import_test.py
@@ -14,11 +14,13 @@ import os
import pytest
import time
import glob
+import re
import logging
import subprocess
from datetime import datetime
from lib389.topologies import topology_st as topo
-from lib389._constants import DEFAULT_SUFFIX, TaskWarning
+from lib389.topologies import topology_m2 as topo_m2
+from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX, TaskWarning
from lib389.dbgen import dbgen_users
from lib389.tasks import ImportTask
from lib389.index import Indexes
@@ -688,6 +690,81 @@ def test_online_import_under_load(topo):
assert import_task.get_exit_code() == 0
+def test_duplicate_nsuniqueid(topo_m2, request):
+ """Test that after an offline import all
+ nsuniqueid are different
+
+ :id: a2541677-a288-4633-bacf-4050cc56016d
+ :setup: MMR with 2 suppliers
+ :steps:
+         1. Stop the instance to do offline operations
+         2. Generate a 5K users LDIF file
+         3. Check that no nsuniqueid values are present in the generated file
+         4. Import the generated LDIF
+         5. Export the database
+         6. Check that the exported LDIF contains at least 5K nsuniqueid values
+         7. Check that there is no duplicate nsuniqueid in the exported LDIF
+ :expectedresults:
+         1. Should succeed
+         2. Should succeed
+         3. Should succeed
+         4. Should succeed
+         5. Should succeed
+         6. Should succeed
+         7. Should succeed
+ """
+ m1 = topo_m2.ms["supplier1"]
+
+ # Stop the instance
+ m1.stop()
+
+ # Generate a test ldif (5k entries)
+ log.info("Generating LDIF...")
+ ldif_dir = m1.get_ldif_dir()
+ import_ldif = ldif_dir + '/5k_users_import.ldif'
+ dbgen_users(m1, 5000, import_ldif, DEFAULT_SUFFIX)
+
+ # Check that the generated LDIF does not contain nsuniqueid
+ all_nsuniqueid = []
+ with open(import_ldif, 'r') as file:
+ for line in file:
+ if line.lower().startswith("nsuniqueid: "):
+ all_nsuniqueid.append(line.split(': ')[1])
+ log.info("import file contains " + str(len(all_nsuniqueid)) + " nsuniqueid")
+ assert len(all_nsuniqueid) == 0
+
+    # Import the nsuniqueid-free LDIF file
+ if not m1.ldif2db('userRoot', None, None, None, import_ldif):
+ assert False
+
+    # Export the DB, which should now contain nsuniqueid
+ export_ldif = ldif_dir + '/5k_user_export.ldif'
+ log.info("export to file " + export_ldif)
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=export_ldif, encrypt=False)
+
+    # Check that the exported LDIF contains nsuniqueid
+ all_nsuniqueid = []
+ with open(export_ldif, 'r') as file:
+ for line in file:
+ if line.lower().startswith("nsuniqueid: "):
+ all_nsuniqueid.append(line.split(': ')[1])
+ log.info("export file " + export_ldif + " contains " + str(len(all_nsuniqueid)) + " nsuniqueid")
+ assert len(all_nsuniqueid) >= 5000
+
+    # Check that the nsuniqueid values are unique
+ assert len(set(all_nsuniqueid)) == len(all_nsuniqueid)
+
+ def fin():
+ if os.path.exists(import_ldif):
+ os.remove(import_ldif)
+ if os.path.exists(export_ldif):
+ os.remove(export_ldif)
+        m1.start()
+
+ request.addfinalizer(fin)
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
index 707a110c5..0f445bb56 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
@@ -610,10 +610,20 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e)
{
const char *uniqueid = slapi_entry_get_uniqueid(e);
int rc = UID_SUCCESS;
+ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
if (!uniqueid && (job->uuid_gen_type != SLAPI_UNIQUEID_GENERATE_NONE)) {
char *newuniqueid;
+    /* With 'mdb' several workers generate nsuniqueid values, so
+     * we must serialize them to prevent duplicates.
+     * Performance-wise this only impacts import.
+     * The default mode is SLAPI_UNIQUEID_GENERATE_TIME_BASED, so
+     * the only syscall is clock_gettime followed by string
+     * formatting, which should limit contention.
+     */
+ pthread_mutex_lock(&mutex);
+
/* generate id based on dn */
if (job->uuid_gen_type == SLAPI_UNIQUEID_GENERATE_NAME_BASED) {
char *dn = slapi_entry_get_dn(e);
@@ -624,6 +634,7 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e)
/* time based */
rc = slapi_uniqueIDGenerateString(&newuniqueid);
}
+ pthread_mutex_unlock(&mutex);
if (rc == UID_SUCCESS) {
slapi_entry_set_uniqueid(e, newuniqueid);
--
2.48.0
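The fix above is one instance of a general pattern: when IDs are derived from the clock, all generators must funnel through a single lock so two workers can never turn the same clock reading into the same ID. Below is a minimal standalone C sketch of that pattern; the names and the monotonic nudge are illustrative assumptions, not the slapi_uniqueIDGenerateString internals. Build with: gcc -pthread sketch.c

/* sketch.c -- standalone illustration (not the server code) of the
 * serialization pattern applied by the fix: every worker takes one
 * static mutex around the clock read, so two threads can never turn
 * the same timestamp into the same ID. */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t gen_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long last_id = 0;

static unsigned long long
generate_time_based_id(void)
{
    struct timespec ts;
    unsigned long long id;

    pthread_mutex_lock(&gen_lock);
    clock_gettime(CLOCK_REALTIME, &ts);
    id = (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    if (id <= last_id)    /* same clock reading as the previous caller */
        id = last_id + 1; /* nudge forward so the ID stays unique */
    last_id = id;
    pthread_mutex_unlock(&gen_lock);
    return id;
}

static void *
worker(void *arg)
{
    (void)arg;
    for (int i = 0; i < 100000; i++)
        generate_time_based_id();
    return NULL;
}

int
main(void)
{
    pthread_t t[8];
    for (int i = 0; i < 8; i++)
        pthread_create(&t[i], NULL, worker, NULL);
    for (int i = 0; i < 8; i++)
        pthread_join(t[i], NULL);
    printf("generated up to id %llu with no duplicates\n", last_id);
    return 0;
}

Removing gen_lock lets two workers read an identical clock value and emit the same ID, which is exactly the duplicate-nsuniqueid symptom. The nudge on collision is only part of the sketch; the patch itself relies on serializing slapi_uniqueIDGenerateString, as shown in the hunk above.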

@@ -0,0 +1,77 @@
From e638e801afd51ca44523222a90a9f69f4be82ae3 Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Fri, 7 Feb 2025 14:47:29 +1000
Subject: [PATCH] Issue 6596 - BUG - Compilation Regression (#6597)
Bug Description: The addition of the json auditlog feature caused
a compilation regression because declarations were placed directly
after case labels.
Fix Description: Enclose each case body in braces to resolve the
compilation issue.
fixes: https://github.com/389ds/389-ds-base/issues/6596
Author: William Brown <william@blackhats.net.au>
Reviewed by: @droideck (Thanks!)
---
ldap/servers/slapd/auditlog.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index c288a1a7f..ff9a6fdde 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -456,7 +456,7 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
add_entry_attrs_json(entry, log_json);
switch (optype) {
- case SLAPI_OPERATION_MODIFY:
+ case SLAPI_OPERATION_MODIFY: {
json_object *mod_list = json_object_new_array();
mods = change;
for (size_t j = 0; (mods != NULL) && (mods[j] != NULL); j++) {
@@ -511,8 +511,8 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
/* Add entire mod list to the main object */
json_object_object_add(log_json, "modify", mod_list);
break;
-
- case SLAPI_OPERATION_ADD:
+ }
+ case SLAPI_OPERATION_ADD: {
int len;
e = change;
tmp = slapi_entry2str(e, &len);
@@ -526,8 +526,8 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
json_object_object_add(log_json, "add", json_object_new_string(tmp));
slapi_ch_free_string(&tmpsave);
break;
-
- case SLAPI_OPERATION_DELETE:
+ }
+ case SLAPI_OPERATION_DELETE: {
tmp = change;
del_obj = json_object_new_object();
if (tmp && tmp[0]) {
@@ -538,8 +538,8 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
json_object_object_add(log_json, "delete", del_obj);
}
break;
-
- case SLAPI_OPERATION_MODDN:
+ }
+ case SLAPI_OPERATION_MODDN: {
newrdn = ((char **)change)[0];
modrdn_obj = json_object_new_object();
json_object_object_add(modrdn_obj, attr_newrdn, json_object_new_string(newrdn));
@@ -551,6 +551,7 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
}
json_object_object_add(log_json, "modrdn", modrdn_obj);
break;
+ }
}
msg = (char *)json_object_to_json_string_ext(log_json, log_format);
--
2.48.0
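The regression boils down to a plain C rule: a case label must be followed by a statement, and before C23 a declaration is not a statement, so a form like "case SLAPI_OPERATION_ADD: int len;" is rejected by stricter compilers. Braces turn the case body into a compound statement, which may begin with declarations. A minimal sketch with hypothetical values:

/* Minimal sketch (illustrative values): braces after a case label
 * open a compound statement, so declarations become legal there. */
#include <stdio.h>

static void
demo(int optype)
{
    switch (optype) {
    case 1: {            /* braces introduce a compound statement */
        int len = 42;    /* ...so this declaration is legal here */
        printf("len=%d\n", len);
        break;
    }
    default:
        break;
    }
}

int
main(void)
{
    demo(1);
    return 0;
}

Hoisting the declarations above the switch would also compile, but per-case braces keep each variable scoped to the branch that uses it, which is what the patch does.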

File diff suppressed because it is too large

@@ -506,6 +506,18 @@ Source4: 389-ds-base.sysusers
Source5: https://fedorapeople.org/groups/389ds/libdb-5.3.28-59.tar.bz2
%endif
Patch: 0001-Issue-6544-logconv.py-python3-magic-conflicts-with-p.patch
Patch: 0002-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch
Patch: 0003-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch
Patch: 0004-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch
Patch: 0005-Issue-6436-MOD-on-a-large-group-slow-if-substring-in.patch
Patch: 0006-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch
Patch: 0007-Issue-6258-Mitigate-race-condition-in-paged_results_.patch
Patch: 0008-Issue-6229-After-an-initial-failure-subsequent-onlin.patch
Patch: 0009-Issue-6554-During-import-of-entries-without-nsUnique.patch
Patch: 0010-Issue-6596-BUG-Compilation-Regresion-6597.patch
Patch: 0011-Issue-6367-RFE-support-of-Session-Tracking-Control-i.patch
%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
the LDAP server and command line utilities for server administration.

@@ -14,4 +14,4 @@
/test:
/upstream_basic:
test: pytest -v /root/ds/dirsrvtests/tests/suites/basic/basic_test.py
-duration: 30m
+duration: 60m