diff --git a/.389-ds-base.metadata b/.389-ds-base.metadata
index b3fb6c1..43ee160 100644
--- a/.389-ds-base.metadata
+++ b/.389-ds-base.metadata
@@ -1,2 +1,2 @@
-274dec37976c1efde9cbeb458d50bbcd6b244974 SOURCES/389-ds-base-2.5.2.tar.bz2
+25969f6e65d79aa29671eff7185e4307ff3c08a0 SOURCES/389-ds-base-2.6.1.tar.bz2
 1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2
diff --git a/.gitignore b/.gitignore
index b46948c..13fa011 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,2 @@
-SOURCES/389-ds-base-2.5.2.tar.bz2
+SOURCES/389-ds-base-2.6.1.tar.bz2
 SOURCES/jemalloc-5.3.0.tar.bz2
diff --git a/SOURCES/0001-Issue-6468-Fix-building-for-older-versions-of-Python.patch b/SOURCES/0001-Issue-6468-Fix-building-for-older-versions-of-Python.patch
new file mode 100644
index 0000000..b3e87c5
--- /dev/null
+++ b/SOURCES/0001-Issue-6468-Fix-building-for-older-versions-of-Python.patch
@@ -0,0 +1,60 @@
+From 0921400a39b61687db2bc55ebd5021eef507e960 Mon Sep 17 00:00:00 2001
+From: Viktor Ashirov <vashirov@redhat.com>
+Date: Tue, 28 Jan 2025 21:05:49 +0100
+Subject: [PATCH] Issue 6468 - Fix building for older versions of Python
+
+Bug Description:
+Structural Pattern Matching was added in Python 3.10; older versions
+do not support it.
+
+Fix Description:
+Replace `match` and `case` statements with `if-elif`.
+
+Relates: https://github.com/389ds/389-ds-base/issues/6468
+
+Reviewed by: @droideck (Thanks!)
+---
+ src/lib389/lib389/cli_conf/logging.py | 27 ++++++++++++++-------------
+ 1 file changed, 14 insertions(+), 13 deletions(-)
+
+diff --git a/src/lib389/lib389/cli_conf/logging.py b/src/lib389/lib389/cli_conf/logging.py
+index 2e86f2de8..d1e32822c 100644
+--- a/src/lib389/lib389/cli_conf/logging.py
++++ b/src/lib389/lib389/cli_conf/logging.py
+@@ -234,19 +234,20 @@ def get_log_config(inst, basedn, log, args):
+     attr_map = {}
+     levels = {}
+ 
+-    match args.logtype:
+-        case "access":
+-            attr_map = ACCESS_ATTR_MAP
+-            levels = ACCESS_LEVELS
+-        case "error":
+-            attr_map = ERROR_ATTR_MAP
+-            levels = ERROR_LEVELS
+-        case "security":
+-            attr_map = SECURITY_ATTR_MAP
+-        case "audit":
+-            attr_map = AUDIT_ATTR_MAP
+-        case "auditfail":
+-            attr_map = AUDITFAIL_ATTR_MAP
++    if args.logtype == "access":
++        attr_map = ACCESS_ATTR_MAP
++        levels = ACCESS_LEVELS
++    elif args.logtype == "error":
++        attr_map = ERROR_ATTR_MAP
++        levels = ERROR_LEVELS
++    elif args.logtype == "security":
++        attr_map = SECURITY_ATTR_MAP
++    elif args.logtype == "audit":
++        attr_map = AUDIT_ATTR_MAP
++    elif args.logtype == "auditfail":
++        attr_map = AUDITFAIL_ATTR_MAP
++    else:
++        raise ValueError(f"Unknown logtype: {args.logtype}")
+ 
+     sorted_results = []
+     for attr, value in attrs.items():
+-- 
+2.48.0
+
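Note on the patch above: the if/elif ladder is the most direct pre-3.10
replacement for match/case. A table-driven lookup is an equivalent sketch,
assuming the *_ATTR_MAP and *_LEVELS constants from
src/lib389/lib389/cli_conf/logging.py:

    # Sketch only: dict dispatch, equivalent to the if/elif ladder and
    # still compatible with Python < 3.10.
    LOGTYPE_TABLE = {
        "access": (ACCESS_ATTR_MAP, ACCESS_LEVELS),
        "error": (ERROR_ATTR_MAP, ERROR_LEVELS),
        "security": (SECURITY_ATTR_MAP, {}),
        "audit": (AUDIT_ATTR_MAP, {}),
        "auditfail": (AUDITFAIL_ATTR_MAP, {}),
    }
    try:
        attr_map, levels = LOGTYPE_TABLE[args.logtype]
    except KeyError:
        raise ValueError(f"Unknown logtype: {args.logtype}") from None
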
diff --git a/SOURCES/0002-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch b/SOURCES/0002-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch
new file mode 100644
index 0000000..42ca433
--- /dev/null
+++ b/SOURCES/0002-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch
@@ -0,0 +1,146 @@
+From 12f9bf81e834549db02b1243ecf769b511c9f69f Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Fri, 31 Jan 2025 08:54:27 -0500
+Subject: [PATCH] Issue 6489 - After log rotation refresh the FD pointer
+
+Description:
+
+When flushing a log buffer we get an FD for the log prior to checking if the
+log should be rotated.  If the log is rotated, that FD reference is now
+invalid and needs to be refreshed before proceeding.
+
+Relates: https://github.com/389ds/389-ds-base/issues/6489
+
+Reviewed by: tbordaz (Thanks!)
+---
+ .../suites/logging/log_flush_rotation_test.py | 81 +++++++++++++++++++
+ ldap/servers/slapd/log.c                      | 18 +++++
+ 2 files changed, 99 insertions(+)
+ create mode 100644 dirsrvtests/tests/suites/logging/log_flush_rotation_test.py
+
+diff --git a/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py b/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py
+new file mode 100644
+index 000000000..b33a622e1
+--- /dev/null
++++ b/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py
+@@ -0,0 +1,81 @@
++# --- BEGIN COPYRIGHT BLOCK ---
++# Copyright (C) 2025 Red Hat, Inc.
++# All rights reserved.
++#
++# License: GPL (version 3 or any later version).
++# See LICENSE for details.
++# --- END COPYRIGHT BLOCK ---
++#
++import os
++import logging
++import time
++import pytest
++from lib389._constants import DEFAULT_SUFFIX, PW_DM
++from lib389.tasks import ImportTask
++from lib389.idm.user import UserAccounts
++from lib389.topologies import topology_st as topo
++
++
++log = logging.getLogger(__name__)
++
++
++def test_log_flush_and_rotation_crash(topo):
++    """Make sure server does not crash whening flushing a buffer and rotating
++    the log at the same time
++
++    :id: d4b0af2f-48b2-45f5-ae8b-f06f692c3133
++    :setup: Standalone Instance
++    :steps:
++        1. Enable all logs
++        2. Enable log buffering for all logs
++        3. Set rotation time unit to 1 minute
++        4. Make sure server is still running after 1 minute
++    :expectedresults:
++        1. Success
++        2. Success
++        3. Success
++        4. Success
++    """
++
++    inst = topo.standalone
++
++    # Enable logging and buffering
++    inst.config.set("nsslapd-auditlog-logging-enabled", "on")
++    inst.config.set("nsslapd-accesslog-logbuffering", "on")
++    inst.config.set("nsslapd-auditlog-logbuffering", "on")
++    inst.config.set("nsslapd-errorlog-logbuffering", "on")
++    inst.config.set("nsslapd-securitylog-logbuffering", "on")
++
++    # Set rotation policy to trigger rotation asap
++    inst.config.set("nsslapd-accesslog-logrotationtimeunit", "minute")
++    inst.config.set("nsslapd-auditlog-logrotationtimeunit", "minute")
++    inst.config.set("nsslapd-errorlog-logrotationtimeunit", "minute")
++    inst.config.set("nsslapd-securitylog-logrotationtimeunit", "minute")
++
++    #
++    # Performs ops to populate all the logs
++    #
++    # Access & audit log
++    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
++    user = users.create_test_user()
++    user.set("userPassword", PW_DM)
++    # Security log
++    user.bind(PW_DM)
++    # Error log
++    import_task = ImportTask(inst)
++    import_task.import_suffix_from_ldif(ldiffile="/not/here",
++                                        suffix=DEFAULT_SUFFIX)
++
++    # Wait a minute and make sure the server did not crash
++    log.info("Sleep until logs are flushed and rotated")
++    time.sleep(61)
++
++    assert inst.status()
++
++
++if __name__ == '__main__':
++    # Run isolated
++    # -s for DEBUG mode
++    CURRENT_FILE = os.path.realpath(__file__)
++    pytest.main(["-s", CURRENT_FILE])
++
+diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
+index 8352f4abd..c1260a203 100644
+--- a/ldap/servers/slapd/log.c
++++ b/ldap/servers/slapd/log.c
+@@ -6746,6 +6746,23 @@ log_refresh_state(int32_t log_type)
+         return 0;
+     }
+ }
++static LOGFD
++log_refresh_fd(int32_t log_type)
++{
++    switch (log_type) {
++    case SLAPD_ACCESS_LOG:
++        return loginfo.log_access_fdes;
++    case SLAPD_SECURITY_LOG:
++        return loginfo.log_security_fdes;
++    case SLAPD_AUDIT_LOG:
++        return loginfo.log_audit_fdes;
++    case SLAPD_AUDITFAIL_LOG:
++        return loginfo.log_auditfail_fdes;
++    case SLAPD_ERROR_LOG:
++        return loginfo.log_error_fdes;
++    }
++    return NULL;
++}
+ 
+ /* this function assumes the lock is already acquired */
+ /* if sync_now is non-zero, data is flushed to physical storage */
+@@ -6857,6 +6874,7 @@ log_flush_buffer(LogBufferInfo *lbi, int log_type, int sync_now, int locked)
+                                                         rotationtime_secs);
+         }
+         log_state = log_refresh_state(log_type);
++        fd = log_refresh_fd(log_type);
+     }
+ 
+     if (log_state & LOGGING_NEED_TITLE) {
+-- 
+2.48.0
+
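Note on the patch above: the one-line core of the fix is re-reading the
cached FD after the rotation check, because rotation may have replaced the
file. The same invariant in a minimal, runnable Python sketch (illustrative
only, not the server code):

    import os
    import threading

    class RotatingLog:
        # Toy model of the race fixed above: rotation replaces the file
        # handle, so a flusher must re-fetch it, never reuse a cached copy.
        def __init__(self, path, max_bytes=4096):
            self._lock = threading.Lock()
            self._path = path
            self._max = max_bytes
            self._written = 0
            self._fh = open(path, "a")

        def _rotate_locked(self):
            # Replaces self._fh: any handle grabbed earlier is now stale.
            self._fh.close()
            os.replace(self._path, self._path + ".1")
            self._fh = open(self._path, "a")
            self._written = 0

        def flush(self, buf):
            with self._lock:
                fh = self._fh              # like the old C code: fetched early
                if self._written + len(buf) > self._max:
                    self._rotate_locked()
                    fh = self._fh          # the fix: refresh after rotation
                fh.write(buf)
                fh.flush()
                self._written += len(buf)
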
diff --git a/SOURCES/0003-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch b/SOURCES/0003-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch
new file mode 100644
index 0000000..44a0cb1
--- /dev/null
+++ b/SOURCES/0003-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch
@@ -0,0 +1,311 @@
+From f077f9692d1625a1bc2dc6ee02a4fca71ee30b03 Mon Sep 17 00:00:00 2001
+From: progier389 <progier@redhat.com>
+Date: Wed, 13 Nov 2024 15:31:35 +0100
+Subject: [PATCH] Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work
+ properly (#6400)
+
+* Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work properly
+
+Several issues:
+
+- After restarting the server, nsslapd-mdb-max-dbs may not be high enough to add a new backend because the value computation is wrong.
+- dbscan fails to open the database if nsslapd-mdb-max-dbs has been increased.
+- dbscan crashes when closing the database (typically when using -S).
+
+Fixes: when starting the instance, the nsslapd-mdb-max-dbs parameter is now increased to ensure that a new backend may be added;
+when the dse.ldif path is not specified, the db environment is now opened using the INFO.mdb data instead of the default values;
+synchronization between thread closure and database context destruction is hardened.
+Issue: #6374
+
+Reviewed by: @tbordaz , @vashirov (Thanks!)
+
+(cherry picked from commit 56cd3389da608a3f6eeee58d20dffbcd286a8033)
+---
+ .../tests/suites/config/config_test.py        | 86 +++++++++++++++++++
+ ldap/servers/slapd/back-ldbm/back-ldbm.h      |  2 +
+ .../slapd/back-ldbm/db-mdb/mdb_config.c       | 17 ++--
+ .../back-ldbm/db-mdb/mdb_import_threads.c     |  9 +-
+ .../slapd/back-ldbm/db-mdb/mdb_instance.c     |  8 ++
+ ldap/servers/slapd/back-ldbm/dbimpl.c         |  2 +-
+ ldap/servers/slapd/back-ldbm/import.c         | 14 ++-
+ 7 files changed, 128 insertions(+), 10 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
+index 57b155af7..34dac36b6 100644
+--- a/dirsrvtests/tests/suites/config/config_test.py
++++ b/dirsrvtests/tests/suites/config/config_test.py
+@@ -17,6 +17,7 @@ from lib389.topologies import topology_m2, topology_st as topo
+ from lib389.utils import *
+ from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX, DEFAULT_BENAME
+ from lib389._mapped_object import DSLdapObjects
++from lib389.agreement import Agreements
+ from lib389.cli_base import FakeArgs
+ from lib389.cli_conf.backend import db_config_set
+ from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
+@@ -27,6 +28,8 @@ from lib389.cos import CosPointerDefinitions, CosTemplates
+ from lib389.backend import Backends, DatabaseConfig
+ from lib389.monitor import MonitorLDBM, Monitor
+ from lib389.plugins import ReferentialIntegrityPlugin
++from lib389.replica import BootstrapReplicationManager, Replicas
++from lib389.passwd import password_generate
+ 
+ pytestmark = pytest.mark.tier0
+ 
+@@ -36,6 +39,8 @@ PSTACK_CMD = '/usr/bin/pstack'
+ logging.getLogger(__name__).setLevel(logging.INFO)
+ log = logging.getLogger(__name__)
+ 
++DEBUGGING = os.getenv("DEBUGGING", default=False)
++
+ @pytest.fixture(scope="module")
+ def big_file():
+     TEMP_BIG_FILE = ''
+@@ -811,6 +816,87 @@ def test_numlisteners_limit(topo):
+     assert numlisteners[0] == '4'
+ 
+ 
++def bootstrap_replication(inst_from, inst_to, creds):
++    manager = BootstrapReplicationManager(inst_to)
++    rdn_val = 'replication manager'
++    if  manager.exists():
++        manager.delete()
++    manager.create(properties={
++        'cn': rdn_val,
++        'uid': rdn_val,
++        'userPassword': creds
++    })
++    for replica in Replicas(inst_to).list():
++        replica.remove_all('nsDS5ReplicaBindDNGroup')
++        replica.replace('nsDS5ReplicaBindDN', manager.dn)
++    for agmt in Agreements(inst_from).list():
++        agmt.replace('nsDS5ReplicaBindDN', manager.dn)
++        agmt.replace('nsDS5ReplicaCredentials', creds)
++
++
++@pytest.mark.skipif(get_default_db_lib() != "mdb", reason="This test requires lmdb")
++def test_lmdb_autotuned_maxdbs(topology_m2, request):
++    """Verify that after restart, nsslapd-mdb-max-dbs is large enough to add a new backend.
++
++    :id: 0272d432-9080-11ef-8f40-482ae39447e5
++    :setup: Two suppliers configuration
++    :steps:
++        1. loop 20 times
++        3. In 1 loop: restart instance
++        3. In 1 loop: add a new backend
++        4. In 1 loop: check that instance is still alive
++    :expectedresults:
++        1. Success
++        2. Success
++        3. Success
++        4. Success
++    """
++
++    s1 = topology_m2.ms["supplier1"]
++    s2 = topology_m2.ms["supplier2"]
++
++    backends = Backends(s1)
++    db_config = DatabaseConfig(s1)
++    # Generate the teardown finalizer
++    belist = []
++    creds = password_generate()
++    bootstrap_replication(s2, s1, creds)
++    bootstrap_replication(s1, s2, creds)
++
++    def fin():
++        s1.start()
++        for be in belist:
++            be.delete()
++
++    if not DEBUGGING:
++        request.addfinalizer(fin)
++
++    # 1. Set autotuning (off-line to be able to decrease the value)
++    s1.stop()
++    dse_ldif = DSEldif(s1)
++    dse_ldif.replace(db_config.dn, 'nsslapd-mdb-max-dbs', '0')
++    os.remove(f'{s1.dbdir}/data.mdb')
++    s1.start()
++
++    # 2. Reinitialize the db:
++    log.info("Bulk import...")
++    agmt = Agreements(s2).list()[0]
++    agmt.begin_reinit()
++    (done, error) = agmt.wait_reinit()
++    log.info(f'Bulk import result is ({done}, {error})')
++    assert done is True
++    assert error is False
++
++    # 3. loop 20 times
++    for idx in range(20):
++        s1.restart()
++        log.info(f'Adding backend test{idx}')
++        belist.append(backends.create(properties={'cn': f'test{idx}',
++                                     'nsslapd-suffix': f'dc=test{idx}'}))
++        assert s1.status()
++
++
++
+ if __name__ == '__main__':
+     # Run isolated
+     # -s for DEBUG mode
+diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
+index 8fea63e35..35d0ece04 100644
+--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
++++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
+@@ -896,4 +896,6 @@ typedef struct _back_search_result_set
+     ((L)->size == (R)->size && !memcmp((L)->data, (R)->data, (L)->size))
+ 
+ typedef int backend_implement_init_fn(struct ldbminfo *li, config_info *config_array);
++
++pthread_mutex_t *get_import_ctx_mutex();
+ #endif /* _back_ldbm_h_ */
+diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
+index 351f54037..1f7b71442 100644
+--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
+@@ -83,7 +83,7 @@ dbmdb_compute_limits(struct ldbminfo *li)
+     uint64_t total_space = 0;
+     uint64_t avail_space = 0;
+     uint64_t cur_dbsize = 0;
+-    int nbchangelogs = 0;
++    int nbvlvs = 0;
+     int nbsuffixes = 0;
+     int nbindexes = 0;
+     int nbagmt = 0;
+@@ -99,8 +99,8 @@ dbmdb_compute_limits(struct ldbminfo *li)
+      *  But some tunable may be autotuned.
+      */
+     if (dbmdb_count_config_entries("(objectClass=nsMappingTree)", &nbsuffixes) ||
+-        dbmdb_count_config_entries("(objectClass=nsIndex)", &nbsuffixes) ||
+-        dbmdb_count_config_entries("(&(objectClass=nsds5Replica)(nsDS5Flags=1))", &nbchangelogs) ||
++        dbmdb_count_config_entries("(objectClass=nsIndex)", &nbindexes) ||
++        dbmdb_count_config_entries("(objectClass=vlvIndex)", &nbvlvs) ||
+         dbmdb_count_config_entries("(objectClass=nsds5replicationagreement)", &nbagmt)) {
+         /* error message is already logged */
+         return 1;
+@@ -120,8 +120,15 @@ dbmdb_compute_limits(struct ldbminfo *li)
+ 
+     info->pagesize = sysconf(_SC_PAGE_SIZE);
+     limits->min_readers = config_get_threadnumber() + nbagmt + DBMDB_READERS_MARGIN;
+-    /* Default indexes are counted in "nbindexes" so we should always have enough resource to add 1 new suffix */
+-    limits->min_dbs = nbsuffixes + nbindexes + nbchangelogs + DBMDB_DBS_MARGIN;
++    /*
++     * For each suffix there are 4 database instances:
++     *  long-entryrdn, replication_changelog, id2entry and ancestorid,
++     * then the indexes, plus the vlv index and its cache per VLV.
++     *
++     * Default indexes are counted in "nbindexes", so we should always have
++     *  enough resources to add 1 new suffix.
++     */
++    limits->min_dbs = 4*nbsuffixes + nbindexes + 2*nbvlvs + DBMDB_DBS_MARGIN;
+ 
+     total_space = ((uint64_t)(buf.f_blocks)) * ((uint64_t)(buf.f_bsize));
+     avail_space = ((uint64_t)(buf.f_bavail)) * ((uint64_t)(buf.f_bsize));
+diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
+index 8c879da31..707a110c5 100644
+--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
+@@ -4312,9 +4312,12 @@ dbmdb_import_init_writer(ImportJob *job, ImportRole_t role)
+ void
+ dbmdb_free_import_ctx(ImportJob *job)
+ {
+-    if (job->writer_ctx) {
+-        ImportCtx_t *ctx = job->writer_ctx;
+-        job->writer_ctx = NULL;
++    ImportCtx_t *ctx = NULL;
++    pthread_mutex_lock(get_import_ctx_mutex());
++    ctx = job->writer_ctx;
++    job->writer_ctx = NULL;
++    pthread_mutex_unlock(get_import_ctx_mutex());
++    if (ctx) {
+         pthread_mutex_destroy(&ctx->workerq.mutex);
+         pthread_cond_destroy(&ctx->workerq.cv);
+         slapi_ch_free((void**)&ctx->workerq.slots);
+diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
+index 6386ecf06..05f1e348d 100644
+--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
+@@ -287,6 +287,13 @@ int add_dbi(dbi_open_ctx_t *octx, backend *be, const char *fname, int flags)
+         slapi_ch_free((void**)&treekey.dbname);
+         return octx->rc;
+     }
++    if (treekey.dbi >= ctx->dsecfg.max_dbs) {
++        octx->rc = MDB_DBS_FULL;
++        slapi_log_err(SLAPI_LOG_ERR, "add_dbi", "Failed to open database instance %s slots: %d/%d. Error is %d: %s.\n",
++                      treekey.dbname, treekey.dbi, ctx->dsecfg.max_dbs, octx->rc, mdb_strerror(octx->rc));
++        slapi_ch_free((void**)&treekey.dbname);
++        return octx->rc;
++    }
+     if (octx->ai && octx->ai->ai_key_cmp_fn) {
+ 		octx->rc = dbmdb_update_dbi_cmp_fn(ctx, &treekey, octx->ai->ai_key_cmp_fn, octx->txn);
+         if (octx->rc) {
+@@ -689,6 +696,7 @@ int dbmdb_make_env(dbmdb_ctx_t *ctx, int readOnly, mdb_mode_t mode)
+         rc = dbmdb_write_infofile(ctx);
+     } else {
+         /* No Config ==> read it from info file */
++        ctx->dsecfg = ctx->startcfg;
+     }
+     if (rc) {
+         return rc;
+diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
+index da4a4548e..42f4a0718 100644
+--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
++++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
+@@ -463,7 +463,7 @@ int dblayer_show_statistics(const char *dbimpl_name, const char *dbhome, FILE *f
+     li->li_plugin = be->be_database;
+     li->li_plugin->plg_name = (char*) "back-ldbm-dbimpl";
+     li->li_plugin->plg_libpath = (char*) "libback-ldbm";
+-    li->li_directory = (char*)dbhome;
++    li->li_directory = get_li_directory(dbhome);
+ 
+     /* Initialize database plugin */
+     rc = dbimpl_setup(li, dbimpl_name);
+diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
+index 2bb8cb581..30ec462fa 100644
+--- a/ldap/servers/slapd/back-ldbm/import.c
++++ b/ldap/servers/slapd/back-ldbm/import.c
+@@ -27,6 +27,9 @@
+ #define NEED_DN_NORM_SP -25
+ #define NEED_DN_NORM_BT -26
+ 
++/* Protect against import context destruction */
++static pthread_mutex_t import_ctx_mutex = PTHREAD_MUTEX_INITIALIZER;
++
+ 
+ /********** routines to manipulate the entry fifo **********/
+ 
+@@ -143,6 +146,14 @@ ldbm_back_wire_import(Slapi_PBlock *pb)
+ 
+ /* Threads management */
+ 
++/* Return the mutex that protects against import context destruction */
++pthread_mutex_t *
++get_import_ctx_mutex()
++{
++    return &import_ctx_mutex;
++}
++
++
+ /* tell all the threads to abort */
+ void
+ import_abort_all(ImportJob *job, int wait_for_them)
+@@ -151,7 +162,7 @@ import_abort_all(ImportJob *job, int wait_for_them)
+ 
+     /* tell all the worker threads to abort */
+     job->flags |= FLAG_ABORT;
+-
++    pthread_mutex_lock(&import_ctx_mutex);
+     for (worker = job->worker_list; worker; worker = worker->next)
+         worker->command = ABORT;
+ 
+@@ -167,6 +178,7 @@ import_abort_all(ImportJob *job, int wait_for_them)
+             }
+         }
+     }
++    pthread_mutex_unlock(&import_ctx_mutex);
+ }
+ 
+ 
+-- 
+2.48.0
+
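Note on the patch above: the heart of the autotuning change is the new
min_dbs formula in dbmdb_compute_limits(). A back-of-the-envelope sketch of
that arithmetic; the value 10 for DBMDB_DBS_MARGIN below is an assumption
for illustration, the real constant is defined in the mdb backend headers:

    # Sketch of the corrected sizing formula from mdb_config.c.
    DBMDB_DBS_MARGIN = 10  # assumed value, for illustration only

    def min_dbs(nbsuffixes, nbindexes, nbvlvs):
        # 4 DBIs per suffix (long-entryrdn, replication_changelog,
        # id2entry, ancestorid) and 2 per VLV (the index and its cache).
        return 4 * nbsuffixes + nbindexes + 2 * nbvlvs + DBMDB_DBS_MARGIN

    # One suffix with a dozen indexes still leaves margin for new backends:
    print(min_dbs(nbsuffixes=1, nbindexes=12, nbvlvs=0))
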
diff --git a/SOURCES/0004-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch b/SOURCES/0004-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch
new file mode 100644
index 0000000..648eea5
--- /dev/null
+++ b/SOURCES/0004-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch
@@ -0,0 +1,894 @@
+From b53faa9e7289383bbc02fc260b1b34958a317fdd Mon Sep 17 00:00:00 2001
+From: progier389 <progier@redhat.com>
+Date: Fri, 6 Sep 2024 14:45:06 +0200
+Subject: [PATCH] Issue 6090 - Fix dbscan options and man pages (#6315)
+
+* Issue 6090 - Fix dbscan options and man pages
+
+The dbscan -d option is dangerously confusing, as it removes a database instance while in db_stat it identifies the database
+(cf. issue #5609).
+This fix implements long options in dbscan, renames -d to --remove, and requires a new --do-it option for actions that change the database content.
+The fix also aligns both the usage and the dbscan man page with the new set of options.
+
+Issue: #6090
+
+Reviewed by: @tbordaz, @droideck (Thanks!)
+
+(cherry picked from commit 25e1d16887ebd299dfe0088080b9ee0deec1e41f)
+---
+ dirsrvtests/tests/suites/clu/dbscan_test.py   | 253 ++++++++++++++++++
+ .../tests/suites/clu/repl_monitor_test.py     |   4 +-
+ .../slapd/back-ldbm/db-bdb/bdb_layer.c        |  12 +-
+ ldap/servers/slapd/back-ldbm/dbimpl.c         |  50 +++-
+ ldap/servers/slapd/tools/dbscan.c             | 182 ++++++++++---
+ man/man1/dbscan.1                             |  74 +++--
+ src/lib389/lib389/__init__.py                 |   9 +-
+ src/lib389/lib389/cli_ctl/dblib.py            |  13 +-
+ 8 files changed, 531 insertions(+), 66 deletions(-)
+ create mode 100644 dirsrvtests/tests/suites/clu/dbscan_test.py
+
+diff --git a/dirsrvtests/tests/suites/clu/dbscan_test.py b/dirsrvtests/tests/suites/clu/dbscan_test.py
+new file mode 100644
+index 000000000..2c9a9651a
+--- /dev/null
++++ b/dirsrvtests/tests/suites/clu/dbscan_test.py
+@@ -0,0 +1,253 @@
++# --- BEGIN COPYRIGHT BLOCK ---
++# Copyright (C) 2024 Red Hat, Inc.
++# All rights reserved.
++#
++# License: GPL (version 3 or any later version).
++# See LICENSE for details.
++# --- END COPYRIGHT BLOCK ---
++#
++import logging
++import os
++import pytest
++import re
++import subprocess
++import sys
++
++from lib389 import DirSrv
++from lib389._constants import DBSCAN
++from lib389.topologies import topology_m2 as topo_m2
++from difflib import context_diff
++
++pytestmark = pytest.mark.tier0
++
++logging.getLogger(__name__).setLevel(logging.DEBUG)
++log = logging.getLogger(__name__)
++
++DEBUGGING = os.getenv("DEBUGGING", default=False)
++
++
++class CalledProcessUnexpectedReturnCode(subprocess.CalledProcessError):
++    def __init__(self, result, expected_rc):
++        super().__init__(cmd=result.args, returncode=result.returncode, output=result.stdout, stderr=result.stderr)
++        self.expected_rc = expected_rc
++        self.result = result
++
++    def __str__(self):
++        return f'Command {self.result.args} returned {self.result.returncode} instead of {self.expected_rc}'
++
++
++class DbscanPaths:
++    @staticmethod
++    def list_instances(inst, dblib, dbhome):
++        # compute db instance pathnames
++        instances = dbscan(['-D', dblib, '-L', dbhome], inst=inst).stdout
++        dbis = []
++        if dblib == 'bdb':
++            pattern = r'^ (.*) $'
++            prefix = f'{dbhome}/'
++        else:
++            pattern = r'^ (.*) flags:'
++            prefix = f''
++        for match in re.finditer(pattern, instances, flags=re.MULTILINE):
++            dbis.append(prefix+match.group(1))
++        return dbis
++
++    @staticmethod
++    def list_options(inst):
++        # compute supported options
++        options = []
++        usage = dbscan(['-h'], inst=inst, expected_rc=None).stdout
++        pattern = r'^\s+(?:(-[^-,]+), +)?(--[^ ]+).*$'
++        for match in re.finditer(pattern, usage, flags=re.MULTILINE):
++            for idx in range(1,3):
++                if match.group(idx) is not None:
++                    options.append(match.group(idx))
++        return options
++
++    def __init__(self, inst):
++        dblib = inst.get_db_lib()
++        dbhome = inst.ds_paths.db_home_dir
++        self.inst = inst
++        self.dblib = dblib
++        self.dbhome = dbhome
++        self.options = DbscanPaths.list_options(inst)
++        self.dbis = DbscanPaths.list_instances(inst, dblib, dbhome)
++        self.ldif_dir = inst.ds_paths.ldif_dir
++
++    def get_dbi(self, attr, backend='userroot'):
++        for dbi in self.dbis:
++            if f'{backend}/{attr}.'.lower() in dbi.lower():
++                return dbi
++        raise KeyError(f'Unknown dbi {backend}/{attr}')
++
++    def __repr__(self):
++        attrs = ['inst', 'dblib', 'dbhome', 'ldif_dir', 'options', 'dbis' ]
++        res = ", ".join(map(lambda x: f'{x}={self.__dict__[x]}', attrs))
++        return f'DbscanPaths({res})'
++
++
++def dbscan(args, inst=None, expected_rc=0):
++    if inst is None:
++        prefix = os.environ.get('PREFIX', "")
++        prog = f'{prefix}/bin/dbscan'
++    else:
++        prog = os.path.join(inst.ds_paths.bin_dir, DBSCAN)
++    args.insert(0, prog)
++    output = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
++    log.debug(f'{args} result is {output.returncode} output is {output.stdout}')
++    if expected_rc is not None and expected_rc != output.returncode:
++        raise CalledProcessUnexpectedReturnCode(output, expected_rc)
++    return output
++
++
++def log_export_file(filename):
++    with open(filename, 'r') as file:
++        log.debug(f'=========== Dump of {filename} ================')
++        for line in file:
++            log.debug(line.rstrip('\n'))
++        log.debug(f'=========== End of {filename} =================')
++
++
++@pytest.fixture(scope='module')
++def paths(topo_m2, request):
++    inst = topo_m2.ms["supplier1"]
++    if sys.version_info < (3,5):
++        pytest.skip('requires python version >= 3.5')
++    paths = DbscanPaths(inst)
++    if '--do-it' not in paths.options:
++        pytest.skip('Not supported with this dbscan version')
++    inst.stop()
++    return paths
++
++
++def test_dbscan_destructive_actions(paths, request):
++    """Test that dbscan remove/import actions
++
++    :id: f40b0c42-660a-11ef-9544-083a88554478
++    :setup: Stopped standalone instance
++    :steps:
++         1. Export cn instance with dbscan
++         2. Run dbscan --remove ...
++         3. Check the error message about missing --do-it
++         4. Check that cn instance is still present
++         5. Run dbscan -I import_file ...
++         6. Check it was properly imported
++         7. Check that cn instance is still present
++         8. Run dbscan --remove ... --doit
++         9. Check the error message about missing --do-it
++         10. Check that cn instance is still present
++         11. Run dbscan -I import_file ... --do-it
++         12. Check it was properly imported
++         13. Check that cn instance is still present
++         14. Export again the database
++         15. Check that content of export files are the same
++    :expectedresults:
++         1. Success
++         2. dbscan return code should be 1 (error)
++         3. Error message should be present
++         4. cn instance should be present
++         5. dbscan return code should be 1 (error)
++         6. Error message should be present
++         7. cn instance should be present
++         8. dbscan return code should be 0 (success)
++         9. Error message should not be present
++         10. cn instance should not be present
++         11. dbscan return code should be 0 (success)
++         12. Error message should not be present
++         13. cn instance should be present
++         14. Success
++         15. Export files content should be the same
++    """
++
++    # Export cn instance with dbscan
++    export_cn = f'{paths.ldif_dir}/dbscan_cn.data'
++    export_cn2 = f'{paths.ldif_dir}/dbscan_cn2.data'
++    cndbi = paths.get_dbi('replication_changelog')
++    inst = paths.inst
++    dblib = paths.dblib
++    exportok = False
++    def fin():
++        if os.path.exists(export_cn):
++            # Restore cn if it was exported successfully but does not exists any more
++            if exportok and cndbi not in DbscanPaths.list_instances(inst, dblib, paths.dbhome):
++                    dbscan(['-D', dblib, '-f', cndbi, '-I', export_cn, '--do-it'], inst=inst)
++            if not DEBUGGING:
++                os.remove(export_cn)
++        if os.path.exists(export_cn2) and not DEBUGGING:
++            os.remove(export_cn2)
++
++    fin()
++    request.addfinalizer(fin)
++    dbscan(['-D', dblib,  '-f', cndbi, '-X', export_cn], inst=inst)
++    exportok = True
++
++    expected_msg = "without specifying '--do-it' parameter."
++
++    # Run dbscan --remove ...
++    result = dbscan(['-D', paths.dblib, '--remove', '-f', cndbi],
++                    inst=paths.inst, expected_rc=1)
++
++    # Check the error message about missing --do-it
++    assert expected_msg in result.stdout
++
++    # Check that cn instance is still present
++    curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
++    assert cndbi in curdbis
++
++    # Run dbscan -I import_file ...
++    result = dbscan(['-D', paths.dblib, '-f', cndbi, '-I', export_cn],
++                    inst=paths.inst, expected_rc=1)
++
++    # Check the error message about missing --do-it
++    assert expected_msg in result.stdout
++
++    # Check that cn instance is still present
++    curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
++    assert cndbi in curdbis
++
++    # Run dbscan --remove ... --do-it
++    result = dbscan(['-D', paths.dblib, '--remove', '-f', cndbi, '--do-it'],
++                    inst=paths.inst, expected_rc=0)
++
++    # Check the error message about missing --do-it is not present
++    assert expected_msg not in result.stdout
++
++    # Check that cn instance was removed
++    curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
++    assert cndbi not in curdbis
++
++    # Run dbscan -I import_file ... --do-it
++    result = dbscan(['-D', paths.dblib, '-f', cndbi,
++                     '-I', export_cn, '--do-it'],
++                    inst=paths.inst, expected_rc=0)
++
++    # Check the error message about missing --do-it is not present
++    assert expected_msg not in result.stdout
++
++    # Check that cn instance is still present
++    curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
++    assert cndbi in curdbis
++
++    # Export again the database
++    dbscan(['-D', dblib,  '-f', cndbi, '-X', export_cn2], inst=inst)
++
++    # Check that content of export files are the same
++    with open(export_cn) as f1:
++        f1lines = f1.readlines()
++    with open(export_cn2) as f2:
++        f2lines = f2.readlines()
++    diffs = list(context_diff(f1lines, f2lines))
++    if len(diffs) > 0:
++        log.debug("Export file differences are:")
++        for d in diffs:
++            log.debug(d)
++        log_export_file(export_cn)
++        log_export_file(export_cn2)
++        assert diffs is None
++
++
++if __name__ == '__main__':
++    # Run isolated
++    # -s for DEBUG mode
++    CURRENT_FILE = os.path.realpath(__file__)
++    pytest.main("-s %s" % CURRENT_FILE)
+diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
+index d83416847..842dd96fd 100644
+--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
++++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
+@@ -77,13 +77,13 @@ def get_hostnames_from_log(port1, port2):
+     # search for Supplier :hostname:port 
+     # and use \D to insure there is no more number is after
+     # the matched port (i.e that 10 is not matching 101)
+-    regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)'
++    regexp = '(Supplier: )([^:]*)(:' + str(port1) + r'\D)'
+     match=re.search(regexp, logtext)
+     host_m1 = 'localhost.localdomain'
+     if (match is not None):
+         host_m1 = match.group(2)
+     # Same for supplier 2 
+-    regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)'
++    regexp = '(Supplier: )([^:]*)(:' + str(port2) + r'\D)'
+     match=re.search(regexp, logtext)
+     host_m2 = 'localhost.localdomain'
+     if (match is not None):
+diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
+index de6be0f42..4b30e8e87 100644
+--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
+@@ -5820,8 +5820,16 @@ bdb_import_file_name(ldbm_instance *inst)
+ static char *
+ bdb_restore_file_name(struct ldbminfo *li)
+ {
+-    char *fname = slapi_ch_smprintf("%s/../.restore", li->li_directory);
+-
++    char *pt = strrchr(li->li_directory, '/');
++    char *fname =  NULL;
++    if (pt == NULL) {
++        fname = slapi_ch_strdup(".restore");
++    } else {
++        size_t len = pt-li->li_directory;
++        fname = slapi_ch_malloc(len+10);
++        strncpy(fname, li->li_directory, len);
++        strcpy(fname+len, "/.restore");
++    }
+     return fname;
+ }
+ 
+diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
+index 42f4a0718..134d06480 100644
+--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
++++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
+@@ -397,7 +397,48 @@ const char *dblayer_op2str(dbi_op_t op)
+     return str[idx];
+ }
+ 
+-/* Open db env, db and db file privately */
++/* Get the li_directory directory from the database instance name -
++ * Caller should free the returned value
++ */
++static char *
++get_li_directory(const char *fname)
++{
++    /*
++     * li_directory is an existing directory.
++     * It can be fname, its parent, or its grandparent;
++     * in case of a problem, the provided name is returned.
++     */
++    char *lid = slapi_ch_strdup(fname);
++    struct stat sbuf = {0};
++    char *pt = NULL;
++    for (int count=0; count<3; count++) {
++        if (stat(lid, &sbuf) == 0) {
++            if (S_ISDIR(sbuf.st_mode)) {
++                return lid;
++            }
++            /* Non directory existing file could be regular
++             * at the first iteration otherwise it is an error.
++             */
++            if (count>0 || !S_ISREG(sbuf.st_mode)) {
++                break;
++            }
++        }
++        pt = strrchr(lid, '/');
++        if (pt == NULL) {
++            slapi_ch_free_string(&lid);
++            return slapi_ch_strdup(".");
++        }
++        *pt = '\0';
++    }
++    /*
++     * Error case. Return a copy of the original string
++     *  and let dblayer_private_open_fn fail to open the database.
++     */
++    slapi_ch_free_string(&lid);
++    return slapi_ch_strdup(fname);
++}
++
++/* Open db env, db and db file privately (for dbscan) */
+ int dblayer_private_open(const char *plgname, const char *dbfilename, int rw, Slapi_Backend **be, dbi_env_t **env, dbi_db_t **db)
+ {
+     struct ldbminfo *li;
+@@ -412,7 +453,7 @@ int dblayer_private_open(const char *plgname, const char *dbfilename, int rw, Sl
+     li->li_plugin = (*be)->be_database;
+     li->li_plugin->plg_name = (char*) "back-ldbm-dbimpl";
+     li->li_plugin->plg_libpath = (char*) "libback-ldbm";
+-    li->li_directory = slapi_ch_strdup(dbfilename);
++    li->li_directory = get_li_directory(dbfilename);
+ 
+     /* Initialize database plugin */
+     rc = dbimpl_setup(li, plgname);
+@@ -439,7 +480,10 @@ int dblayer_private_close(Slapi_Backend **be, dbi_env_t **env, dbi_db_t **db)
+         }
+         slapi_ch_free((void**)&li->li_dblayer_private);
+         slapi_ch_free((void**)&li->li_dblayer_config);
+-        ldbm_config_destroy(li);
++        if (dblayer_is_lmdb(*be)) {
++            /* Generate use after free and double free in bdb case */
++            ldbm_config_destroy(li);
++        }
+         slapi_ch_free((void**)&(*be)->be_database);
+         slapi_ch_free((void**)&(*be)->be_instance_info);
+         slapi_ch_free((void**)be);
+diff --git a/ldap/servers/slapd/tools/dbscan.c b/ldap/servers/slapd/tools/dbscan.c
+index 2d28dd951..12edf7c5b 100644
+--- a/ldap/servers/slapd/tools/dbscan.c
++++ b/ldap/servers/slapd/tools/dbscan.c
+@@ -26,6 +26,7 @@
+ #include <string.h>
+ #include <ctype.h>
+ #include <errno.h>
++#include <getopt.h>
+ #include "../back-ldbm/dbimpl.h"
+ #include "../slapi-plugin.h"
+ #include "nspr.h"
+@@ -85,6 +86,8 @@
+ #define DB_BUFFER_SMALL ENOMEM
+ #endif
+ 
++#define COUNTOF(array)    ((sizeof(array))/sizeof(*(array)))
++
+ #if defined(linux)
+ #include <getopt.h>
+ #endif
+@@ -130,9 +133,43 @@ long ind_cnt = 0;
+ long allids_cnt = 0;
+ long other_cnt = 0;
+ char *dump_filename = NULL;
++int do_it = 0;
+ 
+ static Slapi_Backend *be = NULL; /* Pseudo backend used to interact with db */
+ 
++/* For Long options without shortcuts */
++enum {
++    OPT_FIRST = 0x1000,
++    OPT_DO_IT,
++    OPT_REMOVE,
++};
++
++static const struct option options[] = {
++    /* Options without shortcut */
++    { "do-it", no_argument, 0, OPT_DO_IT },
++    { "remove", no_argument, 0, OPT_REMOVE },
++    /* Options with shortcut */
++    { "import", required_argument, 0, 'I' },
++    { "export", required_argument, 0, 'X' },
++    { "db-type", required_argument, 0, 'D' },
++    { "dbi", required_argument, 0, 'f' },
++    { "ascii", no_argument, 0, 'A' },
++    { "raw", no_argument, 0, 'R' },
++    { "truncate-entry", required_argument, 0, 't' },
++    { "entry-id", required_argument, 0, 'K' },
++    { "key", required_argument, 0, 'k' },
++    { "list", required_argument, 0, 'L' },
++    { "stats", required_argument, 0, 'S' },
++    { "id-list-max-size", required_argument, 0, 'l' },
++    { "id-list-min-size", required_argument, 0, 'G' },
++    { "show-id-list-lenghts", no_argument, 0, 'n' },
++    { "show-id-list", no_argument, 0, 'r' },
++    { "summary", no_argument, 0, 's' },
++    { "help", no_argument, 0, 'h' },
++    { 0, 0, 0, 0 }
++};
++
++
+ /** db_printf - functioning same as printf but a place for manipluating output.
+ */
+ void
+@@ -899,7 +936,7 @@ is_changelog(char *filename)
+ }
+ 
+ static void
+-usage(char *argv0)
++usage(char *argv0, int error)
+ {
+     char *copy = strdup(argv0);
+     char *p0 = NULL, *p1 = NULL;
+@@ -922,42 +959,52 @@ usage(char *argv0)
+     }
+     printf("\n%s - scan a db file and dump the contents\n", p0);
+     printf("  common options:\n");
+-    printf("    -D <dbimpl>     specify db implementaion (may be: bdb or mdb)\n");
+-    printf("    -f <filename>   specify db file\n");
+-    printf("    -A              dump as ascii data\n");
+-    printf("    -R              dump as raw data\n");
+-    printf("    -t <size>       entry truncate size (bytes)\n");
++    printf("    -A, --ascii                    dump as ascii data\n");
++    printf("    -D, --db-type <dbimpl>         specify db implementaion (may be: bdb or mdb)\n");
++    printf("    -f, --dbi <filename>           specify db instance\n");
++    printf("    -R, --raw                      dump as raw data\n");
++    printf("    -t, --truncate-entry <size>    entry truncate size (bytes)\n");
++
+     printf("  entry file options:\n");
+-    printf("    -K <entry_id>   lookup only a specific entry id\n");
++    printf("    -K, --entry-id <entry_id>      lookup only a specific entry id\n");
++
+     printf("  index file options:\n");
+-    printf("    -k <key>        lookup only a specific key\n");
+-    printf("    -L <dbhome>     list all db files\n");
+-    printf("    -S <dbhome>     show statistics\n");
+-    printf("    -l <size>       max length of dumped id list\n");
+-    printf("                    (default %" PRIu32 "; 40 bytes <= size <= 1048576 bytes)\n", MAX_BUFFER);
+-    printf("    -G <n>          only display index entries with more than <n> ids\n");
+-    printf("    -n              display ID list lengths\n");
+-    printf("    -r              display the conents of ID list\n");
+-    printf("    -s              Summary of index counts\n");
+-    printf("    -I file         Import database content from file\n");
+-    printf("    -X file         Export database content in file\n");
++    printf("    -G, --id-list-min-size <n>     only display index entries with more than <n> ids\n");
++    printf("    -I, --import file              Import database instance from file.\n");
++    printf("    -k, --key <key>                lookup only a specific key\n");
++    printf("    -l, --id-list-max-size <size>  max length of dumped id list\n");
++    printf("                                  (default %" PRIu32 "; 40 bytes <= size <= 1048576 bytes)\n", MAX_BUFFER);
++    printf("    -n, --show-id-list-lenghts     display ID list lengths\n");
++    printf("    --remove                       remove database instance\n");
++    printf("    -r, --show-id-list             display the conents of ID list\n");
++    printf("    -S, --stats <dbhome>           show statistics\n");
++    printf("    -X, --export file              export database instance in file\n");
++
++    printf("  other options:\n");
++    printf("    -s, --summary                  summary of index counts\n");
++    printf("    -L, --list <dbhome>            list all db files\n");
++    printf("    --do-it                        confirmation flags for destructive actions like --remove or --import\n");
++    printf("    -h, --help                     display this usage\n");
++
+     printf("  sample usages:\n");
+-    printf("    # list the db files\n");
+-    printf("    %s -D mdb -L /var/lib/dirsrv/slapd-i/db/\n", p0);
+-    printf("    %s -f id2entry.db\n", p0);
++    printf("    # list the database instances\n");
++    printf("    %s -L /var/lib/dirsrv/slapd-supplier1/db/\n", p0);
+     printf("    # dump the entry file\n");
+     printf("    %s -f id2entry.db\n", p0);
+     printf("    # display index keys in cn.db4\n");
+     printf("    %s -f cn.db4\n", p0);
++    printf("    # display index keys in cn on lmdb\n");
++    printf("    %s -f /var/lib/dirsrv/slapd-supplier1/db/userroot/cn.db\n", p0);
++    printf("    (Note: Use 'dbscan -L db_home_dir' to get the db instance path)\n");
+     printf("    # display index keys and the count of entries having the key in mail.db4\n");
+     printf("    %s -r -f mail.db4\n", p0);
+     printf("    # display index keys and the IDs having more than 20 IDs in sn.db4\n");
+     printf("    %s -r -G 20 -f sn.db4\n", p0);
+     printf("    # display summary of objectclass.db4\n");
+-    printf("    %s -f objectclass.db4\n", p0);
++    printf("    %s -s -f objectclass.db4\n", p0);
+     printf("\n");
+     free(copy);
+-    exit(1);
++    exit(error?1:0);
+ }
+ 
+ void dump_ascii_val(const char *str, dbi_val_t *val)
+@@ -1126,13 +1173,12 @@ importdb(const char *dbimpl_name, const char *filename, const char *dump_name)
+     dblayer_init_pvt_txn();
+ 
+     if (!dump) {
+-        printf("Failed to open dump file %s. Error %d: %s\n", dump_name, errno, strerror(errno));
+-        fclose(dump);
++        printf("Error: Failed to open dump file %s. Error %d: %s\n", dump_name, errno, strerror(errno));
+         return 1;
+     }
+ 
+     if (dblayer_private_open(dbimpl_name, filename, 1, &be, &env, &db)) {
+-        printf("Can't initialize db plugin: %s\n", dbimpl_name);
++        printf("Error: Can't initialize db plugin: %s\n", dbimpl_name);
+         fclose(dump);
+         return 1;
+     }
+@@ -1142,11 +1188,16 @@ importdb(const char *dbimpl_name, const char *filename, const char *dump_name)
+            !_read_line(dump, &keyword, &data) && keyword == 'v') {
+         ret = dblayer_db_op(be, db, txn.txn, DBI_OP_PUT, &key, &data);
+     }
++    if (ret !=0) {
++        printf("Error: failed to write record in database. Error %d: %s\n", ret, dblayer_strerror(ret));
++        dump_ascii_val("Failing record key", &key);
++        dump_ascii_val("Failing record value", &data);
++    }
+     fclose(dump);
+     dblayer_value_free(be, &key);
+     dblayer_value_free(be, &data);
+     if (dblayer_private_close(&be, &env, &db)) {
+-        printf("Unable to shutdown the db plugin: %s\n", dblayer_strerror(1));
++        printf("Error: Unable to shutdown the db plugin: %s\n", dblayer_strerror(1));
+         return 1;
+     }
+     return ret;
+@@ -1243,6 +1294,7 @@ removedb(const char *dbimpl_name, const char *filename)
+         return 1;
+     }
+ 
++    db = NULL; /* Database is already closed by dblayer_db_remove */
+     if (dblayer_private_close(&be, &env, &db)) {
+         printf("Unable to shutdown the db plugin: %s\n", dblayer_strerror(1));
+         return 1;
+@@ -1250,7 +1302,6 @@ removedb(const char *dbimpl_name, const char *filename)
+     return 0;
+ }
+ 
+-
+ int
+ main(int argc, char **argv)
+ {
+@@ -1262,11 +1313,46 @@ main(int argc, char **argv)
+     int ret = 0;
+     char *find_key = NULL;
+     uint32_t entry_id = 0xffffffff;
+-    char *dbimpl_name = (char*) "bdb";
+-    int c;
++    char *defdbimpl = getenv("NSSLAPD_DB_LIB");
++    char *dbimpl_name = (char*) "mdb";
++    int longopt_idx = 0;
++    int c = 0;
++    char optstring[2*COUNTOF(options)+1] = {0};
++
++    if (defdbimpl) {
++        if (strcasecmp(defdbimpl, "bdb") == 0) {
++            dbimpl_name = (char*) "bdb";
++        }
++        if (strcasecmp(defdbimpl, "mdb") == 0) {
++            dbimpl_name = (char*) "mdb";
++        }
++    }
++
++    /* Compute getopt short option string */
++    {
++        char *pt = optstring;
++        for (const struct option *opt = options; opt->name; opt++) {
++            if (opt->val>0 && opt->val<OPT_FIRST) {
++                *pt++ = (char)(opt->val);
++                if (opt->has_arg == required_argument) {
++                    *pt++ = ':';
++                }
++            }
++        }
++        *pt = '\0';
++    }
+ 
+-    while ((c = getopt(argc, argv, "Af:RL:S:l:nG:srk:K:hvt:D:X:I:d")) != EOF) {
++    while ((c = getopt_long(argc, argv, optstring, options, &longopt_idx)) != EOF) {
++        if (c == 0) {
++            c = longopt_idx;
++        }
+         switch (c) {
++        case OPT_DO_IT:
++            do_it = 1;
++            break;
++        case OPT_REMOVE:
++            display_mode |= REMOVE;
++            break;
+         case 'A':
+             display_mode |= ASCIIDATA;
+             break;
+@@ -1332,32 +1418,48 @@ main(int argc, char **argv)
+             display_mode |= IMPORT;
+             dump_filename = optarg;
+             break;
+-        case 'd':
+-            display_mode |= REMOVE;
+-            break;
+         case 'h':
+         default:
+-            usage(argv[0]);
++            usage(argv[0], 1);
+         }
+     }
+ 
++    if (filename == NULL) {
++        fprintf(stderr, "PARAMETER ERROR! 'filename' parameter is missing.\n");
++        usage(argv[0], 1);
++    }
++
+     if (display_mode & EXPORT) {
+         return exportdb(dbimpl_name, filename, dump_filename);
+     }
+ 
+     if (display_mode & IMPORT) {
++        if (!strstr(filename, "/id2entry") && !strstr(filename, "/replication_changelog")) {
++            /* schema is unknown in dbscan ==> duplicate keys sort order is unknown
++             *  ==> cannot create dbi with duplicate keys
++             * ==> only id2entry and repl changelog is importable.
++             */
++            fprintf(stderr, "ERROR: The only database instances that may be imported with dbscan are id2entry and replication_changelog.\n");
++            exit(1);
++        }
++
++        if (do_it == 0) {
++            fprintf(stderr, "PARAMETER ERROR! Trying to perform a destructive action (import)\n"
++                            " without specifying '--do-it' parameter.\n");
++            exit(1);
++        }
+         return importdb(dbimpl_name, filename, dump_filename);
+     }
+ 
+     if (display_mode & REMOVE) {
++        if (do_it == 0) {
++            fprintf(stderr, "PARAMETER ERROR! Trying to perform a destructive action (remove)\n"
++                            " without specifying '--do-it' parameter.\n");
++            exit(1);
++        }
+         return removedb(dbimpl_name, filename);
+     }
+ 
+-    if (filename == NULL) {
+-        fprintf(stderr, "PARAMETER ERROR! 'filename' parameter is missing.\n");
+-        usage(argv[0]);
+-    }
+-
+     if (display_mode & LISTDBS) {
+         dbi_dbslist_t *dbs = dblayer_list_dbs(dbimpl_name, filename);
+         if (dbs) {
+diff --git a/man/man1/dbscan.1 b/man/man1/dbscan.1
+index 810608371..dfb6e8351 100644
+--- a/man/man1/dbscan.1
++++ b/man/man1/dbscan.1
+@@ -31,50 +31,94 @@ Scans a Directory Server database index file and dumps the contents.
+ .\" respectively.
+ .SH OPTIONS
+ A summary of options is included below:
++.IP
++common options:
++.TP
++.B \fB\-A, \-\-ascii\fR
++dump as ascii data
++.TP
++.B \fB\-D, \-\-db\-type\fR <dbimpl>
++specify db type: bdb or mdb
+ .TP
+-.B \fB\-f\fR <filename>
+-specify db file
++.B \fB\-f, \-\-dbi\fR <filename>
++specify db instance
+ .TP
+-.B \fB\-R\fR
++.B \fB\-R, \-\-raw\fR
+ dump as raw data
+ .TP
+-.B \fB\-t\fR <size>
++.B \fB\-t, \-\-truncate\-entry\fR <size>
+ entry truncate size (bytes)
+ .IP
+ entry file options:
+ .TP
+-.B \fB\-K\fR <entry_id>
++.B \fB\-K, \-\-entry\-id\fR <entry_id>
+ lookup only a specific entry id
++.IP
+ index file options:
+ .TP
+-.B \fB\-k\fR <key>
++.B \fB\-G, \-\-id\-list\-min\-size\fR <n>
++only display index entries with more than <n> ids
++.TP
++.B \fB\-I, \-\-import\fR <file>
++Import database instance from file. Requires the \-\-do\-it parameter.
++WARNING! Only the id2entry and replication_changelog database instances
++may be imported by dbscan.
++.TP
++.B \fB\-k, \-\-key\fR <key>
+ lookup only a specific key
+ .TP
+-.B \fB\-l\fR <size>
++.B \fB\-l, \-\-id\-list\-max\-size\fR <size>
+ max length of dumped id list
+ (default 4096; 40 bytes <= size <= 1048576 bytes)
+ .TP
+-.B \fB\-G\fR <n>
+-only display index entries with more than <n> ids
+-.TP
+-.B \fB\-n\fR
++.B \fB\-n, \-\-show\-id\-list\-lenghts\fR
+ display ID list lengths
+ .TP
+-.B \fB\-r\fR
++.B \fB\-\-remove\fR
++remove a db instance. Requires the \-\-do\-it parameter
++.TP
++.B \fB\-r, \-\-show\-id\-list\fR
+ display the contents of ID list
+ .TP
+-.B \fB\-s\fR
++.B \fB\-S, \-\-stats\fR <dbhome>
++display statistics
++.TP
++.B \fB\-X, \-\-export\fR <file>
++Export database instance to file
++.IP
++other options:
++.TP
++.B \fB\-s, \-\-summary\fR
+ Summary of index counts
++.TP
++.B \fB\-L, \-\-list\fR <dbhome>
++List the database instances
++.TP
++.B \fB\-\-do\-it\fR
++confirmation required for actions that change the database contents
++.TP
++.B \fB\-h, \-\-help\fR
++display the usage
+ .IP
+ .SH USAGE
+ Sample usages:
+ .TP
++List the database instances
++.B
++dbscan -L /var/lib/dirsrv/slapd-supplier1/db
++.TP
+ Dump the entry file:
+ .B
+ dbscan \fB\-f\fR id2entry.db4
+ .TP
+ Display index keys in cn.db4:
+-.B dbscan \fB\-f\fR cn.db4
++.B
++dbscan \fB\-f\fR cn.db4
++.TP
++Display index keys in cn on lmdb:
++.B
++dbscan \fB\-f\fR /var/lib/dirsrv/slapd\-supplier1/db/userroot/cn.db
++ (Note: Use \fBdbscan \-L db_home_dir\fR to get the db instance path)
+ .TP
+ Display index keys and the count of entries having the key in mail.db4:
+ .B
+@@ -86,7 +130,7 @@ dbscan \fB\-r\fR \fB\-G\fR 20 \fB\-f\fR sn.db4
+ .TP
+ Display summary of objectclass.db4:
+ .B
+-dbscan \fB\-f\fR objectclass.db4
++dbscan \fB\-s \-f\fR objectclass.db4
+ .br
+ .SH AUTHOR
+ dbscan was written by the 389 Project.
+diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
+index e87582d9e..368741a66 100644
+--- a/src/lib389/lib389/__init__.py
++++ b/src/lib389/lib389/__init__.py
+@@ -3039,14 +3039,17 @@ class DirSrv(SimpleLDAPObject, object):
+             return self._dbisupport
+         # check if -D and -L options are supported
+         try:
+-            cmd = ["%s/dbscan" % self.get_bin_dir(), "--help"]
++            cmd = ["%s/dbscan" % self.get_bin_dir(), "-h"]
+             self.log.debug("DEBUG: checking dbscan supported options %s" % cmd)
+             p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+         except subprocess.CalledProcessError:
+             pass
+         output, stderr = p.communicate()
+-        self.log.debug("is_dbi_supported output " + output.decode())
+-        if "-D <dbimpl>" in output.decode() and "-L <dbhome>" in output.decode():
++        output = output.decode()
++        self.log.debug("is_dbi_supported output " + output)
++        if "-D <dbimpl>" in output and "-L <dbhome>" in output:
++            self._dbisupport = True
++        elif "--db-type" in output and "--list" in output:
+             self._dbisupport = True
+         else:
+             self._dbisupport = False
+diff --git a/src/lib389/lib389/cli_ctl/dblib.py b/src/lib389/lib389/cli_ctl/dblib.py
+index e9269e340..82f09c70c 100644
+--- a/src/lib389/lib389/cli_ctl/dblib.py
++++ b/src/lib389/lib389/cli_ctl/dblib.py
+@@ -158,6 +158,14 @@ def run_dbscan(args):
+     return output
+ 
+ 
++def does_dbscan_need_do_it():
++    prefix = os.environ.get('PREFIX', "")
++    prog = f'{prefix}/bin/dbscan'
++    args = [ prog, '-h' ]
++    output = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
++    return '--do-it' in output.stdout
++
++
+ def export_changelog(be, dblib):
+     # Export backend changelog
+     try:
+@@ -172,7 +180,10 @@ def import_changelog(be, dblib):
+     # import backend changelog
+     try:
+         cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname']
+-        run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name']])
++        if does_dbscan_need_do_it():
++            run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name'], '--do-it'])
++        else:
++            run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name']])
+         return True
+     except subprocess.CalledProcessError as e:
+         return False
+-- 
+2.48.0
+
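Note on the patch above: --do-it exists only in newer dbscan builds, so
external callers can probe for it the way does_dbscan_need_do_it() does
before running a destructive action. A hedged sketch (the /usr/bin/dbscan
path and the helper name are assumptions):

    import subprocess

    def dbscan_remove(dbi, dblib="mdb", prog="/usr/bin/dbscan"):
        # Probe the usage text first; only newer dbscan knows --do-it.
        # dbscan -h may exit non-zero, so no check=True on the probe.
        usage = subprocess.run([prog, "-h"], encoding="utf-8",
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT).stdout
        args = [prog, "-D", dblib, "--remove", "-f", dbi]
        if "--do-it" in usage:
            args.append("--do-it")  # now required for destructive actions
        subprocess.run(args, check=True)
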
diff --git a/SOURCES/0005-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch b/SOURCES/0005-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch
new file mode 100644
index 0000000..8fea644
--- /dev/null
+++ b/SOURCES/0005-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch
@@ -0,0 +1,70 @@
+From de52853a3551f1d1876ea21b33a5242ad669fec1 Mon Sep 17 00:00:00 2001
+From: James Chapman <jachapma@redhat.com>
+Date: Tue, 4 Feb 2025 15:40:16 +0000
+Subject: [PATCH] Issue 6566 - RI plugin failure to handle a modrdn for rename
+ of member of multiple groups (#6567)
+
+Bug description:
+With AM and RI plugins enabled, the rename of a user that is part of multiple groups
+fails with a "value exists" error.
+
+Fix description:
+For a modrdn the RI plugin creates a new DN; before a modify is attempted,
+check whether the new DN already exists in the attr being updated.
+
+Fixes: https://github.com/389ds/389-ds-base/issues/6566
+
+Reviewed by: @progier389 , @tbordaz  (Thank you)
+---
+ ldap/servers/plugins/referint/referint.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
+index 468fdc239..218863ea5 100644
+--- a/ldap/servers/plugins/referint/referint.c
++++ b/ldap/servers/plugins/referint/referint.c
+@@ -924,6 +924,7 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
+ {
+     Slapi_Mods *smods = NULL;
+     char *newDN = NULL;
++    struct berval bv = {0};
+     char **dnParts = NULL;
+     char *sval = NULL;
+     char *newvalue = NULL;
+@@ -1026,22 +1027,30 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
+             }
+             /* else: normalize_rc < 0) Ignore the DN normalization error for now. */
+ 
++            bv.bv_val = newDN;
++            bv.bv_len = strlen(newDN);
+             p = PL_strstr(sval, slapi_sdn_get_ndn(origDN));
+             if (p == sval) {
+                 /* (case 1) */
+                 slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
+-                slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
+-
++                /* Add only if the attr value does not exist */
++                if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) {
++                    slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
++                }
+             } else if (p) {
+                 /* (case 2) */
+                 slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
+                 *p = '\0';
+                 newvalue = slapi_ch_smprintf("%s%s", sval, newDN);
+-                slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue);
++                /* Add only if the attr value does not exist */
++                if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) {
++                    slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue);
++                }
+                 slapi_ch_free_string(&newvalue);
+             }
+             /* else: value does not include the modified DN.  Ignore it. */
+             slapi_ch_free_string(&sval);
++            bv = (struct berval){0};
+         }
+         rc = _do_modify(mod_pb, entrySDN, slapi_mods_get_ldapmods_byref(smods));
+         if (rc) {
+-- 
+2.48.0
+
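For context, the failure mode is easy to trigger: a user that belongs to several groups is renamed while plugins that rewrite membership values are enabled. A hedged lib389-style reproduction sketch; MemberOf stands in for the "AM" plugin named in the message, and the exact error surfaced before the fix is not asserted here:

    from lib389._constants import DEFAULT_SUFFIX
    from lib389.idm.user import UserAccounts
    from lib389.idm.group import Groups
    from lib389.plugins import MemberOfPlugin, ReferentialIntegrityPlugin

    def reproduce(inst):
        # Enable the membership-rewriting plugins, then restart.
        ReferentialIntegrityPlugin(inst).enable()
        MemberOfPlugin(inst).enable()
        inst.restart()

        user = UserAccounts(inst, DEFAULT_SUFFIX).create_test_user()
        groups = Groups(inst, DEFAULT_SUFFIX)
        for cn in ('group_a', 'group_b'):
            group = groups.create(properties={'cn': cn})
            group.add_member(user.dn)

        # Before the fix, the RI plugin rewrote the member value in each
        # group and could hit "value exists" on the second membership.
        user.rename('uid=renamed_user')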
diff --git a/SOURCES/0006-Issue-6258-Mitigate-race-condition-in-paged_results_.patch b/SOURCES/0006-Issue-6258-Mitigate-race-condition-in-paged_results_.patch
new file mode 100644
index 0000000..2f66c4f
--- /dev/null
+++ b/SOURCES/0006-Issue-6258-Mitigate-race-condition-in-paged_results_.patch
@@ -0,0 +1,43 @@
+From a634756784056270773d67747061e26152d85469 Mon Sep 17 00:00:00 2001
+From: Masahiro Matsuya <mmatsuya@redhat.com>
+Date: Wed, 5 Feb 2025 11:38:04 +0900
+Subject: [PATCH] Issue 6258 - Mitigate race condition in paged_results_test.py
+ (#6433)
+
+The regression test dirsrvtests/tests/suites/paged_results/paged_results_test.py::test_multi_suffix_search has a race condition causing it to fail due to multiple queries potentially writing their logs out of chronological order.
+
+This failure is mitigated by sorting the retrieved access_log_lines by their "op" value. This ensures the log lines are in chronological order, as expected by the assertions at the end of test_multi_suffix_search().
+
+Helps fix: #6258
+
+Reviewed by: @droideck , @progier389 (Thanks!)
+
+Co-authored-by: Anuar Beisembayev <111912342+abeisemb@users.noreply.github.com>
+---
+ dirsrvtests/tests/suites/paged_results/paged_results_test.py | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
+index eaf0e0da9..fca48db0f 100644
+--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py
++++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
+@@ -7,6 +7,7 @@
+ # --- END COPYRIGHT BLOCK ---
+ #
+ import socket
++import re
+ from random import sample, randrange
+ 
+ import pytest
+@@ -1126,6 +1127,8 @@ def test_multi_suffix_search(topology_st, create_user, new_suffixes):
+         topology_st.standalone.restart(timeout=10)
+ 
+         access_log_lines = topology_st.standalone.ds_access_log.match('.*pr_cookie=.*')
++        # Sort access_log_lines by op number to mitigate race condition effects.
++        access_log_lines.sort(key=lambda x: int(re.search(r"op=(\d+) RESULT", x).group(1)))
+         pr_cookie_list = ([line.rsplit('=', 1)[-1] for line in access_log_lines])
+         pr_cookie_list = [int(pr_cookie) for pr_cookie in pr_cookie_list]
+         log.info('Assert that last pr_cookie == -1 and others pr_cookie == 0')
+-- 
+2.48.0
+
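To see why one sort line is enough of a mitigation, consider log lines flushed out of chronological order; the sample lines below are made up for illustration, but the sort key is the one from the patch:

    import re

    access_log_lines = [
        'conn=1 op=12 RESULT err=0 ... pr_cookie=-1',
        'conn=1 op=10 RESULT err=0 ... pr_cookie=0',
        'conn=1 op=11 RESULT err=0 ... pr_cookie=0',
    ]
    access_log_lines.sort(
        key=lambda x: int(re.search(r"op=(\d+) RESULT", x).group(1)))
    # Now ordered op=10, op=11, op=12, so the last pr_cookie is -1,
    # matching the assertions at the end of test_multi_suffix_search().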
diff --git a/SOURCES/0007-Issue-6229-After-an-initial-failure-subsequent-onlin.patch b/SOURCES/0007-Issue-6229-After-an-initial-failure-subsequent-onlin.patch
new file mode 100644
index 0000000..0abffa3
--- /dev/null
+++ b/SOURCES/0007-Issue-6229-After-an-initial-failure-subsequent-onlin.patch
@@ -0,0 +1,566 @@
+From 769e71499880a0820424bf925c0f0fe793e11cc8 Mon Sep 17 00:00:00 2001
+From: progier389 <progier@redhat.com>
+Date: Fri, 28 Jun 2024 18:56:49 +0200
+Subject: [PATCH] Issue 6229 - After an initial failure, subsequent online
+ backups fail (#6230)
+
+* Issue 6229 - After an initial failure, subsequent online backups will not work
+
+Several issues related to backup task error handling:
+- Backends stay busy after the failure
+- Exit code is 0 in some cases
+- Crash if failing to open the backup directory
+And a more general one:
+- lib389 Task DN collision
+
+Solutions:
+- Always reset the busy flags that have been set
+- Ensure that 0 is not returned in the error case
+- Avoid closing a NULL directory descriptor
+- Use a timestamp with millisecond precision to create the task DN
+
+Issue: #6229
+
+Reviewed by: @droideck (Thanks!)
+
+(cherry picked from commit 04a0b6ac776a1d588ec2e10ff651e5015078ad21)
+---
+ ldap/servers/slapd/back-ldbm/archive.c        | 45 +++++-----
+ .../slapd/back-ldbm/db-mdb/mdb_layer.c        |  3 +
+ src/lib389/lib389/__init__.py                 | 10 +--
+ src/lib389/lib389/tasks.py                    | 82 +++++++++----------
+ 4 files changed, 70 insertions(+), 70 deletions(-)
+
+diff --git a/ldap/servers/slapd/back-ldbm/archive.c b/ldap/servers/slapd/back-ldbm/archive.c
+index 0460a42f6..6658cc80a 100644
+--- a/ldap/servers/slapd/back-ldbm/archive.c
++++ b/ldap/servers/slapd/back-ldbm/archive.c
+@@ -16,6 +16,8 @@
+ #include "back-ldbm.h"
+ #include "dblayer.h"
+ 
++#define NO_OBJECT ((Object*)-1)
++
+ int
+ ldbm_temporary_close_all_instances(Slapi_PBlock *pb)
+ {
+@@ -270,6 +272,7 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
+     int run_from_cmdline = 0;
+     Slapi_Task *task;
+     struct stat sbuf;
++    Object *last_busy_inst_obj = NO_OBJECT;
+ 
+     slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
+     slapi_pblock_get(pb, SLAPI_SEQ_VAL, &rawdirectory);
+@@ -380,13 +383,12 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
+ 
+     /* to avoid conflict w/ import, do this check for commandline, as well */
+     {
+-        Object *inst_obj, *inst_obj2;
+         ldbm_instance *inst = NULL;
+ 
+         /* server is up -- mark all backends busy */
+-        for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
+-             inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
+-            inst = (ldbm_instance *)object_get_data(inst_obj);
++        for (last_busy_inst_obj = objset_first_obj(li->li_instance_set); last_busy_inst_obj;
++             last_busy_inst_obj = objset_next_obj(li->li_instance_set, last_busy_inst_obj)) {
++            inst = (ldbm_instance *)object_get_data(last_busy_inst_obj);
+ 
+             /* check if an import/restore is already ongoing... */
+             if (instance_set_busy(inst) != 0 || dblayer_in_import(inst) != 0) {
+@@ -400,20 +402,6 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
+                                           "another task and cannot be disturbed.",
+                                           inst->inst_name);
+                 }
+-
+-                /* painfully, we have to clear the BUSY flags on the
+-                 * backends we'd already marked...
+-                 */
+-                for (inst_obj2 = objset_first_obj(li->li_instance_set);
+-                     inst_obj2 && (inst_obj2 != inst_obj);
+-                     inst_obj2 = objset_next_obj(li->li_instance_set,
+-                                                 inst_obj2)) {
+-                    inst = (ldbm_instance *)object_get_data(inst_obj2);
+-                    instance_set_not_busy(inst);
+-                }
+-                if (inst_obj2 && inst_obj2 != inst_obj)
+-                    object_release(inst_obj2);
+-                object_release(inst_obj);
+                 goto err;
+             }
+         }
+@@ -427,18 +415,26 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
+         goto err;
+     }
+ 
+-    if (!run_from_cmdline) {
++err:
++    /* Clear all BUSY flags that have been previously set */
++    if (last_busy_inst_obj != NO_OBJECT) {
+         ldbm_instance *inst;
+         Object *inst_obj;
+ 
+-        /* none of these backends are busy anymore */
+-        for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
++        for (inst_obj = objset_first_obj(li->li_instance_set);
++             inst_obj && (inst_obj != last_busy_inst_obj);
+              inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
+             inst = (ldbm_instance *)object_get_data(inst_obj);
+             instance_set_not_busy(inst);
+         }
++        if (last_busy_inst_obj != NULL) {
++            /* release last seen object for aborted objset_next_obj iterations */
++            if (inst_obj != NULL) {
++                object_release(inst_obj);
++            }
++            object_release(last_busy_inst_obj);
++        }
+     }
+-err:
+     if (return_value) {
+         if (dir_bak) {
+             slapi_log_err(SLAPI_LOG_ERR,
+@@ -727,7 +723,10 @@ ldbm_archive_config(char *bakdir, Slapi_Task *task)
+     }
+ 
+ error:
+-    PR_CloseDir(dirhandle);
++    if (NULL != dirhandle) {
++        PR_CloseDir(dirhandle);
++        dirhandle = NULL;
++    }
+     dse_backup_unlock();
+     slapi_ch_free_string(&backup_config_dir);
+     slapi_ch_free_string(&dse_file);
+diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
+index 70a289bdb..de4161b0c 100644
+--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
+@@ -983,6 +983,9 @@ dbmdb_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task)
+     if (ldbm_archive_config(dest_dir, task) != 0) {
+         slapi_log_err(SLAPI_LOG_ERR, "dbmdb_backup",
+                 "Backup of config files failed or is incomplete\n");
++        if (0 == return_value) {
++            return_value = -1;
++        }
+     }
+ 
+     goto bail;
+diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
+index 368741a66..cb372c138 100644
+--- a/src/lib389/lib389/__init__.py
++++ b/src/lib389/lib389/__init__.py
+@@ -69,7 +69,7 @@ from lib389.utils import (
+     get_user_is_root)
+ from lib389.paths import Paths
+ from lib389.nss_ssl import NssSsl
+-from lib389.tasks import BackupTask, RestoreTask
++from lib389.tasks import BackupTask, RestoreTask, Task
+ from lib389.dseldif import DSEldif
+ 
+ # mixin
+@@ -1424,7 +1424,7 @@ class DirSrv(SimpleLDAPObject, object):
+                                        name, self.ds_paths.prefix)
+ 
+         # create the archive
+-        name = "backup_%s_%s.tar.gz" % (self.serverid, time.strftime("%m%d%Y_%H%M%S"))
++        name = "backup_%s_%s.tar.gz" % (self.serverid, Task.get_timestamp())
+         backup_file = os.path.join(backup_dir, name)
+         tar = tarfile.open(backup_file, "w:gz")
+         tar.extraction_filter = (lambda member, path: member)
+@@ -2810,7 +2810,7 @@ class DirSrv(SimpleLDAPObject, object):
+         else:
+             # No output file specified.  Use the default ldif location/name
+             cmd.append('-a')
+-            tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
++            tnow = Task.get_timestamp()
+             if bename:
+                 ldifname = os.path.join(self.ds_paths.ldif_dir, "%s-%s-%s.ldif" % (self.serverid, bename, tnow))
+             else:
+@@ -2881,7 +2881,7 @@ class DirSrv(SimpleLDAPObject, object):
+ 
+         if archive_dir is None:
+             # Use the instance name and date/time as the default backup name
+-            tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
++            tnow = Task.get_timestamp()
+             archive_dir = os.path.join(self.ds_paths.backup_dir, "%s-%s" % (self.serverid, tnow))
+         elif not archive_dir.startswith("/"):
+             # Relative path, append it to the bak directory
+@@ -3506,7 +3506,7 @@ class DirSrv(SimpleLDAPObject, object):
+ 
+         if archive is None:
+             # Use the instance name and date/time as the default backup name
+-            tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
++            tnow = Task.get_timestamp()
+             if self.serverid is not None:
+                 backup_dir_name = "%s-%s" % (self.serverid, tnow)
+             else:
+diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
+index 6c2adb5b2..6bf302862 100644
+--- a/src/lib389/lib389/tasks.py
++++ b/src/lib389/lib389/tasks.py
+@@ -118,7 +118,7 @@ class Task(DSLdapObject):
+         return super(Task, self).create(rdn, properties, basedn)
+ 
+     @staticmethod
+-    def _get_task_date():
++    def get_timestamp():
+         """Return a timestamp to use in naming new task entries."""
+ 
+         return datetime.now().isoformat()
+@@ -132,7 +132,7 @@ class AutomemberRebuildMembershipTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'automember_rebuild_' + Task._get_task_date()
++        self.cn = 'automember_rebuild_' + Task.get_timestamp()
+         dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_REBUILD_TASK
+ 
+         super(AutomemberRebuildMembershipTask, self).__init__(instance, dn)
+@@ -147,7 +147,7 @@ class AutomemberAbortRebuildTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'automember_abort_' + Task._get_task_date()
++        self.cn = 'automember_abort_' + Task.get_timestamp()
+         dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_ABORT_REBUILD_TASK
+ 
+         super(AutomemberAbortRebuildTask, self).__init__(instance, dn)
+@@ -161,7 +161,7 @@ class FixupLinkedAttributesTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'fixup_linked_attrs_' + Task._get_task_date()
++        self.cn = 'fixup_linked_attrs_' + Task.get_timestamp()
+         dn = "cn=" + self.cn + "," + DN_FIXUP_LINKED_ATTIBUTES
+ 
+         super(FixupLinkedAttributesTask, self).__init__(instance, dn)
+@@ -175,7 +175,7 @@ class MemberUidFixupTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'memberUid_fixup_' + Task._get_task_date()
++        self.cn = 'memberUid_fixup_' + Task.get_timestamp()
+         dn = f"cn={self.cn},cn=memberuid task,cn=tasks,cn=config"
+ 
+         super(MemberUidFixupTask, self).__init__(instance, dn)
+@@ -190,7 +190,7 @@ class MemberOfFixupTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'memberOf_fixup_' + Task._get_task_date()
++        self.cn = 'memberOf_fixup_' + Task.get_timestamp()
+         dn = "cn=" + self.cn + "," + DN_MBO_TASK
+ 
+         super(MemberOfFixupTask, self).__init__(instance, dn)
+@@ -205,7 +205,7 @@ class USNTombstoneCleanupTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'usn_cleanup_' + Task._get_task_date()
++        self.cn = 'usn_cleanup_' + Task.get_timestamp()
+         dn = "cn=" + self.cn + ",cn=USN tombstone cleanup task," + DN_TASKS
+ 
+         super(USNTombstoneCleanupTask, self).__init__(instance, dn)
+@@ -225,7 +225,7 @@ class csngenTestTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'csngenTest_' + Task._get_task_date()
++        self.cn = 'csngenTest_' + Task.get_timestamp()
+         dn = "cn=" + self.cn + ",cn=csngen_test," + DN_TASKS
+         super(csngenTestTask, self).__init__(instance, dn)
+ 
+@@ -238,7 +238,7 @@ class EntryUUIDFixupTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'entryuuid_fixup_' + Task._get_task_date()
++        self.cn = 'entryuuid_fixup_' + Task.get_timestamp()
+         dn = "cn=" + self.cn + "," + DN_EUUID_TASK
+         super(EntryUUIDFixupTask, self).__init__(instance, dn)
+         self._must_attributes.extend(['basedn'])
+@@ -252,7 +252,7 @@ class DBCompactTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'compact_db_' + Task._get_task_date()
++        self.cn = 'compact_db_' + Task.get_timestamp()
+         dn = "cn=" + self.cn + "," + DN_COMPACTDB_TASK
+         super(DBCompactTask, self).__init__(instance, dn)
+ 
+@@ -265,7 +265,7 @@ class SchemaReloadTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'schema_reload_' + Task._get_task_date()
++        self.cn = 'schema_reload_' + Task.get_timestamp()
+         dn = "cn=" + self.cn + ",cn=schema reload task," + DN_TASKS
+         super(SchemaReloadTask, self).__init__(instance, dn)
+ 
+@@ -278,7 +278,7 @@ class SyntaxValidateTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'syntax_validate_' + Task._get_task_date()
++        self.cn = 'syntax_validate_' + Task.get_timestamp()
+         dn = f"cn={self.cn},cn=syntax validate,cn=tasks,cn=config"
+ 
+         super(SyntaxValidateTask, self).__init__(instance, dn)
+@@ -295,7 +295,7 @@ class AbortCleanAllRUVTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'abortcleanallruv_' + Task._get_task_date()
++        self.cn = 'abortcleanallruv_' + Task.get_timestamp()
+         dn = "cn=" + self.cn + ",cn=abort cleanallruv," + DN_TASKS
+ 
+         super(AbortCleanAllRUVTask, self).__init__(instance, dn)
+@@ -312,7 +312,7 @@ class CleanAllRUVTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'cleanallruv_' + Task._get_task_date()
++        self.cn = 'cleanallruv_' + Task.get_timestamp()
+         dn = "cn=" + self.cn + ",cn=cleanallruv," + DN_TASKS
+         self._properties = None
+ 
+@@ -359,7 +359,7 @@ class ImportTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'import_' + Task._get_task_date()
++        self.cn = 'import_' + Task.get_timestamp()
+         dn = "cn=%s,%s" % (self.cn, DN_IMPORT_TASK)
+         self._properties = None
+ 
+@@ -388,7 +388,7 @@ class ExportTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'export_' + Task._get_task_date()
++        self.cn = 'export_' + Task.get_timestamp()
+         dn = "cn=%s,%s" % (self.cn, DN_EXPORT_TASK)
+         self._properties = None
+ 
+@@ -411,7 +411,7 @@ class BackupTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'backup_' + Task._get_task_date()
++        self.cn = 'backup_' + Task.get_timestamp()
+         dn = "cn=" + self.cn + ",cn=backup," + DN_TASKS
+         self._properties = None
+ 
+@@ -426,7 +426,7 @@ class RestoreTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'restore_' + Task._get_task_date()
++        self.cn = 'restore_' + Task.get_timestamp()
+         dn = "cn=" + self.cn + ",cn=restore," + DN_TASKS
+         self._properties = None
+ 
+@@ -513,7 +513,7 @@ class Tasks(object):
+             raise ValueError("Import file (%s) does not exist" % input_file)
+ 
+         # Prepare the task entry
+-        cn = "import_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = "import_" + Task.get_timestamp()
+         dn = "cn=%s,%s" % (cn, DN_IMPORT_TASK)
+         entry = Entry(dn)
+         entry.setValues('objectclass', 'top', 'extensibleObject')
+@@ -581,7 +581,7 @@ class Tasks(object):
+             raise ValueError("output_file is mandatory")
+ 
+         # Prepare the task entry
+-        cn = "export_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = "export_" + Task.get_timestamp()
+         dn = "cn=%s,%s" % (cn, DN_EXPORT_TASK)
+         entry = Entry(dn)
+         entry.update({
+@@ -637,7 +637,7 @@ class Tasks(object):
+             raise ValueError("You must specify a backup directory.")
+ 
+         # build the task entry
+-        cn = "backup_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = "backup_" + Task.get_timestamp()
+         dn = "cn=%s,%s" % (cn, DN_BACKUP_TASK)
+         entry = Entry(dn)
+         entry.update({
+@@ -694,7 +694,7 @@ class Tasks(object):
+             raise ValueError("Backup file (%s) does not exist" % backup_dir)
+ 
+         # build the task entry
+-        cn = "restore_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = "restore_" + Task.get_timestamp()
+         dn = "cn=%s,%s" % (cn, DN_RESTORE_TASK)
+         entry = Entry(dn)
+         entry.update({
+@@ -789,7 +789,7 @@ class Tasks(object):
+                     attrs.append(attr)
+             else:
+                 attrs.append(attrname)
+-            cn = "index_vlv_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
++            cn = "index_vlv_%s" % (Task.get_timestamp())
+             dn = "cn=%s,%s" % (cn, DN_INDEX_TASK)
+             entry = Entry(dn)
+             entry.update({
+@@ -803,7 +803,7 @@ class Tasks(object):
+                 #
+                 # Reindex all attributes - gather them first...
+                 #
+-                cn = "index_all_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
++                cn = "index_all_%s" % (Task.get_timestamp())
+                 dn = ('cn=%s,cn=ldbm database,cn=plugins,cn=config' % backend)
+                 try:
+                     indexes = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, '(objectclass=nsIndex)')
+@@ -815,7 +815,7 @@ class Tasks(object):
+                 #
+                 # Reindex specific attributes
+                 #
+-                cn = "index_attrs_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
++                cn = "index_attrs_%s" % (Task.get_timestamp())
+                 if isinstance(attrname, (tuple, list)):
+                     # Need to guarantee this is a list (and not a tuple)
+                     for attr in attrname:
+@@ -903,8 +903,7 @@ class Tasks(object):
+ 
+             suffix = ents[0].getValue(attr)
+ 
+-        cn = "fixupmemberof_" + time.strftime("%m%d%Y_%H%M%S",
+-                                              time.localtime())
++        cn = "fixupmemberof_" + Task.get_timestamp()
+         dn = "cn=%s,%s" % (cn, DN_MBO_TASK)
+         entry = Entry(dn)
+         entry.setValues('objectclass', 'top', 'extensibleObject')
+@@ -965,8 +964,7 @@ class Tasks(object):
+             if len(ents) != 1:
+                 raise ValueError("invalid backend name: %s" % bename)
+ 
+-        cn = "fixupTombstone_" + time.strftime("%m%d%Y_%H%M%S",
+-                                               time.localtime())
++        cn = "fixupTombstone_" + Task.get_timestamp()
+         dn = "cn=%s,%s" % (cn, DN_TOMB_FIXUP_TASK)
+         entry = Entry(dn)
+         entry.setValues('objectclass', 'top', 'extensibleObject')
+@@ -1019,7 +1017,7 @@ class Tasks(object):
+         @return exit code
+         '''
+ 
+-        cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = 'task-' + Task.get_timestamp()
+         dn = ('cn=%s,cn=automember rebuild membership,cn=tasks,cn=config' % cn)
+ 
+         entry = Entry(dn)
+@@ -1077,7 +1075,7 @@ class Tasks(object):
+         if not ldif_out:
+             raise ValueError("Missing ldif_out")
+ 
+-        cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = 'task-' + Task.get_timestamp()
+         dn = ('cn=%s,cn=automember export updates,cn=tasks,cn=config' % cn)
+         entry = Entry(dn)
+         entry.setValues('objectclass', 'top', 'extensibleObject')
+@@ -1129,7 +1127,7 @@ class Tasks(object):
+         if not ldif_out or not ldif_in:
+             raise ValueError("Missing ldif_out and/or ldif_in")
+ 
+-        cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = 'task-' + Task.get_timestamp()
+         dn = ('cn=%s,cn=automember map updates,cn=tasks,cn=config' % cn)
+ 
+         entry = Entry(dn)
+@@ -1175,7 +1173,7 @@ class Tasks(object):
+         @return exit code
+         '''
+ 
+-        cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = 'task-' + Task.get_timestamp()
+         dn = ('cn=%s,cn=fixup linked attributes,cn=tasks,cn=config' % cn)
+         entry = Entry(dn)
+         entry.setValues('objectclass', 'top', 'extensibleObject')
+@@ -1219,7 +1217,7 @@ class Tasks(object):
+         @return exit code
+         '''
+ 
+-        cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = 'task-' + Task.get_timestamp()
+         dn = ('cn=%s,cn=schema reload task,cn=tasks,cn=config' % cn)
+         entry = Entry(dn)
+         entry.setValues('objectclass', 'top', 'extensibleObject')
+@@ -1264,7 +1262,7 @@ class Tasks(object):
+         @return exit code
+         '''
+ 
+-        cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = 'task-' + Task.get_timestamp()
+         dn = ('cn=%s,cn=memberuid task,cn=tasks,cn=config' % cn)
+         entry = Entry(dn)
+         entry.setValues('objectclass', 'top', 'extensibleObject')
+@@ -1311,7 +1309,7 @@ class Tasks(object):
+         @return exit code
+         '''
+ 
+-        cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = 'task-' + Task.get_timestamp()
+         dn = ('cn=%s,cn=syntax validate,cn=tasks,cn=config' % cn)
+         entry = Entry(dn)
+         entry.setValues('objectclass', 'top', 'extensibleObject')
+@@ -1358,7 +1356,7 @@ class Tasks(object):
+         @return exit code
+         '''
+ 
+-        cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = 'task-' + Task.get_timestamp()
+         dn = ('cn=%s,cn=USN tombstone cleanup task,cn=tasks,cn=config' % cn)
+         entry = Entry(dn)
+         entry.setValues('objectclass', 'top', 'extensibleObject')
+@@ -1413,7 +1411,7 @@ class Tasks(object):
+         if not configfile:
+             raise ValueError("Missing required paramter: configfile")
+ 
+-        cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = 'task-' + Task.get_timestamp()
+         dn = ('cn=%s,cn=sysconfig reload,cn=tasks,cn=config' % cn)
+         entry = Entry(dn)
+         entry.setValues('objectclass', 'top', 'extensibleObject')
+@@ -1464,7 +1462,7 @@ class Tasks(object):
+         if not suffix:
+             raise ValueError("Missing required paramter: suffix")
+ 
+-        cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = 'task-' + Task.get_timestamp()
+         dn = ('cn=%s,cn=cleanallruv,cn=tasks,cn=config' % cn)
+         entry = Entry(dn)
+         entry.setValues('objectclass', 'top', 'extensibleObject')
+@@ -1516,7 +1514,7 @@ class Tasks(object):
+         if not suffix:
+             raise ValueError("Missing required paramter: suffix")
+ 
+-        cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = 'task-' + Task.get_timestamp()
+         dn = ('cn=%s,cn=abort cleanallruv,cn=tasks,cn=config' % cn)
+         entry = Entry(dn)
+         entry.setValues('objectclass', 'top', 'extensibleObject')
+@@ -1571,7 +1569,7 @@ class Tasks(object):
+         if not nsArchiveDir:
+             raise ValueError("Missing required paramter: nsArchiveDir")
+ 
+-        cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
++        cn = 'task-' + Task.get_timestamp()
+         dn = ('cn=%s,cn=upgradedb,cn=tasks,cn=config' % cn)
+         entry = Entry(dn)
+         entry.setValues('objectclass', 'top', 'extensibleObject')
+@@ -1616,6 +1614,6 @@ class LDAPIMappingReloadTask(Task):
+     """
+ 
+     def __init__(self, instance, dn=None):
+-        self.cn = 'reload-' + Task._get_task_date()
++        self.cn = 'reload-' + Task.get_timestamp()
+         dn = f'cn={self.cn},cn=reload ldapi mappings,cn=tasks,cn=config'
+         super(LDAPIMappingReloadTask, self).__init__(instance, dn)
+-- 
+2.48.0
+
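The task-DN collision part of the fix comes down to timestamp resolution; a quick sketch of the before and after formats (the values in the comments are illustrative):

    import time
    from datetime import datetime

    # Old naming scheme: one-second granularity, so two tasks created
    # within the same second collide on the same cn/DN.
    old = time.strftime("%m%d%Y_%H%M%S")   # e.g. '02122025_140359'

    # New scheme (Task.get_timestamp): isoformat() carries microseconds.
    new = datetime.now().isoformat()       # e.g. '2025-02-12T14:03:59.123456'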
diff --git a/SOURCES/0008-Issue-6554-During-import-of-entries-without-nsUnique.patch b/SOURCES/0008-Issue-6554-During-import-of-entries-without-nsUnique.patch
new file mode 100644
index 0000000..f5dd5f0
--- /dev/null
+++ b/SOURCES/0008-Issue-6554-During-import-of-entries-without-nsUnique.patch
@@ -0,0 +1,165 @@
+From b2511553590f0d9b41856d8baff5f3cd103dd46f Mon Sep 17 00:00:00 2001
+From: tbordaz <tbordaz@redhat.com>
+Date: Thu, 6 Feb 2025 18:25:36 +0100
+Subject: [PATCH] Issue 6554 - During import of entries without nsUniqueId, a
+ supplier generates duplicate nsUniqueId (LMDB only) (#6582)
+
+Bug description:
+	During an import the entry is prepared (schema, operational
+	attributes, password encryption,...) before starting the
+	update of the database and indexes.
+	A step of the preparation is to assign a value to the 'nsuniqueid'
+	operational attribute. 'nsuniqueid' must be unique.
+	In LMDB the preparation is done by multiple threads (workers).
+	In that case the 'nsuniqueid' values are generated in parallel and,
+	as the generation is time based, several values can be duplicated.
+
+Fix description:
+	To prevent this, the routine dbmdb_import_generate_uniqueid
+	now synchronizes the workers.
+
+fixes: #6554
+
+Reviewed by: Pierre Rogier
+---
+ .../tests/suites/import/import_test.py        | 79 ++++++++++++++++++-
+ .../back-ldbm/db-mdb/mdb_import_threads.c     | 11 +++
+ 2 files changed, 89 insertions(+), 1 deletion(-)
+
+diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py
+index b7cba32fd..18caec633 100644
+--- a/dirsrvtests/tests/suites/import/import_test.py
++++ b/dirsrvtests/tests/suites/import/import_test.py
+@@ -14,11 +14,13 @@ import os
+ import pytest
+ import time
+ import glob
++import re
+ import logging
+ import subprocess
+ from datetime import datetime
+ from lib389.topologies import topology_st as topo
+-from lib389._constants import DEFAULT_SUFFIX, TaskWarning
++from lib389.topologies import topology_m2 as topo_m2
++from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX, TaskWarning
+ from lib389.dbgen import dbgen_users
+ from lib389.tasks import ImportTask
+ from lib389.index import Indexes
+@@ -690,6 +692,81 @@ def test_online_import_under_load(topo):
+     assert import_task.get_exit_code() == 0
+ 
+ 
++def test_duplicate_nsuniqueid(topo_m2, request):
++    """Test that after an offline import all
++    nsuniqueid are different
++
++    :id: a2541677-a288-4633-bacf-4050cc56016d
++    :setup: MMR with 2 suppliers
++    :steps:
++        1. Stop the instance to do offline operations
++        2. Generate a 5K users LDIF file
++        3. Check that no nsuniqueid values are present in the generated file
++        4. Import the generated LDIF
++        5. Export the database
++        6. Check that the exported LDIF contains more than 5K nsuniqueid
++        7. Check that there are no duplicate nsuniqueid in the exported LDIF
++    :expectedresults:
++        1. Should succeed
++        2. Should succeed
++        3. Should succeed
++        4. Should succeed
++        5. Should succeed
++        6. Should succeed
++        7. Should succeed
++    """
++    m1 = topo_m2.ms["supplier1"]
++
++    # Stop the instance
++    m1.stop()
++
++    # Generate a test ldif (5k entries)
++    log.info("Generating LDIF...")
++    ldif_dir = m1.get_ldif_dir()
++    import_ldif = ldif_dir + '/5k_users_import.ldif'
++    dbgen_users(m1, 5000, import_ldif, DEFAULT_SUFFIX)
++
++    # Check that the generated LDIF does not contain nsuniqueid
++    all_nsuniqueid = []
++    with open(import_ldif, 'r') as file:
++        for line in file:
++            if line.lower().startswith("nsuniqueid: "):
++                all_nsuniqueid.append(line.split(': ')[1])
++    log.info("import file contains " + str(len(all_nsuniqueid)) + " nsuniqueid")
++    assert len(all_nsuniqueid) == 0
++
++    # Import the "nsuniquied free" LDIF file
++    if not m1.ldif2db('userRoot', None, None, None, import_ldif):
++        assert False
++
++    # Export the DB that now should contain nsuniqueid
++    export_ldif = ldif_dir + '/5k_user_export.ldif'
++    log.info("export to file " + export_ldif)
++    m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
++               excludeSuffixes=None, repl_data=False,
++               outputfile=export_ldif, encrypt=False)
++
++    # Check that the export LDIF contain nsuniqueid
++    all_nsuniqueid = []
++    with open(export_ldif, 'r') as file:
++        for line in file:
++            if line.lower().startswith("nsuniqueid: "):
++                all_nsuniqueid.append(line.split(': ')[1])
++    log.info("export file " + export_ldif + " contains " + str(len(all_nsuniqueid)) + " nsuniqueid")
++    assert len(all_nsuniqueid) >= 5000
++
++    # Check that the nsuniqueid are unique
++    assert len(set(all_nsuniqueid)) == len(all_nsuniqueid)
++
++    def fin():
++        if os.path.exists(import_ldif):
++            os.remove(import_ldif)
++        if os.path.exists(export_ldif):
++            os.remove(export_ldif)
++        m1.start()
++
++    request.addfinalizer(fin)
++
+ if __name__ == '__main__':
+     # Run isolated
+     # -s for DEBUG mode
+diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
+index 707a110c5..0f445bb56 100644
+--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
+@@ -610,10 +610,20 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e)
+ {
+     const char *uniqueid = slapi_entry_get_uniqueid(e);
+     int rc = UID_SUCCESS;
++    static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+ 
+     if (!uniqueid && (job->uuid_gen_type != SLAPI_UNIQUEID_GENERATE_NONE)) {
+         char *newuniqueid;
+ 
++        /* With 'mdb' several workers generate nsuniqueid values, so
++         * we need to serialize them to prevent generating duplicates.
++         * From a performance point of view this only impacts import.
++         * The default is SLAPI_UNIQUEID_GENERATE_TIME_BASED, so the
++         * only syscall is clock_gettime followed by string formatting,
++         * which should limit contention.
++         */
++        pthread_mutex_lock(&mutex);
++
+         /* generate id based on dn */
+         if (job->uuid_gen_type == SLAPI_UNIQUEID_GENERATE_NAME_BASED) {
+             char *dn = slapi_entry_get_dn(e);
+@@ -624,6 +634,7 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e)
+             /* time based */
+             rc = slapi_uniqueIDGenerateString(&newuniqueid);
+         }
++        pthread_mutex_unlock(&mutex);
+ 
+         if (rc == UID_SUCCESS) {
+             slapi_entry_set_uniqueid(e, newuniqueid);
+-- 
+2.48.0
+
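Why a plain mutex is sufficient: the generator is time based with internal state, so two workers reading that state concurrently can emit the same value, and serializing the read-modify-write closes the window. A toy Python analogue (not the actual slapi_uniqueIDGenerateString implementation):

    import threading
    import time

    class TimeBasedIdGen:
        """Toy analogue of a time-based id generator with shared state."""
        def __init__(self):
            self._last = None
            self._seq = 0
            self._lock = threading.Lock()

        def next_id(self):
            # Serialize, as the fix does with a pthread mutex: without it,
            # two workers can read the same clock value (and the same
            # sequence state) and emit identical ids.
            with self._lock:
                now = time.time_ns() // 1_000_000   # millisecond clock
                if now == self._last:
                    self._seq += 1
                else:
                    self._last, self._seq = now, 0
                return f"{now:x}-{self._seq:04x}"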
diff --git a/SOURCES/0009-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch b/SOURCES/0009-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch
new file mode 100644
index 0000000..93fef39
--- /dev/null
+++ b/SOURCES/0009-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch
@@ -0,0 +1,38 @@
+From 116b7cf21618ad7e717ae7f535709508a824f7d9 Mon Sep 17 00:00:00 2001
+From: Viktor Ashirov <vashirov@redhat.com>
+Date: Thu, 13 Feb 2025 16:37:43 +0100
+Subject: [PATCH] Issue 6561 - TLS 1.2 stickiness in FIPS mode
+
+Description:
+TLS 1.3 has worked with NSS in FIPS mode for quite some time now,
+so this restriction is no longer needed.
+
+Fixes: https://github.com/389ds/389-ds-base/issues/6561
+
+Reviewed by: @mreynolds389 (Thanks!)
+---
+ ldap/servers/slapd/ssl.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c
+index 94259efe7..84a7fb004 100644
+--- a/ldap/servers/slapd/ssl.c
++++ b/ldap/servers/slapd/ssl.c
+@@ -1929,14 +1929,6 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
+      */
+     sslStatus = SSL_VersionRangeGet(pr_sock, &slapdNSSVersions);
+     if (sslStatus == SECSuccess) {
+-        if (slapdNSSVersions.max > LDAP_OPT_X_TLS_PROTOCOL_TLS1_2 && fipsMode) {
+-            /*
+-             * FIPS & NSS currently only support a max version of TLS1.2
+-             * (although NSS advertises 1.3 as a max range in FIPS mode),
+-             * hopefully this code block can be removed soon...
+-             */
+-            slapdNSSVersions.max = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2;
+-        }
+         /* Reset request range */
+         sslStatus = SSL_VersionRangeSet(pr_sock, &slapdNSSVersions);
+         if (sslStatus == SECSuccess) {
+-- 
+2.48.1
+
diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec
index 458e84e..1128df8 100644
--- a/SPECS/389-ds-base.spec
+++ b/SPECS/389-ds-base.spec
@@ -46,9 +46,9 @@ ExcludeArch: i686
 
 Summary:          389 Directory Server (base)
 Name:             389-ds-base
-Version:          2.5.2
-Release:          1%{?dist}
-License:          GPL-3.0-or-later AND (0BSD OR Apache-2.0 OR MIT) AND (Apache-2.0 OR Apache-2.0 WITH LLVM-exception OR MIT) AND (Apache-2.0 OR BSL-1.0) AND (Apache-2.0 OR MIT OR Zlib) AND (Apache-2.0 OR MIT) AND (CC-BY-4.0 AND MIT) AND (MIT OR Apache-2.0) AND Unicode-DFS-2016 AND (MIT OR CC0-1.0) AND (MIT OR Unlicense) AND 0BSD AND Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MIT AND ISC AND MPL-2.0 AND PSF-2.0
+Version:          2.6.1
+Release:          4%{?dist}
+License:          GPL-3.0-or-later WITH GPL-3.0-389-ds-base-exception AND (0BSD OR Apache-2.0 OR MIT) AND (Apache-2.0 OR Apache-2.0 WITH LLVM-exception OR MIT) AND (Apache-2.0 OR BSD-2-Clause OR MIT) AND (Apache-2.0 OR BSL-1.0) AND (Apache-2.0 OR MIT OR Zlib) AND (Apache-2.0 OR MIT) AND (CC-BY-4.0 AND MIT) AND (MIT OR Apache-2.0) AND Unicode-3.0 AND (MIT OR CC0-1.0) AND (MIT OR Unlicense) AND 0BSD AND Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MIT AND ISC AND MPL-2.0 AND PSF-2.0
 URL:              https://www.port389.org
 Conflicts:        selinux-policy-base < 3.9.8
 Conflicts:        freeipa-server < 4.0.3
@@ -58,94 +58,95 @@ Obsoletes:        %{name}-legacy-tools-debuginfo < 1.4.4.6
 Provides:         ldif2ldbm >= 0
 
 ##### Bundled cargo crates list - START #####
-Provides:  bundled(crate(addr2line)) = 0.22.0
-Provides:  bundled(crate(adler)) = 1.0.2
+Provides:  bundled(crate(addr2line)) = 0.24.2
+Provides:  bundled(crate(adler2)) = 2.0.0
 Provides:  bundled(crate(ahash)) = 0.7.8
 Provides:  bundled(crate(atty)) = 0.2.14
-Provides:  bundled(crate(autocfg)) = 1.3.0
-Provides:  bundled(crate(backtrace)) = 0.3.73
+Provides:  bundled(crate(autocfg)) = 1.4.0
+Provides:  bundled(crate(backtrace)) = 0.3.74
 Provides:  bundled(crate(base64)) = 0.13.1
-Provides:  bundled(crate(bitflags)) = 2.6.0
+Provides:  bundled(crate(bitflags)) = 2.8.0
 Provides:  bundled(crate(byteorder)) = 1.5.0
 Provides:  bundled(crate(cbindgen)) = 0.26.0
-Provides:  bundled(crate(cc)) = 1.1.7
+Provides:  bundled(crate(cc)) = 1.2.10
 Provides:  bundled(crate(cfg-if)) = 1.0.0
 Provides:  bundled(crate(clap)) = 3.2.25
 Provides:  bundled(crate(clap_lex)) = 0.2.4
 Provides:  bundled(crate(concread)) = 0.2.21
 Provides:  bundled(crate(crossbeam)) = 0.8.4
-Provides:  bundled(crate(crossbeam-channel)) = 0.5.13
-Provides:  bundled(crate(crossbeam-deque)) = 0.8.5
+Provides:  bundled(crate(crossbeam-channel)) = 0.5.14
+Provides:  bundled(crate(crossbeam-deque)) = 0.8.6
 Provides:  bundled(crate(crossbeam-epoch)) = 0.9.18
-Provides:  bundled(crate(crossbeam-queue)) = 0.3.11
-Provides:  bundled(crate(crossbeam-utils)) = 0.8.20
-Provides:  bundled(crate(errno)) = 0.3.9
-Provides:  bundled(crate(fastrand)) = 2.1.0
+Provides:  bundled(crate(crossbeam-queue)) = 0.3.12
+Provides:  bundled(crate(crossbeam-utils)) = 0.8.21
+Provides:  bundled(crate(errno)) = 0.3.10
+Provides:  bundled(crate(fastrand)) = 2.3.0
 Provides:  bundled(crate(fernet)) = 0.1.4
 Provides:  bundled(crate(foreign-types)) = 0.3.2
 Provides:  bundled(crate(foreign-types-shared)) = 0.1.1
 Provides:  bundled(crate(getrandom)) = 0.2.15
-Provides:  bundled(crate(gimli)) = 0.29.0
+Provides:  bundled(crate(gimli)) = 0.31.1
 Provides:  bundled(crate(hashbrown)) = 0.12.3
 Provides:  bundled(crate(heck)) = 0.4.1
 Provides:  bundled(crate(hermit-abi)) = 0.1.19
 Provides:  bundled(crate(indexmap)) = 1.9.3
 Provides:  bundled(crate(instant)) = 0.1.13
-Provides:  bundled(crate(itoa)) = 1.0.11
+Provides:  bundled(crate(itoa)) = 1.0.14
 Provides:  bundled(crate(jobserver)) = 0.1.32
-Provides:  bundled(crate(libc)) = 0.2.155
-Provides:  bundled(crate(linux-raw-sys)) = 0.4.14
+Provides:  bundled(crate(libc)) = 0.2.169
+Provides:  bundled(crate(linux-raw-sys)) = 0.4.15
 Provides:  bundled(crate(lock_api)) = 0.4.12
-Provides:  bundled(crate(log)) = 0.4.22
+Provides:  bundled(crate(log)) = 0.4.25
 Provides:  bundled(crate(lru)) = 0.7.8
 Provides:  bundled(crate(memchr)) = 2.7.4
-Provides:  bundled(crate(miniz_oxide)) = 0.7.4
-Provides:  bundled(crate(object)) = 0.36.2
-Provides:  bundled(crate(once_cell)) = 1.19.0
-Provides:  bundled(crate(openssl)) = 0.10.66
+Provides:  bundled(crate(miniz_oxide)) = 0.8.3
+Provides:  bundled(crate(object)) = 0.36.7
+Provides:  bundled(crate(once_cell)) = 1.20.2
+Provides:  bundled(crate(openssl)) = 0.10.68
 Provides:  bundled(crate(openssl-macros)) = 0.1.1
-Provides:  bundled(crate(openssl-sys)) = 0.9.103
+Provides:  bundled(crate(openssl-sys)) = 0.9.104
 Provides:  bundled(crate(os_str_bytes)) = 6.6.1
 Provides:  bundled(crate(parking_lot)) = 0.11.2
 Provides:  bundled(crate(parking_lot_core)) = 0.8.6
 Provides:  bundled(crate(paste)) = 0.1.18
 Provides:  bundled(crate(paste-impl)) = 0.1.18
-Provides:  bundled(crate(pin-project-lite)) = 0.2.14
-Provides:  bundled(crate(pkg-config)) = 0.3.30
-Provides:  bundled(crate(ppv-lite86)) = 0.2.18
+Provides:  bundled(crate(pin-project-lite)) = 0.2.16
+Provides:  bundled(crate(pkg-config)) = 0.3.31
+Provides:  bundled(crate(ppv-lite86)) = 0.2.20
 Provides:  bundled(crate(proc-macro-hack)) = 0.5.20+deprecated
-Provides:  bundled(crate(proc-macro2)) = 1.0.86
-Provides:  bundled(crate(quote)) = 1.0.36
+Provides:  bundled(crate(proc-macro2)) = 1.0.93
+Provides:  bundled(crate(quote)) = 1.0.38
 Provides:  bundled(crate(rand)) = 0.8.5
 Provides:  bundled(crate(rand_chacha)) = 0.3.1
 Provides:  bundled(crate(rand_core)) = 0.6.4
 Provides:  bundled(crate(redox_syscall)) = 0.2.16
 Provides:  bundled(crate(rustc-demangle)) = 0.1.24
-Provides:  bundled(crate(rustix)) = 0.38.34
+Provides:  bundled(crate(rustix)) = 0.38.44
 Provides:  bundled(crate(ryu)) = 1.0.18
 Provides:  bundled(crate(scopeguard)) = 1.2.0
-Provides:  bundled(crate(serde)) = 1.0.204
-Provides:  bundled(crate(serde_derive)) = 1.0.204
-Provides:  bundled(crate(serde_json)) = 1.0.121
+Provides:  bundled(crate(serde)) = 1.0.217
+Provides:  bundled(crate(serde_derive)) = 1.0.217
+Provides:  bundled(crate(serde_json)) = 1.0.137
+Provides:  bundled(crate(shlex)) = 1.3.0
 Provides:  bundled(crate(smallvec)) = 1.13.2
 Provides:  bundled(crate(strsim)) = 0.10.0
-Provides:  bundled(crate(syn)) = 2.0.72
-Provides:  bundled(crate(tempfile)) = 3.10.1
+Provides:  bundled(crate(syn)) = 2.0.96
+Provides:  bundled(crate(tempfile)) = 3.15.0
 Provides:  bundled(crate(termcolor)) = 1.4.1
 Provides:  bundled(crate(textwrap)) = 0.16.1
-Provides:  bundled(crate(tokio)) = 1.39.2
-Provides:  bundled(crate(tokio-macros)) = 2.4.0
+Provides:  bundled(crate(tokio)) = 1.43.0
+Provides:  bundled(crate(tokio-macros)) = 2.5.0
 Provides:  bundled(crate(toml)) = 0.5.11
-Provides:  bundled(crate(unicode-ident)) = 1.0.12
+Provides:  bundled(crate(unicode-ident)) = 1.0.15
 Provides:  bundled(crate(uuid)) = 0.8.2
 Provides:  bundled(crate(vcpkg)) = 0.2.15
 Provides:  bundled(crate(version_check)) = 0.9.5
 Provides:  bundled(crate(wasi)) = 0.11.0+wasi_snapshot_preview1
 Provides:  bundled(crate(winapi)) = 0.3.9
 Provides:  bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0
-Provides:  bundled(crate(winapi-util)) = 0.1.8
+Provides:  bundled(crate(winapi-util)) = 0.1.9
 Provides:  bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0
-Provides:  bundled(crate(windows-sys)) = 0.52.0
+Provides:  bundled(crate(windows-sys)) = 0.59.0
 Provides:  bundled(crate(windows-targets)) = 0.52.6
 Provides:  bundled(crate(windows_aarch64_gnullvm)) = 0.52.6
 Provides:  bundled(crate(windows_aarch64_msvc)) = 0.52.6
@@ -155,8 +156,8 @@ Provides:  bundled(crate(windows_i686_msvc)) = 0.52.6
 Provides:  bundled(crate(windows_x86_64_gnu)) = 0.52.6
 Provides:  bundled(crate(windows_x86_64_gnullvm)) = 0.52.6
 Provides:  bundled(crate(windows_x86_64_msvc)) = 0.52.6
-Provides:  bundled(crate(zerocopy)) = 0.6.6
-Provides:  bundled(crate(zerocopy-derive)) = 0.6.6
+Provides:  bundled(crate(zerocopy)) = 0.7.35
+Provides:  bundled(crate(zerocopy-derive)) = 0.7.35
 Provides:  bundled(crate(zeroize)) = 1.8.1
 Provides:  bundled(crate(zeroize_derive)) = 1.4.2
 Provides:  bundled(npm(@aashutoshrathi/word-wrap)) = 1.2.6
@@ -205,7 +206,7 @@ Provides:  bundled(npm(color-convert)) = 2.0.1
 Provides:  bundled(npm(color-name)) = 1.1.4
 Provides:  bundled(npm(concat-map)) = 0.0.1
 Provides:  bundled(npm(core-js)) = 2.6.12
-Provides:  bundled(npm(cross-spawn)) = 7.0.3
+Provides:  bundled(npm(cross-spawn)) = 7.0.6
 Provides:  bundled(npm(d3-array)) = 3.2.4
 Provides:  bundled(npm(d3-color)) = 3.1.0
 Provides:  bundled(npm(d3-ease)) = 3.0.1
@@ -469,6 +470,15 @@ Source3:          https://github.com/jemalloc/%{jemalloc_name}/releases/download
 %endif
 Source4:          389-ds-base.sysusers
 
+Patch:            0001-Issue-6468-Fix-building-for-older-versions-of-Python.patch
+Patch:            0002-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch
+Patch:            0003-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch
+Patch:            0004-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch
+Patch:            0005-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch
+Patch:            0006-Issue-6258-Mitigate-race-condition-in-paged_results_.patch
+Patch:            0007-Issue-6229-After-an-initial-failure-subsequent-onlin.patch
+Patch:            0008-Issue-6554-During-import-of-entries-without-nsUnique.patch
+Patch:            0009-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch
 
 %description
 389 Directory Server is an LDAPv3 compliant server.  The base package includes
@@ -570,7 +580,7 @@ A cockpit UI Plugin for configuring and administering the 389 Directory Server
 
 %prep
 
-%autosetup -p1 -v -n %{name}-%{version}
+%autosetup -p1 -n %{name}-%{version}
 %if %{bundle_jemalloc}
 %setup -q -n %{name}-%{version} -T -D -b 3
 %endif
@@ -911,6 +921,67 @@ exit 0
 %endif
 
 %changelog
+* Wed Feb 19 2025 Viktor Ashirov <vashirov@redhat.com> - 2.6.1-4
+- Resolves: RHEL-78722 - Failed to set sslversionmax to TLS1.3 in FIPS mode with dsconf $INSTANCE security set --tls-protocol-max TLS1.3
+
+* Wed Feb 12 2025 Viktor Ashirov <vashirov@redhat.com> - 2.6.1-3
+- Resolves: RHEL-18333 Can't rename users member of automember rule
+- Resolves: RHEL-61341 After an initial failure, subsequent online backups will not work.
+- Resolves: RHEL-63887 nsslapd-mdb-max-dbs autotuning doesn't work properly
+- Resolves: RHEL-63891 dbscan crashes when showing statistics for MDB
+- Resolves: RHEL-63998 dsconf should check for number of available named databases
+- Resolves: RHEL-78344 During import of entries without nsUniqueId, a supplier generates duplicate nsUniqueId (LMDB only) [rhel-9]
+
+* Sat Feb 01 2025 Viktor Ashirov <vashirov@redhat.com> - 2.6.1-2
+- Resolves: RHEL-76748 - ns-slapd crashes with data directory ≥ 2 days old
+
+* Tue Jan 28 2025 Viktor Ashirov <vashirov@redhat.com> - 2.6.1-1
+- Update to 2.6.1
+- Resolves: RHEL-5151 - [RFE] defer memberof nested updates
+- Resolves: RHEL-54148 - leaked_storage: Variable "childelems" going out of scope leaks the storage it points to.
+- Resolves: RHEL-60135 - deadlock during cleanAllRuv
+- Resolves: RHEL-61341 - After an initial failure, subsequent online backups will not work.
+- Resolves: RHEL-61349 - Remove deprecated setting for HR time stamps in logs
+- Resolves: RHEL-62875 - Passwords are not being updated to use the configured storage scheme (nsslapd-enable-upgrade-hash is enabled).
+- Resolves: RHEL-64438 - VLV errors with RSNv3 and pruning enabled [rhel-9]
+- Resolves: RHEL-64854 - cleanallruv consumes CPU and is slow
+- Resolves: RHEL-65506 - AddressSanitizer: double-free
+- Resolves: RHEL-65512 - AddressSanitizer: heap-use-after-free in import_abort_all
+- Resolves: RHEL-65561 - LeakSanitizer: detected memory leaks in dbmdb_public_db_op
+- Resolves: RHEL-65662 - Replication issue between masters using cert based authentication
+- Resolves: RHEL-65664 - LDAP unprotected search query during certificate based authentication
+- Resolves: RHEL-65665 - Ambiguous warning about SELinux in dscreate for non-root user
+- Resolves: RHEL-65741 - LeakSanitizer: memory leak in ldbm_entryrdn.c
+- Resolves: RHEL-65776 - Wrong set of entries returned for some search filters [rhel-9]
+- Resolves: RHEL-67004 - "dsconf config replace" should handle multivalued attributes.
+- Resolves: RHEL-67005 - Online backup hangs sporadically.
+- Resolves: RHEL-67008 - Some replication status data are reset upon a restart.
+- Resolves: RHEL-67020 - 389DirectoryServer Process Stops When Setting up Sorted VLV Index
+- Resolves: RHEL-67024 - Some nsslapd-haproxy-trusted-ip values are discarded upon a restart.
+- Resolves: RHEL-69806 - ipahealthcheck.ds.replication displays WARNING '1 conflict entries found under the replication suffix'
+- Resolves: RHEL-69826 - "Duplicated DN detected" errors when creating indexes or importing entries. [rhel-9]
+- Resolves: RHEL-70127 - Crash in attrlist_find() when the Account Policy plugin is enabled. [rhel-9]
+- Resolves: RHEL-70252 - Freelist ordering causes high wtime
+- Resolves: RHEL-71218 - Sub suffix causes "id2entry - Could not open id2entry err 0" error when the Directory Server starts [rhel-9]
+- Resolves: RHEL-74153 - backup/restore broken [rhel-9]
+- Resolves: RHEL-74158 - If an entry RDN is identical to the suffix, then Entryrdn gets broken during a reindex [rhel-9]
+- Resolves: RHEL-74163 - Crash during bind when acct policy plugin does not have "alwaysrecordlogin" set [rhel-9]
+- Resolves: RHEL-74168 - On replica consumer, account policy plugin fails to manage the last login history [rhel-9]
+- Resolves: RHEL-74174 - Replication broken after backup restore with freeipa configuration [rhel-9]
+- Resolves: RHEL-74353 - nsslapd-haproxy-trusted-ip is not in schema [rhel-9]
+- Resolves: RHEL-76019 - IPA LDAP error code T3 when no exceeded time limit from a paged search result [rhel-9]
+
+* Mon Dec 16 2024 Viktor Ashirov <vashirov@redhat.com> - 2.6.0-2
+- Fix License tag
+
+* Mon Dec 16 2024 Viktor Ashirov <vashirov@redhat.com> - 2.6.0-1
+- Update to 2.6.0
+- Resolves: RHEL-67195 - Rebase 389-ds-base to 2.6.0
+
+* Mon Sep 16 2024 Viktor Ashirov <vashirov@redhat.com> - 2.5.2-2
+- Bump version to 2.5.2-2
+- Resolves: RHEL-55744 - ipahealthcheck.ds.backends.BackendsCheck.DSBLE0006: BDB is deprecated and should not be used as a backend
+
 * Mon Aug 12 2024 Viktor Ashirov <vashirov@redhat.com> - 2.5.2-1
 - Bump version to 2.5.2-1
 - Resolves: RHEL-5108 - ns-slapd crash in referint_get_config