diff --git a/SOURCES/0010-Issue-6417-If-an-entry-RDN-is-identical-to-the-suffi.patch b/SOURCES/0010-Issue-6417-If-an-entry-RDN-is-identical-to-the-suffi.patch new file mode 100644 index 0000000..b7f8d34 --- /dev/null +++ b/SOURCES/0010-Issue-6417-If-an-entry-RDN-is-identical-to-the-suffi.patch @@ -0,0 +1,200 @@ +From 7ed791beda5d507e15c970a88289f533aa06b2d3 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Mon, 2 Dec 2024 17:18:32 +0100 +Subject: [PATCH] Issue 6417 - If an entry RDN is identical to the suffix, then + Entryrdn gets broken during a reindex (#6418) + +Bug description: + During a reindex, the entryrdn index is built at the end from + each entry in the suffix. + If one entry has a RDN that is identical to the suffix DN, + then entryrdn_lookup_dn may erroneously return the suffix DN + as the DN of the entry. + +Fix description: + When the lookup entry has no parent (because index is under + work) the loop lookup the entry using the RDN. + If this RDN matches the suffix DN, then it exits from the loop + with the suffix DN. + Before exiting it checks that the original lookup entryID + is equal to suffix entryID. If it does not match + the function fails and then the DN from the entry will be + built from id2enty + +fixes: #6417 + +Reviewed by: Pierre Rogier, Simon Pichugin (Thanks !!!) +--- + .../tests/suites/indexes/entryrdn_test.py | 108 +++++++++++++++++- + .../back-ldbm/db-mdb/mdb_import_threads.c | 2 +- + ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 11 +- + 3 files changed, 118 insertions(+), 3 deletions(-) + +diff --git a/dirsrvtests/tests/suites/indexes/entryrdn_test.py b/dirsrvtests/tests/suites/indexes/entryrdn_test.py +index 345955d4d..7669292ab 100644 +--- a/dirsrvtests/tests/suites/indexes/entryrdn_test.py ++++ b/dirsrvtests/tests/suites/indexes/entryrdn_test.py +@@ -11,11 +11,16 @@ import os + import pytest + import ldap + import logging ++from lib389 import Entry + from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX +-from lib389.backend import Backends ++from lib389.backend import Backends, Backend ++from lib389.mappingTree import MappingTrees ++from lib389.configurations.sample import create_base_domain ++from lib389.idm.domain import Domain + from lib389.idm.user import UserAccounts, UserAccount + from lib389.idm.organizationalunit import OrganizationalUnits + from lib389.topologies import topology_m2 as topo_m2 ++from lib389.topologies import topology_st + from lib389.agreement import Agreements + from lib389.utils import ds_is_older, ensure_bytes + from lib389.tasks import Tasks,ExportTask, ImportTask +@@ -283,6 +288,107 @@ def test_long_rdn(topo_m2): + ou.delete() + assert not ou.exists() + ++def test_entry_rdn_same_as_suffix(topology_st, request): ++ """ ++ Test that a reindex is successful even if an entry ++ has a RDN that is identical to the suffix ++ ++ :id: 7f5a38e9-b979-4664-b132-81df0e60f38a ++ :setup: standalone ++ :steps: ++ 1. Create a new backend with suffix 'dc=dup_rdn' (ID 1) ++ 2. Create a dummy entry 'ou=my_org,dc=dup_rdn' (ID 2) ++ 3. Create the problematic entry 'dc=dup_rdn,dc=dup_rdn' (ID 3) ++ 4. Create a dummy entry 'ou=my_org,dc=dup_rdn,dc=dup_rdn' (ID 4) ++ 5. Do an offline reindex ++ 6. Check that entryrdn contains the key P3 (parent of ID 3) ++ 7. Check that error log does not contain 'entryrdn_insert_key - Same DN' ++ :expectedresults: ++ 1. Should succeed ++ 2. Should succeed ++ 3. Should succeed ++ 4. Should succeed ++ 5. Should succeed ++ 6. Should succeed ++ 7. 
Should succeed ++ """ ++ inst = topology_st.standalone ++ ++ # Create a suffix 'dc=dup_rdn' ++ be_name = 'domain' ++ dc_value = 'dup_rdn' ++ suffix = 'dc=' + dc_value ++ rdn = 'my_org' ++ be1 = Backend(inst) ++ be1.create(properties={ ++ 'cn': be_name, ++ 'nsslapd-suffix': suffix, ++ }, ++ create_mapping_tree=False ++ ) ++ ++ mts = MappingTrees(inst) ++ mt = mts.create(properties={ ++ 'cn': suffix, ++ 'nsslapd-state': 'backend', ++ 'nsslapd-backend': be_name, ++ }) ++ ++ # Create the domain entry 'dc=dup_rdn' ++ create_base_domain(inst, suffix) ++ ++ # Create the org ou=my_org,dc=dup_rdn ++ ous = OrganizationalUnits(inst, suffix) ++ ou = ous.create(properties={ 'ou': rdn }) ++ ++ # when reindexing entryrdn the following entry ++ # (dc=dup_rdn,dc=dup_rdn) Triggers ++ # this message. ++ # This is because its RDN (dc=dup_rdn) is also ++ # the suffix DN ++ info_message = 'entryrdn_insert_key - Same DN (dn: %s) is already in the entryrdn file with different' % (ou.dn) ++ log.info("In case if the bug still exist this line should be in the error log") ++ log.info(" --> " + info_message) ++ ++ # Create the domain entry 'dc=dup_rdn,dc=dup_rdn' ++ trigger_entry = suffix + "," + suffix ++ domain = Domain(inst, dn=trigger_entry) ++ domain.create(properties={ ++ 'dc': dc_value, ++ 'description': 'Entry with RDN identical to suffix' ++ }) ++ ++ # Create the org ou=my_org,dc=dup_rdn,dc=dup_rdn ++ ous = OrganizationalUnits(inst, trigger_entry) ++ ou = ous.create(properties={ 'ou': rdn }) ++ ++ ++ # Trigger an offline reindex ++ log.info('Offline reindex, stopping the server') ++ topology_st.standalone.stop() ++ ++ log.info('Reindex all the suffix') ++ topology_st.standalone.db2index(bename=be_name) ++ ++ # make sure the key 'P3' (parent of 'dc=dup_rdn,dc=dup_rdn') exists ++ dbscanout = topology_st.standalone.dbscan(bename=be_name, index='entryrdn') ++ log.info(dbscanout) ++ assert(ensure_bytes('P3') in ensure_bytes(dbscanout)) ++ ++ # make sure there is no failure detected/logged in error logs ++ if topology_st.standalone.get_db_lib() == "mdb": ++ pattern_str = ".*Inconsistent id2entry database.*" ++ else: ++ pattern_str = ".*entryrdn_insert_key - Same DN.*is already in the entryrdn file with different.*$" ++ assert not topology_st.standalone.ds_error_log.match(pattern_str) ++ ++ ++ def fin(): ++ topology_st.standalone.restart() ++ mt.delete() ++ be1.delete() ++ ++ request.addfinalizer(fin) + + if __name__ == "__main__": + # Run isolated +diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c +index 143f2c9df..65f564aff 100644 +--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c ++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c +@@ -731,7 +731,7 @@ get_entry_type(WorkerQueueData_t *wqelmt, Slapi_DN *sdn) + int len = SLAPI_ATTR_UNIQUEID_LENGTH; + const char *ndn = slapi_sdn_get_ndn(sdn); + +- if (slapi_be_issuffix(be, sdn)) { ++ if (slapi_be_issuffix(be, sdn) && (wqelmt->wait_id == 1)) { + return DNRC_SUFFIX; + } + if (PL_strncasecmp(ndn, SLAPI_ATTR_UNIQUEID, len) || ndn[len] != '=') { +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c +index 267957a36..8901b790a 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c +@@ -1077,7 +1077,16 @@ entryrdn_lookup_dn(backend *be, + _ENTRYRDN_DEBUG_GOTO_BAIL(); + goto bail; + } +- maybesuffix = 1; ++ if (workid == 1) { ++ /* The loop (workid) iterates from 
the starting 'id' ++ * up to the suffix ID (i.e. '1'). ++ * A corner case (#6417) is if an entry, on the path ++ * 'id' -> suffix, has the same RDN than the suffix. ++ * In order to erroneously believe the loop hits the suffix ++ * we need to check that 'workid' is '1' (suffix) ++ */ ++ maybesuffix = 1; ++ } + } else { + _entryrdn_cursor_print_error("entryrdn_lookup_dn", + key.data, data.size, data.ulen, rc); +-- +2.48.1 + diff --git a/SOURCES/0011-Issue-6417-2nd-If-an-entry-RDN-is-identical-to-the-s.patch b/SOURCES/0011-Issue-6417-2nd-If-an-entry-RDN-is-identical-to-the-s.patch new file mode 100644 index 0000000..77b3e99 --- /dev/null +++ b/SOURCES/0011-Issue-6417-2nd-If-an-entry-RDN-is-identical-to-the-s.patch @@ -0,0 +1,90 @@ +From f8f390dcc32cefccf762be49c1625a8d123a5d7f Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Mon, 6 Jan 2025 14:00:39 +0100 +Subject: [PATCH] Issue 6417 - (2nd) If an entry RDN is identical to the + suffix, then Entryrdn gets broken during a reindex (#6460) + +Bug description: + The primary fix has a flaw as it assumes that the + suffix ID is '1'. + If the RUV entry is the first entry of the database + the server loops indefinitely + +Fix description: + Read the suffix ID from the entryrdn index + +fixes: #6417 + +Reviewed by: Pierre Rogier (also reviewed the first fix) +--- + .../suites/replication/regression_m2_test.py | 9 +++++++++ + ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 19 ++++++++++++++++++- + 2 files changed, 27 insertions(+), 1 deletion(-) + +diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py +index 456ed8b99..f8d8b1640 100644 +--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py ++++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py +@@ -1171,6 +1171,15 @@ def test_online_reinit_may_hang(topo_with_sigkill): + """ + M1 = topo_with_sigkill.ms["supplier1"] + M2 = topo_with_sigkill.ms["supplier2"] ++ ++ # The RFE 5367 (when enabled) retrieves the DN ++ # from the dncache. This hides an issue ++ # with primary fix for 6417. ++ # We need to disable the RFE to verify that the primary ++ # fix is properly fixed. 
++    if ds_is_newer('2.3.1'):
++        M1.config.replace('nsslapd-return-original-entrydn', 'off')
++
+     M1.stop()
+     ldif_file = '%s/supplier1.ldif' % M1.get_ldif_dir()
+     M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
+index 8901b790a..2713240c1 100644
+--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
++++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
+@@ -992,6 +992,7 @@ entryrdn_lookup_dn(backend *be,
+     ID workid = id; /* starting from the given id */
+     rdn_elem *elem = NULL;
+     int maybesuffix = 0;
++    ID suffix_id = 1;
+
+     slapi_log_err(SLAPI_LOG_TRACE, "entryrdn_lookup_dn",
+                   "--> entryrdn_lookup_dn\n");
+@@ -1032,6 +1033,22 @@ entryrdn_lookup_dn(backend *be,
+     dblayer_value_free(be, &data);
+     dblayer_value_init(be, &data);
+
++    /* Just in case the suffix ID is not '1' retrieve it from the database */
++    keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
++    dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
++    rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
++    if (rc) {
++        slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
++                      "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
++                      slapi_sdn_get_ndn(be->be_suffix),
++                      suffix_id);
++    } else {
++        elem = (rdn_elem *)data.data;
++        suffix_id = id_stored_to_internal(elem->rdn_elem_id);
++    }
++    dblayer_value_free(be, &data);
++    dblayer_value_free(be, &key);
++
+     do {
+         /* Setting up a key for the node to get its parent */
+         keybuf = slapi_ch_smprintf("%c%u", RDN_INDEX_PARENT, workid);
+@@ -1077,7 +1094,7 @@ entryrdn_lookup_dn(backend *be,
+             _ENTRYRDN_DEBUG_GOTO_BAIL();
+             goto bail;
+         }
+-        if (workid == 1) {
++        if (workid == suffix_id) {
+             /* The loop (workid) iterates from the starting 'id'
+              * up to the suffix ID (i.e. '1').
+              * A corner case (#6417) is if an entry, on the path
+--
+2.48.1
+
diff --git a/SOURCES/0012-Issue-6417-3rd-If-an-entry-RDN-is-identical-to-the-s.patch b/SOURCES/0012-Issue-6417-3rd-If-an-entry-RDN-is-identical-to-the-s.patch
new file mode 100644
index 0000000..7b71152
--- /dev/null
+++ b/SOURCES/0012-Issue-6417-3rd-If-an-entry-RDN-is-identical-to-the-s.patch
@@ -0,0 +1,71 @@
+From 20c20ff195272e0a2ccea6d6b081311798f03eb8 Mon Sep 17 00:00:00 2001
+From: tbordaz
+Date: Mon, 13 Jan 2025 15:52:07 +0100
+Subject: [PATCH] Issue 6417 - (3rd) If an entry RDN is identical to the
+ suffix, then Entryrdn gets broken during a reindex (#6480)
+
+Bug description:
+    The previous fix had a flaw.
+    If entryrdn_lookup_dn is called with an undefined suffix,
+    the lookup of the suffix triggers a crash.
+    For example, it can occur during an internal search of a
+    nonexistent map (view plugin).
+    The issue exists in all releases but is hidden since 2.3.
+
+Fix description:
+    Test that the suffix is defined.
+
+fixes: #6417
+
+Reviewed by: Pierre Rogier (Thanks!)
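+
+For illustration, a minimal Python sketch of the guard pattern applied here
+(hypothetical names, not the server's C API): keep the default suffix ID of
+'1' and only override it when the suffix is both defined and found:
+
+    def get_suffix_id(index, suffix_dn):
+        """Return the suffix entry ID, defaulting to 1."""
+        suffix_id = 1
+        if suffix_dn is not None:          # the added be->be_suffix check
+            elem = index.get(suffix_dn)    # DBI_OP_MOVE_TO_KEY lookup in C
+            if elem is not None:
+                suffix_id = elem
+            # on a miss the default '1' is kept, as the C warning path does
+        return suffix_id
+
+    assert get_suffix_id({"dc=example,dc=com": 42}, None) == 1
+    assert get_suffix_id({"dc=example,dc=com": 42}, "dc=example,dc=com") == 42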
+--- + ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 32 +++++++++++--------- + 1 file changed, 18 insertions(+), 14 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c +index 2713240c1..e3d765893 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c +@@ -1033,21 +1033,25 @@ entryrdn_lookup_dn(backend *be, + dblayer_value_free(be, &data); + dblayer_value_init(be, &data); + +- /* Just in case the suffix ID is not '1' retrieve it from the database */ +- keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix)); +- dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1); +- rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data); +- if (rc) { +- slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn", +- "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n", +- slapi_sdn_get_ndn(be->be_suffix), +- suffix_id); +- } else { +- elem = (rdn_elem *)data.data; +- suffix_id = id_stored_to_internal(elem->rdn_elem_id); ++ /* Just in case the suffix ID is not '1' retrieve it from the database ++ * if the suffix is not defined suffix_id remains '1' ++ */ ++ if (be->be_suffix) { ++ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix)); ++ dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1); ++ rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data); ++ if (rc) { ++ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn", ++ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n", ++ slapi_sdn_get_ndn(be->be_suffix), ++ suffix_id); ++ } else { ++ elem = (rdn_elem *)data.data; ++ suffix_id = id_stored_to_internal(elem->rdn_elem_id); ++ } ++ dblayer_value_free(be, &data); ++ dblayer_value_free(be, &key); + } +- dblayer_value_free(be, &data); +- dblayer_value_free(be, &key); + + do { + /* Setting up a key for the node to get its parent */ +-- +2.48.1 + diff --git a/SOURCES/0013-Issue-6432-Crash-during-bind-when-acct-policy-plugin.patch b/SOURCES/0013-Issue-6432-Crash-during-bind-when-acct-policy-plugin.patch new file mode 100644 index 0000000..f90dd30 --- /dev/null +++ b/SOURCES/0013-Issue-6432-Crash-during-bind-when-acct-policy-plugin.patch @@ -0,0 +1,137 @@ +From 5d2afc232c6f98addb7810e6573f652a02a00f34 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 4 Dec 2024 15:39:58 -0500 +Subject: [PATCH] Issue 6432 - Crash during bind when acct policy plugin does + not have "alwaysrecordlogin" set + +Description: + +If alwaysrecordlogin is off then we dereference NULL ptr cfg->login_history_attr +when trying to write the history/time value. Instead we should skip +over this code if it is not set. + +Relates: https://github.com/389ds/389-ds-base/issues/6432 + +Reviewed by: tbordaz(Thanks!) 
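+
+For illustration, a tiny Python sketch of the corrected control flow (stub
+functions and a dict-based config, purely hypothetical; the real change is
+the two-line C guard in the hunk below):
+
+    def update_state_attr(dn, attr, value):
+        print(f"MOD {dn}: replace {attr} -> {value}")
+
+    def update_login_history(dn, value):
+        print(f"MOD {dn}: append lastLoginHistory -> {value}")
+
+    def record_login(cfg, dn, timestr):
+        update_state_attr(dn, cfg["always_record_login_attr"], timestr)
+        # Skip the history update when no history attribute is configured;
+        # this is where the NULL pointer was previously dereferenced.
+        if cfg.get("login_history_attr"):
+            update_login_history(dn, timestr)
+
+    record_login({"always_record_login_attr": "lastLoginTime"},
+                 "uid=crash,ou=people,dc=example,dc=com", "20241204153958Z")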
+--- + .../accpol_check_all_state_attrs_test.py | 77 ++++++++++++++++++- + ldap/servers/plugins/acctpolicy/acct_plugin.c | 4 +- + 2 files changed, 77 insertions(+), 4 deletions(-) + +diff --git a/dirsrvtests/tests/suites/plugins/accpol_check_all_state_attrs_test.py b/dirsrvtests/tests/suites/plugins/accpol_check_all_state_attrs_test.py +index ec518ca7f..96c7a0324 100644 +--- a/dirsrvtests/tests/suites/plugins/accpol_check_all_state_attrs_test.py ++++ b/dirsrvtests/tests/suites/plugins/accpol_check_all_state_attrs_test.py +@@ -21,10 +21,11 @@ from lib389._constants import ( + PASSWORD, + PLUGIN_ACCT_POLICY, + ) +-from lib389.idm.user import (UserAccount, UserAccounts) ++from lib389.idm.user import (UserAccount) + from lib389.plugins import (AccountPolicyPlugin, AccountPolicyConfig) ++from lib389.cos import (CosTemplate, CosPointerDefinition) + from lib389.idm.domain import Domain +-from datetime import datetime, timedelta ++ + + log = logging.getLogger(__name__) + +@@ -131,9 +132,79 @@ def test_inactivty_and_expiration(topo): + test_user.bind(NEW_PASSWORD) + + ++def test_alwaysrecordlogin_off(topo): ++ """Test the server does not crash when alwaysrecordlogin is "off" ++ ++ :id: 49eb0993-ee59-48a9-8324-fb965b202ba9 ++ :setup: Standalone Instance ++ :steps: ++ 1. Create test user ++ 2. Configure account policy, COS, and restart ++ 3. Bind as test user ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ """ ++ ++ LOCAL_POLICY = 'cn=Account Inactivation Policy,dc=example,dc=com' ++ TEMPL_COS = 'cn=TempltCoS,ou=people,dc=example,dc=com' ++ DEFIN_COS = 'cn=DefnCoS,ou=people,dc=example,dc=com' ++ TEST_USER_NAME = 'crash' ++ TEST_USER_DN = f'uid={TEST_USER_NAME},ou=people,{DEFAULT_SUFFIX}' ++ ++ inst = topo.standalone ++ ++ # Create the test user ++ test_user = UserAccount(inst, TEST_USER_DN) ++ test_user.create(properties={ ++ 'uid': TEST_USER_NAME, ++ 'cn': TEST_USER_NAME, ++ 'sn': TEST_USER_NAME, ++ 'userPassword': PASSWORD, ++ 'uidNumber': '1000', ++ 'gidNumber': '2000', ++ 'homeDirectory': '/home/crash', ++ }) ++ ++ # Configure account policy plugin ++ plugin = AccountPolicyPlugin(inst) ++ plugin.enable() ++ plugin.set('nsslapd-pluginarg0', ACCP_CONF) ++ accp = AccountPolicyConfig(inst, dn=ACCP_CONF) ++ accp.set('alwaysrecordlogin', 'no') ++ accp.set('stateattrname', 'lastLoginTime') ++ accp.set('altstateattrname', 'passwordexpirationtime') ++ accp.set('specattrname', 'acctPolicySubentry') ++ accp.set('limitattrname', 'accountInactivityLimit') ++ accp.set('accountInactivityLimit', '123456') ++ accp.set('checkAllStateAttrs', 'on') ++ inst.restart() ++ # Local policy ++ laccp = AccountPolicyConfig(inst, dn=LOCAL_POLICY) ++ laccp.create(properties={ ++ 'cn': 'Account Inactivation Policy', ++ 'accountInactivityLimit': '12312321' ++ }) ++ # COS ++ cos_template = CosTemplate(inst, dn=TEMPL_COS) ++ cos_template.create(properties={'cn': 'TempltCoS', ++ 'acctPolicySubentry': LOCAL_POLICY}) ++ cos_def = CosPointerDefinition(inst, dn=DEFIN_COS) ++ cos_def.create(properties={ ++ 'cn': 'DefnCoS', ++ 'cosTemplateDn': TEMPL_COS, ++ 'cosAttribute': 'acctPolicySubentry default operational-default'}) ++ inst.restart() ++ ++ # Bind as test user to make sure the server does not crash ++ conn = test_user.bind(PASSWORD) ++ test_user = UserAccount(conn, TEST_USER_DN) ++ test_user.bind(PASSWORD) ++ ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) +- +diff --git 
a/ldap/servers/plugins/acctpolicy/acct_plugin.c b/ldap/servers/plugins/acctpolicy/acct_plugin.c +index ba9705f74..220d0f6b2 100644 +--- a/ldap/servers/plugins/acctpolicy/acct_plugin.c ++++ b/ldap/servers/plugins/acctpolicy/acct_plugin.c +@@ -372,7 +372,9 @@ acct_record_login(const char *dn) + "acct_record_login - Recorded %s=%s on \"%s\"\n", cfg->always_record_login_attr, timestr, dn); + + /* update login history */ +- acct_update_login_history(dn, timestr); ++ if (cfg->login_history_attr) { ++ acct_update_login_history(dn, timestr); ++ } + } + + done: +-- +2.48.1 + diff --git a/SOURCES/0014-Issue-6386-backup-restore-broken-after-db-log-rotati.patch b/SOURCES/0014-Issue-6386-backup-restore-broken-after-db-log-rotati.patch new file mode 100644 index 0000000..5aa280d --- /dev/null +++ b/SOURCES/0014-Issue-6386-backup-restore-broken-after-db-log-rotati.patch @@ -0,0 +1,56 @@ +From deb3b14a69a31e3c61dc65ba2045637da8f52cf7 Mon Sep 17 00:00:00 2001 +From: progier389 +Date: Fri, 15 Nov 2024 12:43:30 +0100 +Subject: [PATCH] Issue 6386 - backup/restore broken after db log rotation + (#6406) + +Restore does fail if the db log file have rotated since the backup. The reason is that the current log files are not removed mixing old and new log file which confuse the database. +Solution is to remove all existing log files before copying the backuped files. +Note: the number of db region should not change and having an extra db file should not be a problem so only the log files must be removed. + +Issue: #6386 + +Reviewed by: @mreynolds389, @tbordaz (Thanks!) + +(cherry picked from commit a48be7b2b389a336e3f48a775e466c71f2eb2b31) +(cherry picked from commit 0c8829bf43a6b3c413618377e1fbd574cf818f32) +--- + .../slapd/back-ldbm/db-bdb/bdb_layer.c | 22 +++++++++++++++++++ + 1 file changed, 22 insertions(+) + +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +index d2e1331b2..44f25098f 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +@@ -5591,6 +5591,28 @@ bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task) + /* Otherwise use the src_dir from the caller */ + real_src_dir = src_dir; + ++ /* Lets remove existing log files before copying the new ones (See issue #6386) */ ++ prefix = BDB_CONFIG(li)->bdb_log_directory; ++ if (prefix == NULL) { ++ prefix = home_dir; ++ } ++ dirhandle = PR_OpenDir(prefix); ++ if (NULL != dirhandle) { ++ while (NULL != ++ (direntry = PR_ReadDir(dirhandle, PR_SKIP_DOT | PR_SKIP_DOT_DOT))) { ++ if (NULL == direntry->name) { ++ /* NSPR doesn't behave like the docs say it should */ ++ break; ++ } ++ if (bdb_is_logfilename(direntry->name)) { ++ PR_snprintf(filename1, sizeof(filename2), "%s/%s", ++ prefix, direntry->name); ++ unlink(filename1); ++ } ++ } ++ } ++ PR_CloseDir(dirhandle); ++ + /* We copy the files over from the staging area */ + /* We want to treat the logfiles specially: if there's + * a log file directory configured, copy the logfiles there +-- +2.48.1 + diff --git a/SOURCES/0015-Issue-6446-on-replica-consumer-account-policy-plugin.patch b/SOURCES/0015-Issue-6446-on-replica-consumer-account-policy-plugin.patch new file mode 100644 index 0000000..ea0851d --- /dev/null +++ b/SOURCES/0015-Issue-6446-on-replica-consumer-account-policy-plugin.patch @@ -0,0 +1,182 @@ +From fc1d1b8c5438f29c3408adca26423fc6226f47ff Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Thu, 12 Dec 2024 16:30:50 +0100 +Subject: [PATCH] Issue 6446 - on replica 
consumer, account policy plugin fails + to manage the last login history (#6448) + +Bug description: + An internal update needs the flag SLAPI_OP_FLAG_BYPASS_REFERRALS + on a RO consumer. Else the server does not select the backend + and returns LDAP_REFERRAL to the internal operation. + In acct_update_login_history the flag is missing. + +Fix description: + Use the same flags in acct_update_login_history as it is + used in acct_record_login + +fixes: #6446 + +Reviewed by: Mark Reynolds (Thanks!) +--- + .../tests/suites/plugins/accpol_test.py | 123 +++++++++++++++++- + ldap/servers/plugins/acctpolicy/acct_plugin.c | 3 +- + 2 files changed, 124 insertions(+), 2 deletions(-) + +diff --git a/dirsrvtests/tests/suites/plugins/accpol_test.py b/dirsrvtests/tests/suites/plugins/accpol_test.py +index 964d98ee0..f5db8e461 100644 +--- a/dirsrvtests/tests/suites/plugins/accpol_test.py ++++ b/dirsrvtests/tests/suites/plugins/accpol_test.py +@@ -10,7 +10,7 @@ import pytest + import subprocess + from lib389.tasks import * + from lib389.utils import * +-from lib389.topologies import topology_st ++from lib389.topologies import topology_st, topology_m1c1 + from lib389.idm.user import (UserAccount, UserAccounts) + from lib389.plugins import (AccountPolicyPlugin, AccountPolicyConfig, AccountPolicyConfigs) + from lib389.cos import (CosTemplate, CosPointerDefinition) +@@ -1298,6 +1298,127 @@ def test_locact_modrdn(topology_st, accpol_local): + account_status(topology_st, suffix, subtree, userid, 1, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + ++def test_acct_policy_consumer(topology_m1c1, request): ++ """Test the lastLoginHistory is updated on consumer without ++ referral error ++ ++ :id: 53a9a2c7-6b10-41c9-b9f9-bde412d04acb ++ :setup: Supplier Instance, Consumer Instance ++ :steps: ++ 1. Create a test entry on the supplier ++ 2. Configure lastLoginTime on supplier and consumer ++ 3. On supplier 3 binds of the test entry ++ 4. On supplier check there is 3 lastLoginHistory values ++ 5. On supplier check there is no error in error logs ++ 6. On consumer 3 binds of the test entry ++ 7. On consumer check there is 3 lastLoginHistory values ++ 8. On consumer check there is no error in error logs ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Success ++ 8. 
Success ++ """ ++ ++ supplier = topology_m1c1.ms['supplier1'] ++ consumer = topology_m1c1.cs['consumer1'] ++ USER_PW = 'password' ++ ++ # Add on the supplier a test user entry and wait it is on the consumer ++ users_supplier = UserAccounts(supplier, DEFAULT_SUFFIX, rdn=None) ++ user_supplier = users_supplier.create_test_user(uid=1000, gid=2000) ++ user_supplier.replace('userPassword', USER_PW) ++ ++ users_consumer = UserAccounts(consumer, DEFAULT_SUFFIX, rdn=None) ++ for i in range(0, 10): ++ try: ++ user_consumer = users_consumer.get("test_user_1000") ++ break ++ except ldap.NO_SUCH_OBJECT: ++ time.sleep(1) ++ ++ # Configure lastLoginTime on supplier and consumer ++ plugin = AccountPolicyPlugin(supplier) ++ plugin.enable() ++ ACCPOL_DN = "cn={},{}".format(PLUGIN_ACCT_POLICY, DN_PLUGIN) ++ ACCP_CONF = "{},{}".format(DN_CONFIG, ACCPOL_DN) ++ plugin.set('nsslapd-pluginarg0', ACCP_CONF) ++ accp = AccountPolicyConfig(supplier, dn=ACCP_CONF) ++ accp.set('alwaysrecordlogin', 'yes') ++ accp.set('stateattrname', 'lastLoginTime') ++ supplier.restart(timeout=10) ++ ++ plugin = AccountPolicyPlugin(consumer) ++ plugin.enable() ++ ACCPOL_DN = "cn={},{}".format(PLUGIN_ACCT_POLICY, DN_PLUGIN) ++ ACCP_CONF = "{},{}".format(DN_CONFIG, ACCPOL_DN) ++ plugin.set('nsslapd-pluginarg0', ACCP_CONF) ++ accp = AccountPolicyConfig(consumer, dn=ACCP_CONF) ++ accp.set('alwaysrecordlogin', 'yes') ++ accp.set('stateattrname', 'lastLoginTime') ++ consumer.restart(timeout=10) ++ ++ # On supplier ++ # Do 3 binds with a delay to ++ # allow several values lastLoginHistory ++ user_supplier.bind(USER_PW) ++ time.sleep(2) ++ user_supplier = users_supplier.get("test_user_1000") ++ user_supplier.bind(USER_PW) ++ time.sleep(2) ++ user_supplier = users_supplier.get("test_user_1000") ++ user_supplier.bind(USER_PW) ++ ++ # Verify there is no referral error ++ results = supplier.ds_error_log.match('.*.acct_update_login_history - Modify error 10 on entry*') ++ assert not results ++ ++ # Verify that we got 3 values for lastLoginHistory ++ assert len(user_supplier.get_attr_vals_utf8_l('lastLoginHistory')) == 3 ++ ++ # On Consumer ++ # Do 3 binds with a delay to ++ # allow several values lastLoginHistory ++ user_supplier.bind(USER_PW) ++ user_consumer.bind(USER_PW) ++ time.sleep(2) ++ user_consumer = users_supplier.get("test_user_1000") ++ user_consumer.bind(USER_PW) ++ time.sleep(2) ++ user_consumer = users_supplier.get("test_user_1000") ++ user_consumer.bind(USER_PW) ++ ++ # Verify there is no referral error ++ results = consumer.ds_error_log.match('.*.acct_update_login_history - Modify error 10 on entry*') ++ assert not results ++ ++ # Verify that we got at least 3 values for lastLoginHistory ++ assert len(user_consumer.get_attr_vals_utf8_l('lastLoginHistory')) >= 3 ++ ++ ++ def fin(): ++ user_supplier.delete() ++ log.info('Disabling Global accpolicy plugin and removing pwpolicy attrs') ++ try: ++ plugin = AccountPolicyPlugin(supplier) ++ plugin.disable() ++ except ldap.LDAPError as e: ++ log.error('Failed to disable Global accpolicy plugin, {}'.format(e.message['desc'])) ++ assert False ++ supplier.restart(timeout=10) ++ try: ++ plugin = AccountPolicyPlugin(consumer) ++ plugin.disable() ++ except ldap.LDAPError as e: ++ log.error('Failed to disable Global accpolicy plugin, {}'.format(e.message['desc'])) ++ assert False ++ consumer.restart(timeout=10) ++ ++ request.addfinalizer(fin) + + if __name__ == '__main__': + # Run isolated +diff --git a/ldap/servers/plugins/acctpolicy/acct_plugin.c b/ldap/servers/plugins/acctpolicy/acct_plugin.c 
+index 220d0f6b2..af566b934 100644 +--- a/ldap/servers/plugins/acctpolicy/acct_plugin.c ++++ b/ldap/servers/plugins/acctpolicy/acct_plugin.c +@@ -290,7 +290,8 @@ acct_update_login_history(const char *dn, char *timestr) + list_of_mods[1] = NULL; + + mod_pb = slapi_pblock_new(); +- slapi_modify_internal_set_pb(mod_pb, dn, list_of_mods, NULL, NULL, plugin_id, 0); ++ slapi_modify_internal_set_pb(mod_pb, dn, list_of_mods, NULL, NULL, plugin_id, ++ SLAPI_OP_FLAG_NO_ACCESS_CHECK |SLAPI_OP_FLAG_BYPASS_REFERRALS); + slapi_modify_internal_pb(mod_pb); + slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); + if (rc != LDAP_SUCCESS) { +-- +2.48.1 + diff --git a/SOURCES/0016-Issue-6446-Fix-test_acct_policy_consumer-test-to-wai.patch b/SOURCES/0016-Issue-6446-Fix-test_acct_policy_consumer-test-to-wai.patch new file mode 100644 index 0000000..d000ceb --- /dev/null +++ b/SOURCES/0016-Issue-6446-Fix-test_acct_policy_consumer-test-to-wai.patch @@ -0,0 +1,41 @@ +From d43600284728d677ea6a98d9bd3d2e92bfbaecdb Mon Sep 17 00:00:00 2001 +From: Masahiro Matsuya +Date: Fri, 24 Jan 2025 15:08:22 +0900 +Subject: [PATCH] Issue 6446 - Fix test_acct_policy_consumer test to wait + enough for lastLoginHistory update (#6530) + +Adding a small sleep to wait for lastLoginHistory update after the bind +operation. + +Relates: #6446 + +Author: Masahiro Matsuya + +Reviewed by: tbordaz +--- + dirsrvtests/tests/suites/plugins/accpol_test.py | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/dirsrvtests/tests/suites/plugins/accpol_test.py b/dirsrvtests/tests/suites/plugins/accpol_test.py +index f5db8e461..1354f0559 100644 +--- a/dirsrvtests/tests/suites/plugins/accpol_test.py ++++ b/dirsrvtests/tests/suites/plugins/accpol_test.py +@@ -1372,6 +1372,7 @@ def test_acct_policy_consumer(topology_m1c1, request): + time.sleep(2) + user_supplier = users_supplier.get("test_user_1000") + user_supplier.bind(USER_PW) ++ time.sleep(2) + + # Verify there is no referral error + results = supplier.ds_error_log.match('.*.acct_update_login_history - Modify error 10 on entry*') +@@ -1391,6 +1392,7 @@ def test_acct_policy_consumer(topology_m1c1, request): + time.sleep(2) + user_consumer = users_supplier.get("test_user_1000") + user_consumer.bind(USER_PW) ++ time.sleep(2) + + # Verify there is no referral error + results = consumer.ds_error_log.match('.*.acct_update_login_history - Modify error 10 on entry*') +-- +2.48.1 + diff --git a/SOURCES/0017-Issue-6554-During-import-of-entries-without-nsUnique.patch b/SOURCES/0017-Issue-6554-During-import-of-entries-without-nsUnique.patch new file mode 100644 index 0000000..c1dc87f --- /dev/null +++ b/SOURCES/0017-Issue-6554-During-import-of-entries-without-nsUnique.patch @@ -0,0 +1,165 @@ +From 39138128783d3e3cd337a6d6b01b19abbc7bb6de Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Thu, 6 Feb 2025 18:25:36 +0100 +Subject: [PATCH] Issue 6554 - During import of entries without nsUniqueId, a + supplier generates duplicate nsUniqueId (LMDB only) (#6582) + +Bug description: + During an import the entry is prepared (schema, operational + attributes, password encryption,...) before starting the + update of the database and indexes. + A step of the preparation is to assign a value to 'nsuniqueid' + operational attribute. 'nsuniqueid' must be unique. + In LMDB the preparation is done by multiple threads (workers). + In such case the 'nsuniqueid' are generated in parallel and + as it is time based several values can be duplicated. 
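+
+Conceptually (a Python sketch using threading, not the actual C import
+workers), a time-based generator only stays collision-free if reading the
+clock and advancing the per-tick sequence happen under a single lock:
+
+    import threading
+    import time
+
+    _lock = threading.Lock()
+    _last = {"tick": 0, "seq": 0}
+
+    def generate_uniqueid():
+        # Serialized like the pthread mutex added to
+        # dbmdb_import_generate_uniqueid(): without the lock, two workers
+        # that read the same clock tick would emit the same value.
+        with _lock:
+            now = time.time_ns()
+            if now == _last["tick"]:
+                _last["seq"] += 1
+            else:
+                _last["tick"], _last["seq"] = now, 0
+            return f"{now:x}-{_last['seq']:04x}"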
+ +Fix description: + To prevent that the routine dbmdb_import_generate_uniqueid + should make sure to synchronize the workers. + +fixes: #6554 + +Reviewed by: Pierre Rogier +--- + .../tests/suites/import/import_test.py | 79 ++++++++++++++++++- + .../back-ldbm/db-mdb/mdb_import_threads.c | 11 +++ + 2 files changed, 89 insertions(+), 1 deletion(-) + +diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py +index 832a27cf6..6b6630133 100644 +--- a/dirsrvtests/tests/suites/import/import_test.py ++++ b/dirsrvtests/tests/suites/import/import_test.py +@@ -14,11 +14,13 @@ import os + import pytest + import time + import glob ++import re + import logging + import subprocess + from datetime import datetime + from lib389.topologies import topology_st as topo +-from lib389._constants import DEFAULT_SUFFIX, TaskWarning ++from lib389.topologies import topology_m2 as topo_m2 ++from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX, TaskWarning + from lib389.dbgen import dbgen_users + from lib389.tasks import ImportTask + from lib389.index import Indexes +@@ -627,6 +629,81 @@ def test_crash_on_ldif2db_with_lmdb(topo, _import_clean): + __check_for_core(now) + + ++def test_duplicate_nsuniqueid(topo_m2, request): ++ """Test that after an offline import all ++ nsuniqueid are different ++ ++ :id: a2541677-a288-4633-bacf-4050cc56016d ++ :setup: MMR with 2 suppliers ++ :steps: ++ 1. stop the instance to do offline operations ++ 2. Generate a 5K users LDIF file ++ 3. Check that no uniqueid are present in the generated file ++ 4. import the generated LDIF ++ 5. export the database ++ 6. Check that that exported LDIF contains more than 5K nsuniqueid ++ 7. Check that there is no duplicate nsuniqued in exported LDIF ++ :expectedresults: ++ 1. Should succeeds ++ 2. Should succeeds ++ 3. Should succeeds ++ 4. Should succeeds ++ 5. Should succeeds ++ 6. Should succeeds ++ 7. 
Should succeeds ++ """ ++ m1 = topo_m2.ms["supplier1"] ++ ++ # Stop the instance ++ m1.stop() ++ ++ # Generate a test ldif (5k entries) ++ log.info("Generating LDIF...") ++ ldif_dir = m1.get_ldif_dir() ++ import_ldif = ldif_dir + '/5k_users_import.ldif' ++ dbgen_users(m1, 5000, import_ldif, DEFAULT_SUFFIX) ++ ++ # Check that the generated LDIF does not contain nsuniqueid ++ all_nsuniqueid = [] ++ with open(import_ldif, 'r') as file: ++ for line in file: ++ if line.lower().startswith("nsuniqueid: "): ++ all_nsuniqueid.append(line.split(': ')[1]) ++ log.info("import file contains " + str(len(all_nsuniqueid)) + " nsuniqueid") ++ assert len(all_nsuniqueid) == 0 ++ ++ # Import the "nsuniquied free" LDIF file ++ if not m1.ldif2db('userRoot', None, None, None, import_ldif): ++ assert False ++ ++ # Export the DB that now should contain nsuniqueid ++ export_ldif = ldif_dir + '/5k_user_export.ldif' ++ log.info("export to file " + export_ldif) ++ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], ++ excludeSuffixes=None, repl_data=False, ++ outputfile=export_ldif, encrypt=False) ++ ++ # Check that the export LDIF contain nsuniqueid ++ all_nsuniqueid = [] ++ with open(export_ldif, 'r') as file: ++ for line in file: ++ if line.lower().startswith("nsuniqueid: "): ++ all_nsuniqueid.append(line.split(': ')[1]) ++ log.info("export file " + export_ldif + " contains " + str(len(all_nsuniqueid)) + " nsuniqueid") ++ assert len(all_nsuniqueid) >= 5000 ++ ++ # Check that the nsuniqueid are unique ++ assert len(set(all_nsuniqueid)) == len(all_nsuniqueid) ++ ++ def fin(): ++ if os.path.exists(import_ldif): ++ os.remove(import_ldif) ++ if os.path.exists(export_ldif): ++ os.remove(export_ldif) ++ m1.start ++ ++ request.addfinalizer(fin) ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c +index 65f564aff..716e3c0f3 100644 +--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c ++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c +@@ -606,10 +606,20 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e) + { + const char *uniqueid = slapi_entry_get_uniqueid(e); + int rc = UID_SUCCESS; ++ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + + if (!uniqueid && (job->uuid_gen_type != SLAPI_UNIQUEID_GENERATE_NONE)) { + char *newuniqueid; + ++ /* With 'mdb' we have several workers generating nsuniqueid ++ * we need to serialize them to prevent generating duplicate value ++ * From performance pov it only impacts import ++ * The default value is SLAPI_UNIQUEID_GENERATE_TIME_BASED so ++ * the only syscall is clock_gettime and then string formating ++ * that should limit contention ++ */ ++ pthread_mutex_lock(&mutex); ++ + /* generate id based on dn */ + if (job->uuid_gen_type == SLAPI_UNIQUEID_GENERATE_NAME_BASED) { + char *dn = slapi_entry_get_dn(e); +@@ -620,6 +630,7 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e) + /* time based */ + rc = slapi_uniqueIDGenerateString(&newuniqueid); + } ++ pthread_mutex_unlock(&mutex); + + if (rc == UID_SUCCESS) { + slapi_entry_set_uniqueid(e, newuniqueid); +-- +2.48.1 + diff --git a/SOURCES/0018-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch b/SOURCES/0018-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch new file mode 100644 index 0000000..42751ec --- /dev/null +++ b/SOURCES/0018-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch @@ -0,0 +1,38 @@ +From 
a71c12881604b5e37d64093aedd158aa112b39cc Mon Sep 17 00:00:00 2001 +From: Viktor Ashirov +Date: Thu, 13 Feb 2025 16:37:43 +0100 +Subject: [PATCH] Issue 6561 - TLS 1.2 stickiness in FIPS mode + +Description: +TLS 1.3 works with NSS in FIPS mode for quite some time now, +this restriction is no longer needed. + +Fixes: https://github.com/389ds/389-ds-base/issues/6561 + +Reviewed by: @mreynolds389 (Thanks!) +--- + ldap/servers/slapd/ssl.c | 8 -------- + 1 file changed, 8 deletions(-) + +diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c +index ef9e97fb9..329bd5c90 100644 +--- a/ldap/servers/slapd/ssl.c ++++ b/ldap/servers/slapd/ssl.c +@@ -1914,14 +1914,6 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) + */ + sslStatus = SSL_VersionRangeGet(pr_sock, &slapdNSSVersions); + if (sslStatus == SECSuccess) { +- if (slapdNSSVersions.max > LDAP_OPT_X_TLS_PROTOCOL_TLS1_2 && fipsMode) { +- /* +- * FIPS & NSS currently only support a max version of TLS1.2 +- * (although NSS advertises 1.3 as a max range in FIPS mode), +- * hopefully this code block can be removed soon... +- */ +- slapdNSSVersions.max = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2; +- } + /* Reset request range */ + sslStatus = SSL_VersionRangeSet(pr_sock, &slapdNSSVersions); + if (sslStatus == SECSuccess) { +-- +2.48.1 + diff --git a/SOURCES/0019-Issue-6229-After-an-initial-failure-subsequent-onlin.patch b/SOURCES/0019-Issue-6229-After-an-initial-failure-subsequent-onlin.patch new file mode 100644 index 0000000..c02fe35 --- /dev/null +++ b/SOURCES/0019-Issue-6229-After-an-initial-failure-subsequent-onlin.patch @@ -0,0 +1,566 @@ +From 2e5a470ab5195b5e5f31fa8fb7b03d0c20b6a90f Mon Sep 17 00:00:00 2001 +From: progier389 +Date: Fri, 28 Jun 2024 18:56:49 +0200 +Subject: [PATCH] Issue 6229 - After an initial failure, subsequent online + backups fail (#6230) + +* Issue 6229 - After an initial failure, subsequent online backups will not work + +Several issues related to backup task error handling: +Backends stay busy after the failure +Exit code is 0 in some cases +Crash if failing to open the backup directory +And a more general one: +lib389 Task DN collision + +Solutions: +Always reset the busy flags that have been set +Ensure that 0 is not returned in error case +Avoid closing NULL directory descriptor +Use a timestamp having milliseconds precision to create the task DN + +Issue: #6229 + +Reviewed by: @droideck (Thanks!) 
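+
+A short Python illustration of the task-DN part of the fix: names built with
+second-granularity time.strftime() typically collide when two tasks are
+created within the same second, while datetime.now().isoformat() carries
+microsecond precision (this mirrors the lib389 change below; output depends
+on timing):
+
+    import time
+    from datetime import datetime
+
+    a = "backup_" + time.strftime("%m%d%Y_%H%M%S")
+    b = "backup_" + time.strftime("%m%d%Y_%H%M%S")
+    print(a == b)   # usually True -> same task DN, the second add fails
+
+    c = "backup_" + datetime.now().isoformat()
+    d = "backup_" + datetime.now().isoformat()
+    print(c != d)   # True in practice thanks to microsecond precision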
+ +(cherry picked from commit 04a0b6ac776a1d588ec2e10ff651e5015078ad21) +--- + ldap/servers/slapd/back-ldbm/archive.c | 45 +++++----- + .../slapd/back-ldbm/db-mdb/mdb_layer.c | 3 + + src/lib389/lib389/__init__.py | 10 +-- + src/lib389/lib389/tasks.py | 82 +++++++++---------- + 4 files changed, 70 insertions(+), 70 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/archive.c b/ldap/servers/slapd/back-ldbm/archive.c +index 0460a42f6..6658cc80a 100644 +--- a/ldap/servers/slapd/back-ldbm/archive.c ++++ b/ldap/servers/slapd/back-ldbm/archive.c +@@ -16,6 +16,8 @@ + #include "back-ldbm.h" + #include "dblayer.h" + ++#define NO_OBJECT ((Object*)-1) ++ + int + ldbm_temporary_close_all_instances(Slapi_PBlock *pb) + { +@@ -270,6 +272,7 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb) + int run_from_cmdline = 0; + Slapi_Task *task; + struct stat sbuf; ++ Object *last_busy_inst_obj = NO_OBJECT; + + slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li); + slapi_pblock_get(pb, SLAPI_SEQ_VAL, &rawdirectory); +@@ -380,13 +383,12 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb) + + /* to avoid conflict w/ import, do this check for commandline, as well */ + { +- Object *inst_obj, *inst_obj2; + ldbm_instance *inst = NULL; + + /* server is up -- mark all backends busy */ +- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj; +- inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) { +- inst = (ldbm_instance *)object_get_data(inst_obj); ++ for (last_busy_inst_obj = objset_first_obj(li->li_instance_set); last_busy_inst_obj; ++ last_busy_inst_obj = objset_next_obj(li->li_instance_set, last_busy_inst_obj)) { ++ inst = (ldbm_instance *)object_get_data(last_busy_inst_obj); + + /* check if an import/restore is already ongoing... */ + if (instance_set_busy(inst) != 0 || dblayer_in_import(inst) != 0) { +@@ -400,20 +402,6 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb) + "another task and cannot be disturbed.", + inst->inst_name); + } +- +- /* painfully, we have to clear the BUSY flags on the +- * backends we'd already marked... 
+- */ +- for (inst_obj2 = objset_first_obj(li->li_instance_set); +- inst_obj2 && (inst_obj2 != inst_obj); +- inst_obj2 = objset_next_obj(li->li_instance_set, +- inst_obj2)) { +- inst = (ldbm_instance *)object_get_data(inst_obj2); +- instance_set_not_busy(inst); +- } +- if (inst_obj2 && inst_obj2 != inst_obj) +- object_release(inst_obj2); +- object_release(inst_obj); + goto err; + } + } +@@ -427,18 +415,26 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb) + goto err; + } + +- if (!run_from_cmdline) { ++err: ++ /* Clear all BUSY flags that have been previously set */ ++ if (last_busy_inst_obj != NO_OBJECT) { + ldbm_instance *inst; + Object *inst_obj; + +- /* none of these backends are busy anymore */ +- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj; ++ for (inst_obj = objset_first_obj(li->li_instance_set); ++ inst_obj && (inst_obj != last_busy_inst_obj); + inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) { + inst = (ldbm_instance *)object_get_data(inst_obj); + instance_set_not_busy(inst); + } ++ if (last_busy_inst_obj != NULL) { ++ /* release last seen object for aborted objset_next_obj iterations */ ++ if (inst_obj != NULL) { ++ object_release(inst_obj); ++ } ++ object_release(last_busy_inst_obj); ++ } + } +-err: + if (return_value) { + if (dir_bak) { + slapi_log_err(SLAPI_LOG_ERR, +@@ -727,7 +723,10 @@ ldbm_archive_config(char *bakdir, Slapi_Task *task) + } + + error: +- PR_CloseDir(dirhandle); ++ if (NULL != dirhandle) { ++ PR_CloseDir(dirhandle); ++ dirhandle = NULL; ++ } + dse_backup_unlock(); + slapi_ch_free_string(&backup_config_dir); + slapi_ch_free_string(&dse_file); +diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c +index 457c5ed60..35f8173a7 100644 +--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c ++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c +@@ -982,6 +982,9 @@ dbmdb_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task) + if (ldbm_archive_config(dest_dir, task) != 0) { + slapi_log_err(SLAPI_LOG_ERR, "dbmdb_backup", + "Backup of config files failed or is incomplete\n"); ++ if (0 == return_value) { ++ return_value = -1; ++ } + } + + goto bail; +diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py +index 4edea63b7..95926a5d6 100644 +--- a/src/lib389/lib389/__init__.py ++++ b/src/lib389/lib389/__init__.py +@@ -69,7 +69,7 @@ from lib389.utils import ( + get_user_is_root) + from lib389.paths import Paths + from lib389.nss_ssl import NssSsl +-from lib389.tasks import BackupTask, RestoreTask ++from lib389.tasks import BackupTask, RestoreTask, Task + from lib389.dseldif import DSEldif + + # mixin +@@ -1420,7 +1420,7 @@ class DirSrv(SimpleLDAPObject, object): + name, self.ds_paths.prefix) + + # create the archive +- name = "backup_%s_%s.tar.gz" % (self.serverid, time.strftime("%m%d%Y_%H%M%S")) ++ name = "backup_%s_%s.tar.gz" % (self.serverid, Task.get_timestamp()) + backup_file = os.path.join(backup_dir, name) + tar = tarfile.open(backup_file, "w:gz") + tar.extraction_filter = (lambda member, path: member) +@@ -2806,7 +2806,7 @@ class DirSrv(SimpleLDAPObject, object): + else: + # No output file specified. 
Use the default ldif location/name + cmd.append('-a') +- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") ++ tnow = Task.get_timestamp() + if bename: + ldifname = os.path.join(self.ds_paths.ldif_dir, "%s-%s-%s.ldif" % (self.serverid, bename, tnow)) + else: +@@ -2877,7 +2877,7 @@ class DirSrv(SimpleLDAPObject, object): + + if archive_dir is None: + # Use the instance name and date/time as the default backup name +- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") ++ tnow = Task.get_timestamp() + archive_dir = os.path.join(self.ds_paths.backup_dir, "%s-%s" % (self.serverid, tnow)) + elif not archive_dir.startswith("/"): + # Relative path, append it to the bak directory +@@ -3499,7 +3499,7 @@ class DirSrv(SimpleLDAPObject, object): + + if archive is None: + # Use the instance name and date/time as the default backup name +- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") ++ tnow = Task.get_timestamp() + if self.serverid is not None: + backup_dir_name = "%s-%s" % (self.serverid, tnow) + else: +diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py +index 193805780..c1a2e7aaa 100644 +--- a/src/lib389/lib389/tasks.py ++++ b/src/lib389/lib389/tasks.py +@@ -118,7 +118,7 @@ class Task(DSLdapObject): + return super(Task, self).create(rdn, properties, basedn) + + @staticmethod +- def _get_task_date(): ++ def get_timestamp(): + """Return a timestamp to use in naming new task entries.""" + + return datetime.now().isoformat() +@@ -132,7 +132,7 @@ class AutomemberRebuildMembershipTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'automember_rebuild_' + Task._get_task_date() ++ self.cn = 'automember_rebuild_' + Task.get_timestamp() + dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_REBUILD_TASK + + super(AutomemberRebuildMembershipTask, self).__init__(instance, dn) +@@ -147,7 +147,7 @@ class AutomemberAbortRebuildTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'automember_abort_' + Task._get_task_date() ++ self.cn = 'automember_abort_' + Task.get_timestamp() + dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_ABORT_REBUILD_TASK + + super(AutomemberAbortRebuildTask, self).__init__(instance, dn) +@@ -161,7 +161,7 @@ class FixupLinkedAttributesTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'fixup_linked_attrs_' + Task._get_task_date() ++ self.cn = 'fixup_linked_attrs_' + Task.get_timestamp() + dn = "cn=" + self.cn + "," + DN_FIXUP_LINKED_ATTIBUTES + + super(FixupLinkedAttributesTask, self).__init__(instance, dn) +@@ -175,7 +175,7 @@ class MemberUidFixupTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'memberUid_fixup_' + Task._get_task_date() ++ self.cn = 'memberUid_fixup_' + Task.get_timestamp() + dn = f"cn={self.cn},cn=memberuid task,cn=tasks,cn=config" + + super(MemberUidFixupTask, self).__init__(instance, dn) +@@ -190,7 +190,7 @@ class MemberOfFixupTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'memberOf_fixup_' + Task._get_task_date() ++ self.cn = 'memberOf_fixup_' + Task.get_timestamp() + dn = "cn=" + self.cn + "," + DN_MBO_TASK + + super(MemberOfFixupTask, self).__init__(instance, dn) +@@ -205,7 +205,7 @@ class USNTombstoneCleanupTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'usn_cleanup_' + Task._get_task_date() ++ self.cn = 'usn_cleanup_' + Task.get_timestamp() + dn = "cn=" + self.cn + ",cn=USN tombstone cleanup task," + DN_TASKS + + super(USNTombstoneCleanupTask, self).__init__(instance, dn) +@@ -225,7 +225,7 @@ class 
csngenTestTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'csngenTest_' + Task._get_task_date() ++ self.cn = 'csngenTest_' + Task.get_timestamp() + dn = "cn=" + self.cn + ",cn=csngen_test," + DN_TASKS + super(csngenTestTask, self).__init__(instance, dn) + +@@ -238,7 +238,7 @@ class EntryUUIDFixupTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'entryuuid_fixup_' + Task._get_task_date() ++ self.cn = 'entryuuid_fixup_' + Task.get_timestamp() + dn = "cn=" + self.cn + "," + DN_EUUID_TASK + super(EntryUUIDFixupTask, self).__init__(instance, dn) + self._must_attributes.extend(['basedn']) +@@ -252,7 +252,7 @@ class DBCompactTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'compact_db_' + Task._get_task_date() ++ self.cn = 'compact_db_' + Task.get_timestamp() + dn = "cn=" + self.cn + "," + DN_COMPACTDB_TASK + super(DBCompactTask, self).__init__(instance, dn) + +@@ -265,7 +265,7 @@ class SchemaReloadTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'schema_reload_' + Task._get_task_date() ++ self.cn = 'schema_reload_' + Task.get_timestamp() + dn = "cn=" + self.cn + ",cn=schema reload task," + DN_TASKS + super(SchemaReloadTask, self).__init__(instance, dn) + +@@ -278,7 +278,7 @@ class SyntaxValidateTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'syntax_validate_' + Task._get_task_date() ++ self.cn = 'syntax_validate_' + Task.get_timestamp() + dn = f"cn={self.cn},cn=syntax validate,cn=tasks,cn=config" + + super(SyntaxValidateTask, self).__init__(instance, dn) +@@ -295,7 +295,7 @@ class AbortCleanAllRUVTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'abortcleanallruv_' + Task._get_task_date() ++ self.cn = 'abortcleanallruv_' + Task.get_timestamp() + dn = "cn=" + self.cn + ",cn=abort cleanallruv," + DN_TASKS + + super(AbortCleanAllRUVTask, self).__init__(instance, dn) +@@ -312,7 +312,7 @@ class CleanAllRUVTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'cleanallruv_' + Task._get_task_date() ++ self.cn = 'cleanallruv_' + Task.get_timestamp() + dn = "cn=" + self.cn + ",cn=cleanallruv," + DN_TASKS + self._properties = None + +@@ -359,7 +359,7 @@ class ImportTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'import_' + Task._get_task_date() ++ self.cn = 'import_' + Task.get_timestamp() + dn = "cn=%s,%s" % (self.cn, DN_IMPORT_TASK) + self._properties = None + +@@ -388,7 +388,7 @@ class ExportTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'export_' + Task._get_task_date() ++ self.cn = 'export_' + Task.get_timestamp() + dn = "cn=%s,%s" % (self.cn, DN_EXPORT_TASK) + self._properties = None + +@@ -411,7 +411,7 @@ class BackupTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'backup_' + Task._get_task_date() ++ self.cn = 'backup_' + Task.get_timestamp() + dn = "cn=" + self.cn + ",cn=backup," + DN_TASKS + self._properties = None + +@@ -426,7 +426,7 @@ class RestoreTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'restore_' + Task._get_task_date() ++ self.cn = 'restore_' + Task.get_timestamp() + dn = "cn=" + self.cn + ",cn=restore," + DN_TASKS + self._properties = None + +@@ -513,7 +513,7 @@ class Tasks(object): + raise ValueError("Import file (%s) does not exist" % input_file) + + # Prepare the task entry +- cn = "import_" + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = "import_" + Task.get_timestamp() + dn = "cn=%s,%s" % (cn, 
DN_IMPORT_TASK) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -581,7 +581,7 @@ class Tasks(object): + raise ValueError("output_file is mandatory") + + # Prepare the task entry +- cn = "export_" + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = "export_" + Task.get_timestamp() + dn = "cn=%s,%s" % (cn, DN_EXPORT_TASK) + entry = Entry(dn) + entry.update({ +@@ -637,7 +637,7 @@ class Tasks(object): + raise ValueError("You must specify a backup directory.") + + # build the task entry +- cn = "backup_" + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = "backup_" + Task.get_timestamp() + dn = "cn=%s,%s" % (cn, DN_BACKUP_TASK) + entry = Entry(dn) + entry.update({ +@@ -694,7 +694,7 @@ class Tasks(object): + raise ValueError("Backup file (%s) does not exist" % backup_dir) + + # build the task entry +- cn = "restore_" + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = "restore_" + Task.get_timestamp() + dn = "cn=%s,%s" % (cn, DN_RESTORE_TASK) + entry = Entry(dn) + entry.update({ +@@ -789,7 +789,7 @@ class Tasks(object): + attrs.append(attr) + else: + attrs.append(attrname) +- cn = "index_vlv_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime())) ++ cn = "index_vlv_%s" % (Task.get_timestamp()) + dn = "cn=%s,%s" % (cn, DN_INDEX_TASK) + entry = Entry(dn) + entry.update({ +@@ -803,7 +803,7 @@ class Tasks(object): + # + # Reindex all attributes - gather them first... + # +- cn = "index_all_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime())) ++ cn = "index_all_%s" % (Task.get_timestamp()) + dn = ('cn=%s,cn=ldbm database,cn=plugins,cn=config' % backend) + try: + indexes = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, '(objectclass=nsIndex)') +@@ -815,7 +815,7 @@ class Tasks(object): + # + # Reindex specific attributes + # +- cn = "index_attrs_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime())) ++ cn = "index_attrs_%s" % (Task.get_timestamp()) + if isinstance(attrname, (tuple, list)): + # Need to guarantee this is a list (and not a tuple) + for attr in attrname: +@@ -903,8 +903,7 @@ class Tasks(object): + + suffix = ents[0].getValue(attr) + +- cn = "fixupmemberof_" + time.strftime("%m%d%Y_%H%M%S", +- time.localtime()) ++ cn = "fixupmemberof_" + Task.get_timestamp() + dn = "cn=%s,%s" % (cn, DN_MBO_TASK) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -965,8 +964,7 @@ class Tasks(object): + if len(ents) != 1: + raise ValueError("invalid backend name: %s" % bename) + +- cn = "fixupTombstone_" + time.strftime("%m%d%Y_%H%M%S", +- time.localtime()) ++ cn = "fixupTombstone_" + Task.get_timestamp() + dn = "cn=%s,%s" % (cn, DN_TOMB_FIXUP_TASK) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1019,7 +1017,7 @@ class Tasks(object): + @return exit code + ''' + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=automember rebuild membership,cn=tasks,cn=config' % cn) + + entry = Entry(dn) +@@ -1077,7 +1075,7 @@ class Tasks(object): + if not ldif_out: + raise ValueError("Missing ldif_out") + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=automember export updates,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1129,7 +1127,7 @@ class Tasks(object): + if not ldif_out or not ldif_in: + raise ValueError("Missing ldif_out and/or ldif_in") + +- cn = 'task-' + 
time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=automember map updates,cn=tasks,cn=config' % cn) + + entry = Entry(dn) +@@ -1175,7 +1173,7 @@ class Tasks(object): + @return exit code + ''' + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=fixup linked attributes,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1219,7 +1217,7 @@ class Tasks(object): + @return exit code + ''' + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=schema reload task,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1264,7 +1262,7 @@ class Tasks(object): + @return exit code + ''' + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=memberuid task,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1311,7 +1309,7 @@ class Tasks(object): + @return exit code + ''' + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=syntax validate,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1358,7 +1356,7 @@ class Tasks(object): + @return exit code + ''' + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=USN tombstone cleanup task,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1413,7 +1411,7 @@ class Tasks(object): + if not configfile: + raise ValueError("Missing required paramter: configfile") + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=sysconfig reload,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1464,7 +1462,7 @@ class Tasks(object): + if not suffix: + raise ValueError("Missing required paramter: suffix") + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=cleanallruv,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1516,7 +1514,7 @@ class Tasks(object): + if not suffix: + raise ValueError("Missing required paramter: suffix") + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=abort cleanallruv,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1571,7 +1569,7 @@ class Tasks(object): + if not nsArchiveDir: + raise ValueError("Missing required paramter: nsArchiveDir") + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=upgradedb,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1616,6 +1614,6 @@ class LDAPIMappingReloadTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'reload-' + Task._get_task_date() ++ self.cn = 'reload-' + Task.get_timestamp() + dn = f'cn={self.cn},cn=reload ldapi mappings,cn=tasks,cn=config' + 
super(LDAPIMappingReloadTask, self).__init__(instance, dn)
+--
+2.48.1
+
diff --git a/SOURCES/0020-Issue-6372-Deadlock-while-doing-online-backup-6475.patch b/SOURCES/0020-Issue-6372-Deadlock-while-doing-online-backup-6475.patch
new file mode 100644
index 0000000..f9bfc7c
--- /dev/null
+++ b/SOURCES/0020-Issue-6372-Deadlock-while-doing-online-backup-6475.patch
@@ -0,0 +1,856 @@
+From ec7eb7e785076d19c0567f3af96837e8b94d3cb3 Mon Sep 17 00:00:00 2001
+From: progier389
+Date: Tue, 7 Jan 2025 11:40:16 +0100
+Subject: [PATCH] Issue 6372 - Deadlock while doing online backup (#6475)
+
+* Issue 6372 - Deadlock while doing online backup
+
+Sometimes the server hangs during online backup because of a deadlock caused by
+a lock order inversion between the dse backup_lock mutex and the dse rwlock.
+
+Solution:
+Add functions to manage the lock/unlock to ensure consistency.
+Ensure that threads always try to lock the dse_backup_lock mutex before the dse write lock.
+Code cleanup:
+
+Move the backup_lock into the dse struct.
+Avoid the obsolete warning during tests (I think that we will have to do a second
+cleanup phase later to see if we could not replace self.conn.add_s by self.create).
+Issue: #6372
+
+Reviewed by: @mreynolds389 (Thanks!)
+
+(cherry picked from commit 7e98ab34305c081859ca0ee5a30888991898a452)
+---
+ .../tests/suites/backups/backup_test.py      | 249 +++++++++++++++++-
+ ldap/servers/slapd/dse.c                     | 170 +++++++-----
+ ldap/servers/slapd/libglobs.c                |   3 -
+ ldap/servers/slapd/main.c                    |   1 -
+ ldap/servers/slapd/slapi-private.h           |   2 -
+ src/lib389/lib389/tasks.py                   |  38 +--
+ 6 files changed, 357 insertions(+), 106 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/backups/backup_test.py b/dirsrvtests/tests/suites/backups/backup_test.py
+index 7d60cf44c..1246469a3 100644
+--- a/dirsrvtests/tests/suites/backups/backup_test.py
++++ b/dirsrvtests/tests/suites/backups/backup_test.py
+@@ -7,21 +7,30 @@
+ # --- END COPYRIGHT BLOCK ---
+ #
+ import logging
+-import pytest
+ import os
+ import shutil
++import time
++import glob
++import subprocess
++import pytest
++import ldap
+ from datetime import datetime
+-from lib389._constants import DEFAULT_SUFFIX, INSTALL_LATEST_CONFIG
++from lib389._constants import DN_DM, PASSWORD, DEFAULT_SUFFIX, INSTALL_LATEST_CONFIG
+ from lib389.properties import BACKEND_SAMPLE_ENTRIES, TASK_WAIT
+-from lib389.topologies import topology_st as topo, topology_m2 as topo_m2
+-from lib389.backend import Backend
++from lib389.topologies import topology_st as topo, topology_m2 as topo_m2, set_timeout
++from lib389.backend import Backends, Backend
++from lib389.dbgen import dbgen_users
+ from lib389.tasks import BackupTask, RestoreTask
+ from lib389.config import BDB_LDBMConfig
++from lib389.idm.nscontainer import nsContainers
+ from lib389 import DSEldif
+ from lib389.utils import ds_is_older, get_default_db_lib
+ from lib389.replica import ReplicationManager
++from threading import Thread, Event
+ import tempfile
+
++
++
+ pytestmark = pytest.mark.tier1
+
+ DEBUGGING = os.getenv("DEBUGGING", default=False)
+@@ -31,6 +40,86 @@ else:
+     logging.getLogger(__name__).setLevel(logging.INFO)
+ log = logging.getLogger(__name__)
+
++# test_online_backup_and_dse_write may hang if 6372 is not fixed
++# so let's use a shorter timeout
++set_timeout(30*60)
++
++event = Event()
++
++BESTRUCT = [
++    { "bename" : "be1", "suffix": "dc=be1", "nbusers": 1000 },
++    { "bename" : "be2", "suffix": "dc=be2", "nbusers": 1000 },
++    { "bename" : "be3", "suffix": "dc=be3", "nbusers": 1000 },
++]
++
++class DseConfigContextManager:
++    """Change a config parameter in dse.ldif and restore it afterwards."""
++
++    def __init__(self, inst, dn, attr, value):
++        self.inst = inst
++        self.dn = dn
++        self.attr = attr
++        self.value = value
++        self.oldvalue = None
++        self.dseldif = DSEldif(inst)
++
++    def __enter__(self):
++        self.inst.stop()
++        self.oldvalue = self.dseldif.get(self.dn, self.attr, single=True)
++        self.dseldif.replace(self.dn, self.attr, self.value)
++        log.info(f"Switching {self.dn}:{self.attr} to {self.value}")
++        self.inst.start()
++
++    def __exit__(self, exc_type, exc_value, exc_tb):
++        self.inst.stop()
++        log.info(f"Switching {self.dn}:{self.attr} to {self.oldvalue}")
++        self.dseldif.replace(self.dn, self.attr, self.oldvalue)
++        self.inst.start()
++
++
++@pytest.fixture(scope="function")
++def mytopo(topo, request):
++    bes = []
++
++    def fin():
++        for be in bes:
++            be.delete()
++        for bak_dir in glob.glob(f'{inst.ds_paths.backup_dir}/*'):
++            shutil.rmtree(bak_dir)
++
++    if not DEBUGGING:
++        request.addfinalizer(fin)
++
++    inst = topo.standalone
++
++    ldif_files = {}
++    for d in BESTRUCT:
++        bename = d['bename']
++        suffix = d['suffix']
++        nbusers = d['nbusers']
++        log.info(f'Adding suffix: {suffix} and backend: {bename}...')
++        backends = Backends(inst)
++        try:
++            be = backends.create(properties={'nsslapd-suffix': suffix, 'name': bename})
++            # Insert at list head so that children backends get deleted before parent one.
++            bes.insert(0, be)
++        except ldap.UNWILLING_TO_PERFORM as e:
++            if str(e) == "Mapping tree for this suffix exists!":
++                pass
++            else:
++                raise e
++
++        ldif_dir = inst.get_ldif_dir()
++        ldif_files[bename] = os.path.join(ldif_dir, f'default_{bename}.ldif')
++        dbgen_users(inst, nbusers, ldif_files[bename], suffix)
++    inst.stop()
++    for d in BESTRUCT:
++        bename = d['bename']
++        inst.ldif2db(bename, None, None, None, ldif_files[bename])
++    inst.start()
++    return topo
++
++
+ def test_missing_backend(topo):
+     """Test that an error is returned when a restore is performed for a
+     backend that is no longer present.
+@@ -80,8 +169,6 @@ def test_missing_backend(topo):
+     assert restore_task.get_exit_code() != 0
+
+
+-@pytest.mark.bz1851967
+-@pytest.mark.ds4112
+ @pytest.mark.skipif(ds_is_older('1.4.1'), reason="Not implemented")
+ @pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb")
+ def test_db_home_dir_online_backup(topo):
+@@ -99,14 +186,16 @@
+         2. Failure
+         3. Success
Success + """ +- bdb_ldbmconfig = BDB_LDBMConfig(topo.standalone) +- dseldif = DSEldif(topo.standalone) +- topo.standalone.stop() +- with tempfile.TemporaryDirectory() as backup_dir: +- dseldif.replace(bdb_ldbmconfig.dn, 'nsslapd-db-home-directory', f'{backup_dir}') +- topo.standalone.start() +- topo.standalone.tasks.db2bak(backup_dir=f'{backup_dir}', args={TASK_WAIT: True}) +- assert topo.standalone.ds_error_log.match(f".*Failed renaming {backup_dir}.bak back to {backup_dir}") ++ inst = topo.standalone ++ dn = BDB_LDBMConfig(inst).dn ++ attr = 'nsslapd-db-home-directory' ++ with tempfile.TemporaryDirectory() as dbhome_dir: ++ with DseConfigContextManager(inst, dn, attr, dbhome_dir): ++ backup_dir = str(dbhome_dir) ++ inst.tasks.db2bak(backup_dir=backup_dir, args={TASK_WAIT: True}) ++ assert inst.ds_error_log.match(f".*Failed renaming {backup_dir}.bak back to {backup_dir}") ++ ++ + + def test_replication(topo_m2): + """Test that if the dbhome directory is set causing an online backup to fail, +@@ -168,6 +257,138 @@ def test_replication(topo_m2): + repl.wait_for_replication(S1, S2) + + ++def test_after_db_log_rotation(topo): ++ """Test that off line backup restore works as expected. ++ ++ :id: 8a091d92-a1cf-11ef-823a-482ae39447e5 ++ :setup: One standalone instance ++ :steps: ++ 1. Stop instance ++ 2. Perform off line backup on instance ++ 3. Start instance ++ 4. Perform modify operation until db log file rotates ++ 5. Stop instance ++ 6. Restore instance from backup ++ 7. Start instance ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Success ++ """ ++ inst = topo.standalone ++ with tempfile.TemporaryDirectory(dir=inst.ds_paths.backup_dir) as backup_dir: ++ # repl.wait_for_replication perform some changes and wait until they get replicated ++ inst.stop() ++ assert inst.db2bak(backup_dir) ++ inst.start() ++ cmd = [ 'ldclt', '-h', 'localhost', '-b', DEFAULT_SUFFIX, ++ '-p', str(inst.port), '-t', '60', '-N', '2', ++ '-D', DN_DM, '-w', PASSWORD, '-f', "ou=People", ++ '-e', 'attreplace=description:XXXXXX' ] ++ log.info(f'Running {cmd}') ++ # Perform modify operations until log file rolls ++ result = subprocess.run(cmd, capture_output=True, text=True, check=True) ++ log.info(f'STDOUT: {result.stdout}') ++ log.info(f'STDERR: {result.stderr}') ++ if get_default_db_lib() == 'bdb': ++ while os.path.isfile(f'{inst.ds_paths.db_dir}/log.0000000001'): ++ subprocess.run(cmd, capture_output=True, text=True, check=True) ++ inst.stop() ++ assert inst.bak2db(backup_dir) ++ inst.start() ++ ++ ++def test_backup_task_after_failure(mytopo): ++ """Test that new backup task is successful after a failure. ++ backend that is no longer present. ++ ++ :id: a6c24898-2cd9-11ef-8c09-482ae39447e5 ++ :setup: Standalone Instance with multiple backends ++ :steps: ++ 1. Cleanup ++ 2. Perform a back up ++ 3. Rename the backup directory while waiting for backup completion. ++ 4. Check that backup failed. ++ 5. Perform a back up ++ 6. Check that backup succeed. ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Backup should fail ++ 5. Success ++ 6. Backup should succeed ++ """ ++ ++ inst = mytopo.standalone ++ tasks = inst.tasks ++ archive_dir1 = f'{inst.ds_paths.backup_dir}/bak1' ++ archive_dir1b = f'{inst.ds_paths.backup_dir}/bak1b' ++ archive_dir2 = f'{inst.ds_paths.backup_dir}/bak2' ++ ++ # Sometime the backup complete too fast, so lets retry if first ++ # backup is successful ++ for retry in range(50): ++ # Step 1. 
++        for dir in glob.glob(f'{inst.ds_paths.backup_dir}/*'):
++            shutil.rmtree(dir)
++        # Step 2. Perform a backup
++        tasks.db2bak(backup_dir=archive_dir1)
++        # Step 3. Wait until task is completed, trying to rename backup directory
++        done,exitCode,warningCode = (False, None, None)
++        while not done:
++            if os.path.isdir(archive_dir1):
++                os.rename(archive_dir1, archive_dir1b)
++            done,exitCode,warningCode = tasks.checkTask(tasks.entry)
++            time.sleep(0.01)
++        if exitCode != 0:
++            break
++    # Step 4. Check that backup failed.
++    # If the next assert fails too often, that means that the backup is too fast.
++    # A fix would probably be to add more backends within mytopo
++    assert exitCode != 0, "Backup did not fail as expected."
++    # Step 5. Perform a second backup after the backup failure
++    exitCode = tasks.db2bak(backup_dir=archive_dir2, args={TASK_WAIT: True})
++    # Step 6. Check it is successful
++    assert exitCode == 0, "Backup failed. Issue #6229 may not be fixed."
++
++
++def load_dse(inst):
++    conts = nsContainers(inst, 'cn=config')
++    while not event.is_set():
++        cont = conts.create(properties={'cn': 'test_online_backup_and_dse_write'})
++        cont.delete()
++
++
++def test_online_backup_and_dse_write(topo):
++    """Test online backup while attempting to add/delete entries in dse.ldif.
++
++    :id: 4a1edd2c-be15-11ef-8bc8-482ae39447e5
++    :setup: One standalone instance
++    :steps:
++        1. Start a thread that loops adding then removing an entry in the dse.ldif
++        2. Perform 10 online backups
++        3. Stop the thread
++    :expectedresults:
++        1. Success
++        2. Success (or timeout if issue #6372 is not fixed.)
++        3. Success
++    """
++    inst = topo.standalone
++    t = Thread(target=load_dse, args=[inst])
++    t.start()
++    for x in range(10):
++        with tempfile.TemporaryDirectory() as backup_dir:
++            assert inst.tasks.db2bak(backup_dir=f'{backup_dir}', args={TASK_WAIT: True}) == 0
++    event.set()
++    t.join()
++    event.clear()
++
++
+ if __name__ == '__main__':
+     # Run isolated
+     # -s for DEBUG mode
+diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
+index 2edc3f1f6..e3157c1ce 100644
+--- a/ldap/servers/slapd/dse.c
++++ b/ldap/servers/slapd/dse.c
+@@ -41,6 +41,7 @@
+ #include
+ /* Needed to access read_config_dse */
+ #include "proto-slap.h"
++#include
+
+ #include /* provides fsync/close */
+
+@@ -73,9 +74,6 @@
+ #define DSE_USE_LOCK 1
+ #define DSE_NO_LOCK 0
+
+-/* Global lock used during backups */
+-static PRLock *backup_lock = NULL;
+-
+ struct dse_callback
+ {
+     int operation;
+@@ -104,6 +102,8 @@ struct dse
+     /* initialize the dse */
+     int dse_is_updateable;           /* if non-zero, this DSE can be written to */
+     int dse_readonly_error_reported; /* used to ensure that read-only errors are logged only once */
++    pthread_mutex_t dse_backup_lock; /* used to block writes while an online backup is in progress */
++    bool dse_backup_in_progress;     /* tells that an online backup is in progress (protected by dse_rwlock) */
+ };
+
+ struct dse_node
+ {
+@@ -155,6 +155,91 @@ static int dse_write_entry(caddr_t data, caddr_t arg);
+ static int ldif_record_end(char *p);
+ static int dse_call_callback(struct dse *pdse, Slapi_PBlock *pb, int operation, int flags, Slapi_Entry *entryBefore, Slapi_Entry *entryAfter, int *returncode, char *returntext);
+
++/* Lock the dse in read mode */
++INLINE_DIRECTIVE static void
++dse_lock_read(struct dse *pdse, int use_lock)
++{
++    if (use_lock == DSE_USE_LOCK && pdse->dse_rwlock) {
++        slapi_rwlock_rdlock(pdse->dse_rwlock);
++    }
++}
++
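The write-side helper that follows is the heart of the deadlock fix: a writer that finds a backup in progress releases the rwlock before sleeping on the backup mutex, so the two locks are never waited on in opposite orders. Below is a minimal Python model of that protocol, for illustration only (it is not the server code; the names `backup_lock`, `rwlock` and `backup_in_progress` only loosely mirror the C fields, and a plain `Lock` stands in for the write side of the rwlock):

```python
import threading

class DseModel:
    """Toy model of the dse backup/write coordination (illustrative only)."""

    def __init__(self):
        self.backup_lock = threading.Lock()   # models dse_backup_lock
        self.rwlock = threading.Lock()        # stand-in for the dse rwlock (write side)
        self.backup_in_progress = False       # models dse_backup_in_progress

    def backup_start(self):
        # Backup thread: take the mutex FIRST, then set the flag under the rwlock.
        self.backup_lock.acquire()
        with self.rwlock:
            self.backup_in_progress = True

    def backup_end(self):
        with self.rwlock:
            self.backup_in_progress = False
        self.backup_lock.release()

    def write_dse(self, do_write):
        # Writer: never sleep on backup_lock while holding the rwlock.
        self.rwlock.acquire()
        while self.backup_in_progress:
            self.rwlock.release()
            # Block until the backup releases its mutex, then re-check the flag.
            self.backup_lock.acquire()
            self.backup_lock.release()
            self.rwlock.acquire()
        try:
            do_write()
        finally:
            self.rwlock.release()
```

Because the writer re-acquires the rwlock only after the backup mutex has been released, no thread ever holds one lock while blocking on the other in the reverse order, which is what previously produced the hang.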
++/* Lock the dse in write mode and wait until any pending backup completes */
++INLINE_DIRECTIVE static void
++dse_lock_write(struct dse *pdse, int use_lock)
++{
++    if (use_lock != DSE_USE_LOCK || !pdse->dse_rwlock) {
++        return;
++    }
++    slapi_rwlock_wrlock(pdse->dse_rwlock);
++    while (pdse->dse_backup_in_progress) {
++        slapi_rwlock_unlock(pdse->dse_rwlock);
++        /* Wait until dse_backup_lock is unlocked */
++        pthread_mutex_lock(&pdse->dse_backup_lock);
++        pthread_mutex_unlock(&pdse->dse_backup_lock);
++        slapi_rwlock_wrlock(pdse->dse_rwlock);
++    }
++}
++
++/* release the dse lock */
++INLINE_DIRECTIVE static void
++dse_lock_unlock(struct dse *pdse, int use_lock)
++{
++    if (use_lock == DSE_USE_LOCK && pdse->dse_rwlock) {
++        slapi_rwlock_unlock(pdse->dse_rwlock);
++    }
++}
++
++/* Call cb(pdse) */
++INLINE_DIRECTIVE static void
++dse_call_cb(void (*cb)(struct dse*))
++{
++    Slapi_Backend *be = slapi_be_select_by_instance_name("DSE");
++    if (be) {
++        struct dse *pdse = NULL;
++        slapi_be_Rlock(be);
++        pdse = be->be_database->plg_private;
++        if (pdse) {
++            cb(pdse);
++        }
++        slapi_be_Unlock(be);
++    }
++}
++
++/* Helper for dse_backup_lock() */
++static void
++dse_backup_lock_cb(struct dse *pdse)
++{
++    pthread_mutex_lock(&pdse->dse_backup_lock);
++    slapi_rwlock_wrlock(pdse->dse_rwlock);
++    pdse->dse_backup_in_progress = true;
++    slapi_rwlock_unlock(pdse->dse_rwlock);
++}
++
++/* Helper for dse_backup_unlock() */
++static void
++dse_backup_unlock_cb(struct dse *pdse)
++{
++    slapi_rwlock_wrlock(pdse->dse_rwlock);
++    pdse->dse_backup_in_progress = false;
++    slapi_rwlock_unlock(pdse->dse_rwlock);
++    pthread_mutex_unlock(&pdse->dse_backup_lock);
++}
++
++/* Tells that a backup thread is starting */
++void
++dse_backup_lock()
++{
++    dse_call_cb(dse_backup_lock_cb);
++}
++
++/* Tells that a backup thread is ending */
++void
++dse_backup_unlock()
++{
++    dse_call_cb(dse_backup_unlock_cb);
++}
++
+ /*
+  * Map a DN onto a dse_node.
+  * Returns NULL if not found.
+@@ -192,18 +277,12 @@ dse_get_entry_copy(struct dse *pdse, const Slapi_DN *dn, int use_lock) + Slapi_Entry *e = NULL; + struct dse_node *n; + +- if (use_lock == DSE_USE_LOCK && pdse->dse_rwlock) { +- slapi_rwlock_rdlock(pdse->dse_rwlock); +- } +- ++ dse_lock_read(pdse, use_lock); + n = dse_find_node(pdse, dn); + if (n != NULL) { + e = slapi_entry_dup(n->entry); + } +- +- if (use_lock == DSE_USE_LOCK && pdse->dse_rwlock) { +- slapi_rwlock_unlock(pdse->dse_rwlock); +- } ++ dse_lock_unlock(pdse, use_lock); + + return e; + } +@@ -393,6 +472,7 @@ dse_new(char *filename, char *tmpfilename, char *backfilename, char *startokfile + pdse->dse_callback = NULL; + pdse->dse_is_updateable = dse_permission_to_write(pdse, + SLAPI_LOG_TRACE); ++ pthread_mutex_init(&pdse->dse_backup_lock, NULL); + } + slapi_ch_free((void **)&realconfigdir); + } +@@ -429,8 +509,7 @@ dse_destroy(struct dse *pdse) + if (NULL == pdse) { + return 0; /* no one checks this return value */ + } +- if (pdse->dse_rwlock) +- slapi_rwlock_wrlock(pdse->dse_rwlock); ++ dse_lock_write(pdse, DSE_USE_LOCK); + slapi_ch_free((void **)&(pdse->dse_filename)); + slapi_ch_free((void **)&(pdse->dse_tmpfile)); + slapi_ch_free((void **)&(pdse->dse_fileback)); +@@ -439,8 +518,8 @@ dse_destroy(struct dse *pdse) + dse_callback_deletelist(&pdse->dse_callback); + charray_free(pdse->dse_filelist); + nentries = avl_free(pdse->dse_tree, dse_internal_delete_entry); ++ dse_lock_unlock(pdse, DSE_USE_LOCK); + if (pdse->dse_rwlock) { +- slapi_rwlock_unlock(pdse->dse_rwlock); + slapi_destroy_rwlock(pdse->dse_rwlock); + } + slapi_ch_free((void **)&pdse); +@@ -928,9 +1007,7 @@ dse_check_for_readonly_error(Slapi_PBlock *pb, struct dse *pdse) + { + int rc = 0; /* default: no error */ + +- if (pdse->dse_rwlock) +- slapi_rwlock_rdlock(pdse->dse_rwlock); +- ++ dse_lock_read(pdse, DSE_USE_LOCK); + if (!pdse->dse_is_updateable) { + if (!pdse->dse_readonly_error_reported) { + if (NULL != pdse->dse_filename) { +@@ -944,9 +1021,7 @@ dse_check_for_readonly_error(Slapi_PBlock *pb, struct dse *pdse) + } + rc = 1; /* return an error to the client */ + } +- +- if (pdse->dse_rwlock) +- slapi_rwlock_unlock(pdse->dse_rwlock); ++ dse_lock_unlock(pdse, DSE_USE_LOCK); + + if (rc != 0) { + slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, +@@ -973,8 +1048,6 @@ dse_write_file_nolock(struct dse *pdse) + fpw.fpw_rc = 0; + fpw.fpw_prfd = NULL; + +- dse_backup_lock(); +- + if (NULL != pdse->dse_filename) { + if ((fpw.fpw_prfd = PR_Open(pdse->dse_tmpfile, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, SLAPD_DEFAULT_DSE_FILE_MODE)) == NULL) { + rc = PR_GetOSError(); +@@ -1107,8 +1180,7 @@ dse_add_entry_pb(struct dse *pdse, Slapi_Entry *e, Slapi_PBlock *pb) + slapi_pblock_get(pb, SLAPI_DSE_MERGE_WHEN_ADDING, &merge); + + /* keep write lock during both tree update and file write operations */ +- if (pdse->dse_rwlock) +- slapi_rwlock_wrlock(pdse->dse_rwlock); ++ dse_lock_write(pdse, DSE_USE_LOCK); + if (merge) { + rc = avl_insert(&(pdse->dse_tree), n, entry_dn_cmp, dupentry_merge); + } else { +@@ -1131,8 +1203,7 @@ dse_add_entry_pb(struct dse *pdse, Slapi_Entry *e, Slapi_PBlock *pb) + } else { /* duplicate entry ignored */ + dse_node_delete(&n); /* This also deletes the contained entry */ + } +- if (pdse->dse_rwlock) +- slapi_rwlock_unlock(pdse->dse_rwlock); ++ dse_lock_unlock(pdse, DSE_USE_LOCK); + + if (rc == -1) { + /* duplicate entry ignored */ +@@ -1299,8 +1370,7 @@ dse_replace_entry(struct dse *pdse, Slapi_Entry *e, int write_file, int use_lock + int rc = -1; + if (NULL != e) { + struct 
dse_node *n = dse_node_new(e); +- if (use_lock && pdse->dse_rwlock) +- slapi_rwlock_wrlock(pdse->dse_rwlock); ++ dse_lock_write(pdse, use_lock); + rc = avl_insert(&(pdse->dse_tree), n, entry_dn_cmp, dupentry_replace); + if (write_file) + dse_write_file_nolock(pdse); +@@ -1310,8 +1380,7 @@ dse_replace_entry(struct dse *pdse, Slapi_Entry *e, int write_file, int use_lock + dse_node_delete(&n); + rc = 0; /* for return to caller */ + } +- if (use_lock && pdse->dse_rwlock) +- slapi_rwlock_unlock(pdse->dse_rwlock); ++ dse_lock_unlock(pdse, use_lock); + } + return rc; + } +@@ -1398,8 +1467,7 @@ dse_delete_entry(struct dse *pdse, Slapi_PBlock *pb, const Slapi_Entry *e) + slapi_pblock_get(pb, SLAPI_DSE_DONT_WRITE_WHEN_ADDING, &dont_write_file); + + /* keep write lock for both tree deleting and file writing */ +- if (pdse->dse_rwlock) +- slapi_rwlock_wrlock(pdse->dse_rwlock); ++ dse_lock_write(pdse, DSE_USE_LOCK); + if ((deleted_node = (struct dse_node *)avl_delete(&pdse->dse_tree, n, entry_dn_cmp))) { + dse_node_delete(&deleted_node); + } +@@ -1411,8 +1479,7 @@ dse_delete_entry(struct dse *pdse, Slapi_PBlock *pb, const Slapi_Entry *e) + SLAPI_OPERATION_DELETE); + dse_write_file_nolock(pdse); + } +- if (pdse->dse_rwlock) +- slapi_rwlock_unlock(pdse->dse_rwlock); ++ dse_lock_unlock(pdse, DSE_USE_LOCK); + + return 1; + } +@@ -1574,11 +1641,9 @@ do_dse_search(struct dse *pdse, Slapi_PBlock *pb, int scope, const Slapi_DN *bas + * entries that change, we skip looking through the DSE entries. + */ + if (pb_op == NULL || !operation_is_flag_set(pb_op, OP_FLAG_PS_CHANGESONLY)) { +- if (pdse->dse_rwlock) +- slapi_rwlock_rdlock(pdse->dse_rwlock); ++ dse_lock_read(pdse, DSE_USE_LOCK); + dse_apply_nolock(pdse, dse_search_filter_entry, (caddr_t)&stuff); +- if (pdse->dse_rwlock) +- slapi_rwlock_unlock(pdse->dse_rwlock); ++ dse_lock_unlock(pdse, DSE_USE_LOCK); + } + + if (stuff.ss) /* something was found which matched our criteria */ +@@ -2925,32 +2990,3 @@ dse_next_search_entry(Slapi_PBlock *pb) + return 0; + } + +-/* When a backup is occurring we can not allow the writing the dse.ldif file */ +-void +-dse_init_backup_lock() +-{ +- backup_lock = PR_NewLock(); +-} +- +-void +-dse_destroy_backup_lock() +-{ +- PR_DestroyLock(backup_lock); +- backup_lock = NULL; +-} +- +-void +-dse_backup_lock() +-{ +- if (backup_lock) { +- PR_Lock(backup_lock); +- } +-} +- +-void +-dse_backup_unlock() +-{ +- if (backup_lock) { +- PR_Unlock(backup_lock); +- } +-} +diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c +index caf6eaa6a..f8f6096c7 100644 +--- a/ldap/servers/slapd/libglobs.c ++++ b/ldap/servers/slapd/libglobs.c +@@ -2020,9 +2020,6 @@ FrontendConfig_init(void) + /* Done, unlock! 
*/ + CFG_UNLOCK_WRITE(cfg); + +- /* init the dse file backup lock */ +- dse_init_backup_lock(); +- + init_config_get_and_set(); + } + +diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c +index ee32c1a6c..5c841c528 100644 +--- a/ldap/servers/slapd/main.c ++++ b/ldap/servers/slapd/main.c +@@ -1157,7 +1157,6 @@ cleanup: + SSL_ClearSessionCache(); + ndn_cache_destroy(); + NSS_Shutdown(); +- dse_destroy_backup_lock(); + + /* + * Server has stopped, lets force everything to disk: logs +diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index ee7659ac0..1ceb36beb 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -1411,8 +1411,6 @@ void modify_update_last_modified_attr(Slapi_PBlock *pb, Slapi_Mods *smods); + void add_internal_modifiersname(Slapi_PBlock *pb, Slapi_Entry *e); + + /* dse.c */ +-void dse_init_backup_lock(void); +-void dse_destroy_backup_lock(void); + void dse_backup_lock(void); + void dse_backup_unlock(void); + +diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py +index c1a2e7aaa..6bf302862 100644 +--- a/src/lib389/lib389/tasks.py ++++ b/src/lib389/lib389/tasks.py +@@ -525,7 +525,7 @@ class Tasks(object): + entry.setValues('nsIncludeSuffix', suffix) + + # start the task and possibly wait for task completion +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + + exitCode = 0 + warningCode = 0 +@@ -598,7 +598,7 @@ class Tasks(object): + entry.setValues('nsExportReplica', 'true') + + # start the task and possibly wait for task completion +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + exitCode = 0 + warningCode = 0 + if args and args.get(TASK_WAIT, False): +@@ -649,7 +649,7 @@ class Tasks(object): + + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add the backup task (%s)", dn) + return -1 +@@ -706,7 +706,7 @@ class Tasks(object): + + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add the backup task (%s)", dn) + return -1 +@@ -834,7 +834,7 @@ class Tasks(object): + + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add the index task for %s", attrname) + return -1 +@@ -914,7 +914,7 @@ class Tasks(object): + + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add the memberOf fixup task") + return -1 +@@ -975,7 +975,7 @@ class Tasks(object): + + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add the fixup tombstone task") + return -1 +@@ -1031,7 +1031,7 @@ class Tasks(object): + + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add Automember Rebuild Membership task") + return -1 +@@ -1087,7 +1087,7 @@ class Tasks(object): + + # start the task and possibly 
wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add Automember Export Updates task") + return -1 +@@ -1138,7 +1138,7 @@ class Tasks(object): + + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add Automember Map Updates task") + return -1 +@@ -1183,7 +1183,7 @@ class Tasks(object): + + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add Fixup Linked Attributes task") + return -1 +@@ -1227,7 +1227,7 @@ class Tasks(object): + + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add Schema Reload task") + return -1 +@@ -1272,7 +1272,7 @@ class Tasks(object): + + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add fixupWinsyncMembers 'memberuid task'") + return -1 +@@ -1319,7 +1319,7 @@ class Tasks(object): + + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add Syntax Validate task") + return -1 +@@ -1370,7 +1370,7 @@ class Tasks(object): + + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add USN tombstone cleanup task") + return -1 +@@ -1421,7 +1421,7 @@ class Tasks(object): + entry.setValues('logchanges', logchanges) + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add Sysconfig Reload task") + return -1 +@@ -1473,7 +1473,7 @@ class Tasks(object): + entry.setValues('replica-force-cleaning', 'yes') + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add cleanAllRUV task") + return (dn, -1) +@@ -1528,7 +1528,7 @@ class Tasks(object): + + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add Abort cleanAllRUV task") + return (dn, -1) +@@ -1582,7 +1582,7 @@ class Tasks(object): + + # start the task and possibly wait for task completion + try: +- self.conn.add_s(entry) ++ self.conn.add_s(entry, escapehatch='i am sure') + except ldap.ALREADY_EXISTS: + self.log.error("Fail to add upgradedb task") + return -1 +-- +2.48.1 + diff --git a/SOURCES/0021-Issue-6509-Race-condition-with-Paged-Result-searches.patch b/SOURCES/0021-Issue-6509-Race-condition-with-Paged-Result-searches.patch new file mode 100644 index 0000000..dd5494e --- /dev/null +++ b/SOURCES/0021-Issue-6509-Race-condition-with-Paged-Result-searches.patch @@ -0,0 +1,245 @@ +From dab3b79e8de4ff8825c9f73685bf59be7bc19f97 Mon Sep 17 
00:00:00 2001
+From: Mark Reynolds
+Date: Thu, 16 Jan 2025 08:42:53 -0500
+Subject: [PATCH] Issue 6509 - Race condition with Paged Result searches
+
+Description:
+
+There is a race condition with Paged Result searches when a new operation comes
+in while a paged search is finishing. This triggers an invalid timeout error
+and closes the connection with a T3 code.
+
+The problem is that we do not use the "PagedResult lock" when checking the
+connection's paged result data for a timeout event. This causes the paged
+result timeout value to change unexpectedly and trigger a false timeout when a
+new operation arrives.
+
+Now we check the timeout without the conn lock; if it's expired it could be a
+race condition and a false positive, so we try the lock and test the timeout
+again. This also prevents non-paged result searches from getting held up by
+the lock when it's not necessary.
+
+This also fixes some memory leaks that occur when an error happens.
+
+Relates: https://github.com/389ds/389-ds-base/issues/6509
+
+Reviewed by: tbordaz & proger (Thanks!!)
+---
+ ldap/servers/slapd/daemon.c       | 59 ++++++++++++++++++-------------
+ ldap/servers/slapd/opshared.c     | 32 ++++++++---------
+ ldap/servers/slapd/pagedresults.c |  9 +++++
+ ldap/servers/slapd/slap.h         |  2 +-
+ 4 files changed, 61 insertions(+), 41 deletions(-)
+
+diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
+index 56e20bd13..1a20796b9 100644
+--- a/ldap/servers/slapd/daemon.c
++++ b/ldap/servers/slapd/daemon.c
+@@ -1546,7 +1546,29 @@ setup_pr_read_pds(Connection_Table *ct, int listnum)
+         if (c->c_state == CONN_STATE_FREE) {
+             connection_table_move_connection_out_of_active_list(ct, c);
+         } else {
+-            /* we try to acquire the connection mutex, if it is already
++            /* Check for a timeout for PAGED RESULTS */
++            if (pagedresults_is_timedout_nolock(c)) {
++                /*
++                 * There could be a race condition so let's try again with the
++                 * right lock
++                 */
++                pthread_mutex_t *pr_mutex = pageresult_lock_get_addr(c);
++                if (pthread_mutex_trylock(pr_mutex) == EBUSY) {
++                    c = next;
++                    continue;
++                }
++                if (pagedresults_is_timedout_nolock(c)) {
++                    pthread_mutex_unlock(pr_mutex);
++                    disconnect_server(c, c->c_connid, -1,
++                                      SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
++                                      0);
++                } else {
++                    pthread_mutex_unlock(pr_mutex);
++                }
++            }
++
++            /*
++             * we try to acquire the connection mutex, if it is already
+              * acquired by another thread, don't wait
+              */
+             if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
+@@ -1554,35 +1576,24 @@ setup_pr_read_pds(Connection_Table *ct, int listnum)
+                 continue;
+             }
+             if (c->c_flags & CONN_FLAG_CLOSING) {
+-                /* A worker thread has marked that this connection
+-                 * should be closed by calling disconnect_server.
+-                 * move this connection out of the active list
+-                 * the last thread to use the connection will close it
++                /*
++                 * A worker thread, or paged result timeout, has marked that
++                 * this connection should be closed by calling
++                 * disconnect_server(). Move this connection out of the active
++                 * list then the last thread to use the connection will close
++                 * it.
+                 */
+                connection_table_move_connection_out_of_active_list(ct, c);
+            } else if (c->c_sd == SLAPD_INVALID_SOCKET) {
+                connection_table_move_connection_out_of_active_list(ct, c);
+            } else if (c->c_prfd != NULL) {
+                if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
+-                    int add_fd = 1;
+-                    /* check timeout for PAGED RESULTS */
+-                    if (pagedresults_is_timedout_nolock(c)) {
+-                        /* Exceeded the paged search timelimit; disconnect the client */
+-                        disconnect_server_nomutex(c, c->c_connid, -1,
+-                                                  SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
+-                                                  0);
+-                        connection_table_move_connection_out_of_active_list(ct,
+-                                                                            c);
+-                        add_fd = 0; /* do not poll on this fd */
+-                    }
+-                    if (add_fd) {
+-                        ct->fd[listnum][count].fd = c->c_prfd;
+-                        ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
+-                        /* slot i of the connection table is mapped to slot
+-                         * count of the fds array */
+-                        c->c_fdi = count;
+-                        count++;
+-                    }
++                    ct->fd[listnum][count].fd = c->c_prfd;
++                    ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
++                    /* slot i of the connection table is mapped to slot
++                     * count of the fds array */
++                    c->c_fdi = count;
++                    count++;
+                } else {
+                    if (c->c_threadnumber >= c->c_max_threads_per_conn) {
+                        c->c_maxthreadsblocked++;
+diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
+index 540597f45..7dc2d5983 100644
+--- a/ldap/servers/slapd/opshared.c
++++ b/ldap/servers/slapd/opshared.c
+@@ -251,7 +251,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
+     char *errtext = NULL;
+     int nentries, pnentries;
+     int flag_search_base_found = 0;
+-    int flag_no_such_object = 0;
++    bool flag_no_such_object = false;
+     int flag_referral = 0;
+     int flag_psearch = 0;
+     int err_code = LDAP_SUCCESS;
+@@ -852,7 +852,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
+             /* may be the object exist somewhere else
+              * wait the end of the loop to send back this error
+              */
+-            flag_no_such_object = 1;
++            flag_no_such_object = true;
+         } else {
+             /* err something other than LDAP_NO_SUCH_OBJECT, so the backend will
+              * have sent the result -
+@@ -862,35 +862,35 @@
+         /* fall through */
+
+     case -1: /* an error occurred */
++        slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
+         /* PAGED RESULTS */
+         if (op_is_pagedresults(operation)) {
+             /* cleanup the slot */
+             pthread_mutex_lock(pagedresults_mutex);
++            if (err != LDAP_NO_SUCH_OBJECT && !flag_no_such_object) {
++                /* Free the results if not "no_such_object" */
++                void *sr = NULL;
++                slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
++                be->be_search_results_release(&sr);
++            }
+             pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
+             rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1);
+             pthread_mutex_unlock(pagedresults_mutex);
+         }
+-        if (1 == flag_no_such_object) {
+-            break;
+-        }
+-        slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
+-        if (err == LDAP_NO_SUCH_OBJECT) {
+-            /* may be the object exist somewhere else
+-             * wait the end of the loop to send back this error
+-             */
+-            flag_no_such_object = 1;
++
++        if (err == LDAP_NO_SUCH_OBJECT || flag_no_such_object) {
++            /* Maybe the object exists somewhere else, wait until the end
++             * of the loop to send back this error */
++            flag_no_such_object = true;
+             break;
+         } else {
+-            /* for error other than LDAP_NO_SUCH_OBJECT
+-             * the error has already been sent
+-             * stop the search here
+-             */
++            /* For error other than LDAP_NO_SUCH_OBJECT the error has
++             * already been sent; stop the search here */
+             cache_return_target_entry(pb, be, operation);
+             goto free_and_return;
+         }
+
+         /* when rc == SLAPI_FAIL_DISKFULL this case is executed */
+-
+     case SLAPI_FAIL_DISKFULL:
+         operation_out_of_disk_space();
+         cache_return_target_entry(pb, be, operation);
+diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
+index 9959c927e..642aefb3d 100644
+--- a/ldap/servers/slapd/pagedresults.c
++++ b/ldap/servers/slapd/pagedresults.c
+@@ -121,12 +121,15 @@ pagedresults_parse_control_value(Slapi_PBlock *pb,
+     if (ber_scanf(ber, "{io}", pagesize, &cookie) == LBER_ERROR) {
+         slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
+                       "<= corrupted control value\n");
++        ber_free(ber, 1);
+         return LDAP_PROTOCOL_ERROR;
+     }
+     if (!maxreqs) {
+         slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
+                       "Simple paged results requests per conn exceeded the limit: %d\n",
+                       maxreqs);
++        ber_free(ber, 1);
++        slapi_ch_free_string(&cookie.bv_val);
+         return LDAP_UNWILLING_TO_PERFORM;
+     }
+
+@@ -376,6 +379,10 @@ pagedresults_free_one_msgid(Connection *conn, ber_int_t msgid, pthread_mutex_t *
+             }
+             prp->pr_flags |= CONN_FLAG_PAGEDRESULTS_ABANDONED;
+             prp->pr_flags &= ~CONN_FLAG_PAGEDRESULTS_PROCESSING;
++            if (conn->c_pagedresults.prl_count > 0) {
++                _pr_cleanup_one_slot(prp);
++                conn->c_pagedresults.prl_count--;
++            }
+             rc = 0;
+             break;
+         }
+@@ -936,7 +943,9 @@ pagedresults_is_timedout_nolock(Connection *conn)
+             return 1;
+         }
+     }
++
+     slapi_log_err(SLAPI_LOG_TRACE, "<-- pagedresults_is_timedout", "<= false 2\n");
++
+     return 0;
+ }
+
+diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
+index 15b69e69e..ca2f1f12c 100644
+--- a/ldap/servers/slapd/slap.h
++++ b/ldap/servers/slapd/slap.h
+@@ -77,7 +77,7 @@ static char ptokPBE[34] = "Internal (Software) Token        ";
+ #include
+ #include
+ #include
+-
++#include
+ #include /* For timespec definitions */
+
+ /* Provides our int types and platform specific requirements. */
+--
+2.48.1
+
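The daemon.c change in the patch above is a double-checked timeout: test the timeout without any lock (the cheap common case), and only if it appears expired, confirm under the paged-results lock using a non-blocking trylock so the poll loop is never stalled. A minimal sketch of that pattern in Python follows; `conn`, `is_timed_out` and `disconnect` are hypothetical stand-ins for the connection object and the C helpers, and this is an illustration of the locking discipline rather than the server code:

```python
import threading

pr_lock = threading.Lock()  # models the per-connection "PagedResult lock"

def check_paged_timeout(conn, is_timed_out, disconnect):
    """Double-checked timeout test (sketch of the daemon.c logic above)."""
    if not is_timed_out(conn):
        return  # fast path: no lock taken at all
    # The unlocked check can race with a new operation that just refreshed
    # the timeout, so confirm while holding the lock before disconnecting.
    if not pr_lock.acquire(blocking=False):
        return  # lock is busy; re-check on the next poll cycle instead of blocking
    try:
        if is_timed_out(conn):
            disconnect(conn)
    finally:
        pr_lock.release()
```

The first unlocked check keeps ordinary (non-paged) traffic from paying for the lock; the second, locked check eliminates the false positive that produced the spurious T3 disconnects.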
diff --git a/SOURCES/0022-Issue-6436-MOD-on-a-large-group-slow-if-substring-in.patch b/SOURCES/0022-Issue-6436-MOD-on-a-large-group-slow-if-substring-in.patch
new file mode 100644
index 0000000..b49deb2
--- /dev/null
+++ b/SOURCES/0022-Issue-6436-MOD-on-a-large-group-slow-if-substring-in.patch
@@ -0,0 +1,157 @@
+From 294b9a665b7b77e2c6332618870d336e71f356b3 Mon Sep 17 00:00:00 2001
+From: James Chapman
+Date: Wed, 29 Jan 2025 17:41:55 +0000
+Subject: [PATCH] Issue 6436 - MOD on a large group slow if substring index is
+ present (#6437)
+
+Bug Description: If the substring index is configured for the group
+membership attribute (member or uniqueMember), the removal of a
+member from a large static group is pretty slow.
+
+Fix Description: A proper solution would be to introduce a new index
+type to track membership attributes. In the interim, we add a check
+to healthcheck to inform the user of the implications of this
+configuration.
+
+Fixes: https://github.com/389ds/389-ds-base/issues/6436
+
+Reviewed by: @Firstyear, @tbordaz, @droideck (Thanks)
+---
+ .../suites/healthcheck/health_config_test.py | 10 ++---
+ src/lib389/lib389/lint.py                    | 15 ++++++++
+ src/lib389/lib389/plugins.py                 | 37 ++++++++++++++++++-
+ 3 files changed, 56 insertions(+), 6 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
+index c462805e1..c7570cbf2 100644
+--- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py
++++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
+@@ -210,6 +210,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
+     MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
+
+     standalone = topology_st.standalone
++    standalone.config.set("nsslapd-accesslog-logbuffering", "on")
+
+     log.info('Enable RI plugin')
+     plugin = ReferentialIntegrityPlugin(standalone)
+@@ -231,7 +232,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
+
+
+ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
+-    """Check if HealthCheck returns DSMOLE0002 code
++    """Check if HealthCheck returns DSMOLE0001 code
+
+     :id: 236b0ec2-13da-48fb-b65a-db7406d56d5d
+     :setup: Standalone instance
+@@ -246,8 +247,8 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
+     :expectedresults:
+         1. Success
+         2. Success
+-        3. Healthcheck reports DSMOLE0002 code and related details
+-        4. Healthcheck reports DSMOLE0002 code and related details
++        3. Healthcheck reports DSMOLE0001 code and related details
++        4. Healthcheck reports DSMOLE0001 code and related details
+         5. Success
+         6. Healthcheck reports no issue found
+         7. Healthcheck reports no issue found
+@@ -257,6 +258,7 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
+     MO_GROUP_ATTR = 'creatorsname'
+
+     standalone = topology_st.standalone
++    standalone.config.set("nsslapd-accesslog-logbuffering", "on")
+
+     log.info('Enable MO plugin')
+     plugin = MemberOfPlugin(standalone)
+@@ -279,8 +281,6 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
+     standalone.restart()
+
+
+-@pytest.mark.ds50873
+-@pytest.mark.bz1685160
+ @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented")
+ def test_healthcheck_virtual_attr_incorrectly_indexed(topology_st):
+     """Check if HealthCheck returns DSVIRTLE0001 code
+diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
+index 9baa710de..bc21d2355 100644
+--- a/src/lib389/lib389/lint.py
++++ b/src/lib389/lib389/lint.py
+@@ -292,6 +292,21 @@ database after adding the missing index type. Here is an example using dsconf:
+ """
+ }
+
++DSMOLE0002 = {
++    'dsle': 'DSMOLE0002',
++    'severity': 'LOW',
++    'description': 'Removal of a member can be slow',
++    'items': ['cn=memberof plugin,cn=plugins,cn=config', ],
++    'detail': """If the substring index is configured for a membership attribute, the removal of a member
++from a large group can be slow.
++
++""",
++    'fix': """If not required, you can remove the substring index type using dsconf:
++
++    # dsconf slapd-YOUR_INSTANCE backend index set --attr=ATTR BACKEND --del-type=sub
++"""
++}
++
+ # Disk Space check. Note - PARTITION is replaced by the calling function
+ DSDSLE0001 = {
+     'dsle': 'DSDSLE0001',
+diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
+index 2f1969f03..75c16a7c8 100644
+--- a/src/lib389/lib389/plugins.py
++++ b/src/lib389/lib389/plugins.py
+@@ -12,7 +12,7 @@ import copy
+ import os.path
+ from lib389 import tasks
+ from lib389._mapped_object import DSLdapObjects, DSLdapObject
+-from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001
++from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001, DSMOLE0002
+ from lib389.utils import ensure_str, ensure_list_bytes
+ from lib389.schema import Schema
+ from lib389._constants import (
+@@ -827,6 +827,41 @@ class MemberOfPlugin(Plugin):
+             report['check'] = f'memberof:attr_indexes'
+             yield report
+
++    def _lint_member_substring_index(self):
++        if self.status():
++            from lib389.backend import Backends
++            backends = Backends(self._instance).list()
++            membership_attrs = ['member', 'uniquemember']
++            container = self.get_attr_val_utf8_l("nsslapd-plugincontainerscope")
++            for backend in backends:
++                suffix = backend.get_attr_val_utf8_l('nsslapd-suffix')
++                if suffix == "cn=changelog":
++                    # Always skip retro changelog
++                    continue
++                if container is not None:
++                    # Check if this backend is in the scope
++                    if not container.endswith(suffix):
++                        # skip this backend that is not in the scope
++                        continue
++                indexes = backend.get_indexes()
++                for attr in membership_attrs:
++                    report = copy.deepcopy(DSMOLE0002)
++                    try:
++                        index = indexes.get(attr)
++                        types = index.get_attr_vals_utf8_l("nsIndexType")
++                        if "sub" in types:
++                            report['detail'] = report['detail'].replace('ATTR', attr)
++                            report['detail'] = report['detail'].replace('BACKEND', suffix)
++                            report['fix'] = report['fix'].replace('ATTR', attr)
++                            report['fix'] = report['fix'].replace('BACKEND', suffix)
++                            report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
++                            report['items'].append(suffix)
++                            report['items'].append(attr)
++                            report['check'] = f'attr:substring_index'
++                            yield report
++                    except KeyError:
++                        continue
++
+     def get_attr(self):
+         """Get memberofattr attribute"""
+
+--
+2.48.1
+
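Stripped of the lint-report plumbing, the new `_lint_member_substring_index` check above boils down to the following standalone sketch. It reuses only calls that appear in the patch itself (`Backends(...).list()`, `get_indexes()`, `get_attr_vals_utf8_l(...)`); treat it as an illustration against a connected lib389 `DirSrv` instance `inst`, not as part of the shipped code:

```python
from lib389.backend import Backends

def membership_substring_indexes(inst, membership_attrs=('member', 'uniquemember')):
    """Yield (suffix, attr) pairs whose index carries the costly 'sub' type."""
    for backend in Backends(inst).list():
        suffix = backend.get_attr_val_utf8_l('nsslapd-suffix')
        if suffix == 'cn=changelog':
            continue  # the retro changelog is always skipped, as in the plugin check
        indexes = backend.get_indexes()
        for attr in membership_attrs:
            try:
                index = indexes.get(attr)
                types = index.get_attr_vals_utf8_l('nsIndexType')
            except KeyError:
                continue  # no index defined for this attribute
            if 'sub' in types:
                yield (suffix, attr)
```

Any pair this yields is a candidate for the remediation suggested by the DSMOLE0002 fix text, i.e. dropping the `sub` index type for that attribute with `dsconf ... backend index set --attr=ATTR BACKEND --del-type=sub`.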
diff --git a/SOURCES/0023-Issue-6494-Various-errors-when-using-extended-matchi.patch b/SOURCES/0023-Issue-6494-Various-errors-when-using-extended-matchi.patch
new file mode 100644
index 0000000..e3ff4cb
--- /dev/null
+++ b/SOURCES/0023-Issue-6494-Various-errors-when-using-extended-matchi.patch
@@ -0,0 +1,791 @@
+From 09daeb90f13bd76004e377e27295606b0bc5f579 Mon Sep 17 00:00:00 2001
+From: progier389
+Date: Mon, 13 Jan 2025 18:03:07 +0100
+Subject: [PATCH] Issue 6494 - Various errors when using extended matching rule
+ on vlv sort filter (#6495)
+
+* Issue 6494 - Various errors when using extended matching rule on vlv sort filter
+
+Various issues when configuring and using an extended matching rule within a vlv sort filter:
+
+Race condition around the key storage while indexing, leading to various heap and data corruption (lmdb only).
+Crash while indexing if vlv are misconfigured, because a NULL key is not checked.
+Read past the end of a buffer because of a data type mismatch between SlapiValue and berval.
+Memory leaks.
+
+Solution:
+
+Serialize the vlv index key generation if the vlv filter has an extended matching rule.
+Check NULL keys.
+Always provide SlapiValue even if we want to get keys as bervals.
+Free the resources properly.
+
+Issue: #6494
+
+Reviewed by: @mreynolds389 (Thanks!)
+
+(cherry picked from commit 4bd27ecc4e1d21c8af5ab8cad795d70477179a98)
+---
+ .../tests/suites/indexes/regression_test.py   |  29 +++
+ .../tests/suites/vlv/regression_test.py       | 184 +++++++++++++++++-
+ ldap/servers/slapd/back-ldbm/cleanup.c        |   8 +
+ .../back-ldbm/db-mdb/mdb_import_threads.c     |  34 +++-
+ .../slapd/back-ldbm/db-mdb/mdb_layer.c        |   1 +
+ .../slapd/back-ldbm/db-mdb/mdb_layer.h        |   1 +
+ ldap/servers/slapd/back-ldbm/db-mdb/mdb_txn.c |  12 +-
+ ldap/servers/slapd/back-ldbm/dblayer.c        |  22 ++-
+ ldap/servers/slapd/back-ldbm/ldbm_attr.c      |   2 +-
+ ldap/servers/slapd/back-ldbm/matchrule.c      |   8 +-
+ .../servers/slapd/back-ldbm/proto-back-ldbm.h |   3 +-
+ ldap/servers/slapd/back-ldbm/sort.c           |  33 ++--
+ ldap/servers/slapd/back-ldbm/vlv.c            |  26 +--
+ ldap/servers/slapd/back-ldbm/vlv_srch.c       |   4 +-
+ ldap/servers/slapd/generation.c               |   5 +
+ ldap/servers/slapd/plugin_mr.c                |  13 +-
+ src/lib389/lib389/backend.py                  |  10 +
+ 17 files changed, 335 insertions(+), 60 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
+index b077b529a..08d78a899 100644
+--- a/dirsrvtests/tests/suites/indexes/regression_test.py
++++ b/dirsrvtests/tests/suites/indexes/regression_test.py
+@@ -547,6 +547,35 @@ def test_task_and_be(topo, add_backend_and_ldif_50K_users):
+         assert user.get_attr_val_utf8_l('description') == descval
+
+
++def test_reindex_extended_matching_rule(topo, add_backend_and_ldif_50K_users):
++    """Check that indexes with an extended matching rule are reindexed properly.
++
++    :id: 8a3198e8-cc5a-11ef-a3e7-482ae39447e5
++    :setup: Standalone instance + a second backend with 50K users
++    :steps:
++        1. Configure uid with 2.5.13.2 matching rule
++        2. Configure cn with 2.5.13.2 matching rule
++        3. Reindex
++    :expectedresults:
++        1. Success
++        2. Success
++        3. Success
++    """
++
++    inst = topo.standalone
++    tasks = Tasks(inst)
++    be2 = Backends(topo.standalone).get_backend(SUFFIX2)
++    index = be2.get_index('uid')
++    index.replace('nsMatchingRule', '2.5.13.2')
++    index = be2.get_index('cn')
++    index.replace('nsMatchingRule', '2.5.13.2')
++
++    assert tasks.reindex(
++        suffix=SUFFIX2,
++        args={TASK_WAIT: True}
++    ) == 0
++
++
++
+ if __name__ == "__main__":
+     # Run isolated
+     # -s for DEBUG mode
+diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
+index 873eada20..aca638f28 100644
+--- a/dirsrvtests/tests/suites/vlv/regression_test.py
++++ b/dirsrvtests/tests/suites/vlv/regression_test.py
+@@ -138,7 +138,7 @@ def add_users(inst, users_num, suffix=DEFAULT_SUFFIX):
+
+
+ def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
+-                                scope=ldap.SCOPE_SUBTREE, prefix="vlv"):
++                                scope=ldap.SCOPE_SUBTREE, prefix="vlv", vlvsort="cn"):
+     vlv_searches = VLVSearch(inst)
+     vlv_search_properties = {
+         "objectclass": ["top", "vlvSearch"],
+@@ -156,7 +156,7 @@
+     vlv_index_properties = {
+         "objectclass": ["top", "vlvIndex"],
+         "cn": f"{prefix}Idx",
+-        "vlvsort": "cn",
++        "vlvsort": vlvsort,
+     }
+     vlv_index.create(
+         basedn=f"cn={prefix}Srch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
+@@ -266,6 +266,40 @@ def vlv_setup_with_two_backend(topology_st, request):
+     return topology_st
+
+
++@pytest.fixture
++def vlv_setup_with_uid_mr(topology_st, request):
++    inst = topology_st.standalone
++    bename = 'be1'
++    besuffix = f'o={bename}'
++    beh = BackendHandler(inst, { bename: besuffix })
++
++    def fin():
++        # Cleanup function
++        if not DEBUGGING and inst.exists() and inst.status():
++            beh.cleanup()
++
++    request.addfinalizer(fin)
++
++    # Make sure that our backends are not already present.
++    beh.cleanup()
++
++    # Then add the new backend
++    beh.setup()
++
++    index = Index(inst, f'cn=uid,cn=index,cn={bename},cn=ldbm database,cn=plugins,cn=config')
++    index.add('nsMatchingRule', '2.5.13.2')
++    reindex_task = Tasks(inst)
++    assert reindex_task.reindex(
++        suffix=besuffix,
++        attrname='uid',
++        args={TASK_WAIT: True}
++    ) == 0
++
++    topology_st.beh = beh
++    return topology_st
++
++
++
+ @pytest.fixture
+ def freeipa(topology_st):
+     # generate a standalone instance with same vlv config than freeipa
+@@ -939,6 +973,152 @@ def test_vlv_by_keyword(freeipa):
+     assert f'cn={idx},ou=certificateRepository,ou=ca,o=ipaca' in dns
+
+
++def get_timestamp_attr():
++    current_datetime = datetime.now()
++    return f'tsattr-{current_datetime.timestamp()}'.replace(".", "-")
++
++
++def perform_vlv_search(conn, alog, basedn, vlvcrit=True, ssscrit=True, scope=ldap.SCOPE_SUBTREE, sss='cn', filter='(uid=*)'):
++    timestamp = get_timestamp_attr()
++    vlv_control = VLVRequestControl(criticality=vlvcrit,
++                                    before_count=1,
++                                    after_count=1,
++                                    offset=4,
++                                    content_count=0,
++                                    greater_than_or_equal=None,
++                                    context_id=None)
++
++    sss_control = SSSRequestControl(criticality=ssscrit, ordering_rules=[sss])
++    log.info(f'perform_vlv_search: basedn={basedn} vlvcrit={vlvcrit} ssscrit={ssscrit} sss={sss} filter={filter} timestamp={timestamp}')
++
++    with suppress(ldap.LDAPError):
++        result = conn.search_ext_s(
++            base=basedn,
++            scope=scope,
++            filterstr=filter,
++            attrlist=[timestamp],
++            serverctrls=[vlv_control, sss_control]
++        )
++    line = alog.match(f'.*SRCH.*{timestamp}.*')
++    log.info(f'perform_vlv_search: line={line}')
++    match = re.match(r'.*conn=(\d+) op=(\d+).*', line[0])
++    conn,op = match.group(1,2)
++    log.info(f'perform_vlv_search: conn={conn} op={op} ')
++    lines = ''.join(alog.match(f'.*conn={conn} op={op} .*', after_pattern=f'.*{timestamp}.*'))
++    log.info(f'perform_vlv_search: lines={lines}')
++    return lines
++
++
++def test_vlv_logs(vlv_setup_nested_backends):
++    """Test that VLV and SORT lines are properly written
++
++    :id: a1d9ad9e-7cbb-11ef-82e3-083a88554478
++    :setup: Standalone instance with two backends and one level scoped vlv
++    :steps:
++        1. Check that standard search returns 16 valid certificates
++        2. Check that vlv search returns 16 valid certificates
++
++    :expectedresults:
++        1. Should succeed.
++        2. Should succeed.
++ """ ++ inst = vlv_setup_nested_backends.standalone ++ beh = vlv_setup_nested_backends.beh ++ tasks = Tasks(inst) ++ conn = open_new_ldapi_conn(inst.serverid) ++ conn_demo = open_new_ldapi_conn(inst.serverid) ++ alog = DirsrvAccessLog(inst) ++ dn1 = beh.data['be1']['dn'] ++ dn2 = beh.data['be2']['dn'] ++ suffix1 = beh.data['be1']['suffix'] ++ suffix2 = beh.data['be2']['suffix'] ++ conn_demo.bind_s(dn2, DEMO_PW) ++ ++ VLV_FEATURE_DN = 'oid=2.16.840.1.113730.3.4.9,cn=features,cn=config' ++ VLV_DEFAULT_ACI = b'(targetattr != "aci")(version 3.0; acl "VLV Request Control"; ' + \ ++ b'allow( read , search, compare, proxy ) userdn = "ldap:///all";)' ++ VLV_DENY_ACI = b'(targetattr != "aci")(version 3.0; acl "VLV Request Control"; ' + \ ++ f'deny( read , search, compare, proxy ) userdn = "ldap:///{dn2}";)'.encode('utf8') ++ ++ # Set VLV feature ACI ++ mod = (ldap.MOD_REPLACE, 'aci', [ VLV_DEFAULT_ACI, VLV_DENY_ACI ]) ++ conn.modify_s(VLV_FEATURE_DN, [mod,]) ++ ++ # Invalid ACL ++ res = perform_vlv_search(conn_demo, alog, suffix2) ++ assert 'SORT' in res ++ assert 'VLV' in res ++ assert 'err=50 ' in res ++ ++ # Sucessful VLV ++ res = perform_vlv_search(conn, alog, suffix2) ++ assert 'SORT' in res ++ assert 'VLV' in res ++ assert 'err=0 ' in res ++ ++ # Multiple backends SSS and VLV are critical ++ res = perform_vlv_search(conn, alog, suffix1) ++ assert 'SORT' in res ++ assert 'VLV' in res ++ assert 'err=76 ' in res ++ ++ # Multiple backends SSS is critical VLV is not critical ++ res = perform_vlv_search(conn, alog, suffix1, vlvcrit=False) ++ assert 'SORT' in res ++ assert 'VLV' in res ++ assert 'err=12 ' in res ++ ++ # Multiple backends SSS and VLV are not critical ++ res = perform_vlv_search(conn, alog, suffix1, vlvcrit=False, ssscrit=False) ++ assert 'SORT' in res ++ assert 'VLV' in res ++ assert 'err=0 ' in res ++ ++ ++def test_vlv_with_mr(vlv_setup_with_uid_mr): ++ """ ++ Testing vlv having specific matching rule ++ ++ :id: 5e04afe2-beec-11ef-aa84-482ae39447e5 ++ :setup: Standalone with uid have a matching rule index ++ :steps: ++ 1. Append vlvIndex entries then vlvSearch entry in the dse.ldif ++ 2. Restart the server ++ :expectedresults: ++ 1. Should Success. ++ 2. Should Success. 
++ """ ++ inst = vlv_setup_with_uid_mr.standalone ++ beh = vlv_setup_with_uid_mr.beh ++ bename, besuffix = next(iter(beh.bedict.items())) ++ vlv_searches, vlv_index = create_vlv_search_and_index( ++ inst, basedn=besuffix, bename=bename, ++ vlvsort="uid:2.5.13.2") ++ # Reindex the vlv ++ reindex_task = Tasks(inst) ++ assert reindex_task.reindex( ++ suffix=besuffix, ++ attrname=vlv_index.rdn, ++ args={TASK_WAIT: True}, ++ vlv=True ++ ) == 0 ++ ++ inst.restart() ++ users = UserAccounts(inst, besuffix) ++ user_properties = { ++ 'uid': f'a new testuser', ++ 'cn': f'a new testuser', ++ 'sn': 'user', ++ 'uidNumber': '0', ++ 'gidNumber': '0', ++ 'homeDirectory': 'foo' ++ } ++ user = users.create(properties=user_properties) ++ user.delete() ++ assert inst.status() ++ ++ ++ + if __name__ == "__main__": + # Run isolated + # -s for DEBUG mode +diff --git a/ldap/servers/slapd/back-ldbm/cleanup.c b/ldap/servers/slapd/back-ldbm/cleanup.c +index 6b2e9faef..939d8bc4f 100644 +--- a/ldap/servers/slapd/back-ldbm/cleanup.c ++++ b/ldap/servers/slapd/back-ldbm/cleanup.c +@@ -15,12 +15,14 @@ + + #include "back-ldbm.h" + #include "dblayer.h" ++#include "vlv_srch.h" + + int + ldbm_back_cleanup(Slapi_PBlock *pb) + { + struct ldbminfo *li; + Slapi_Backend *be; ++ struct vlvSearch *nextp; + + slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_cleanup", "ldbm backend cleaning up\n"); + slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li); +@@ -45,6 +47,12 @@ ldbm_back_cleanup(Slapi_PBlock *pb) + return 0; + } + ++ /* Release the vlv list */ ++ for (struct vlvSearch *p=be->vlvSearchList; p; p=nextp) { ++ nextp = p->vlv_next; ++ vlvSearch_delete(&p); ++ } ++ + /* + * We check if li is NULL. Because of an issue in how we create backends + * we share the li and plugin info between many unique backends. This causes +diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c +index 716e3c0f3..1463069ad 100644 +--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c ++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c +@@ -23,6 +23,7 @@ + */ + + #include ++#include + #include + #include "mdb_import.h" + #include "../vlv_srch.h" +@@ -152,6 +153,9 @@ static void dbmdb_import_writeq_push(ImportCtx_t *ctx, WriterQueueData_t *wqd); + static int have_workers_finished(ImportJob *job); + struct backentry *dbmdb_import_prepare_worker_entry(WorkerQueueData_t *wqelmnt); + ++/* Mutex needed for extended matching rules */ ++static pthread_mutex_t extended_mr_mutex = PTHREAD_MUTEX_INITIALIZER; ++ + /***************************************************************************/ + /**************************** utility functions ****************************/ + /***************************************************************************/ +@@ -3192,6 +3196,23 @@ is_reindexed_attr(const char *attrname, const ImportCtx_t *ctx, char **list) + return (list && attr_in_list(attrname, list)); + } + ++/* ++ * Determine if vlv require extended matching rule evaluation ++ */ ++static bool ++vlv_has_emr(struct vlvIndex *p) ++{ ++ if (p->vlv_sortkey != NULL) { ++ /* Foreach sorted attribute... 
*/
++        for (int sortattr = 0; p->vlv_sortkey[sortattr] != NULL; sortattr++) {
++            if (p->vlv_sortkey[sortattr]->sk_matchruleoid != NULL) {
++                return true;
++            }
++        }
++    }
++    return false;
++}
++
+ static void
+ process_vlv_index(backentry *ep, ImportWorkerInfo *info)
+ {
+@@ -3214,7 +3235,18 @@ process_vlv_index(backentry *ep, ImportWorkerInfo *info)
+     slapi_pblock_set(pb, SLAPI_BACKEND, be);
+     if (vlv_index && vlv_index->vlv_attrinfo &&
+         is_reindexed_attr(vlv_index->vlv_attrinfo->ai_type , ctx, ctx->indexVlvs)) {
+-        ret = vlv_update_index(vlv_index, (dbi_txn_t*)&txn, inst->inst_li, pb, NULL, ep);
++        if (vlv_has_emr(vlv_index)) {
++            /*
++             * Serialize if there is an extended matching rule, because
++             * matchrule_values_to_keys is not thread safe when indexing
++             * (new mr_indexer are created), but that needs to be double checked.
++             */
++            pthread_mutex_lock(&extended_mr_mutex);
++            ret = vlv_update_index(vlv_index, (dbi_txn_t*)&txn, inst->inst_li, pb, NULL, ep);
++            pthread_mutex_unlock(&extended_mr_mutex);
++        } else {
++            ret = vlv_update_index(vlv_index, (dbi_txn_t*)&txn, inst->inst_li, pb, NULL, ep);
++        }
+     }
+     if (0 != ret) {
+         /* Something went wrong, eg disk filled up */
+diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
+index 35f8173a7..40cc29c89 100644
+--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
+@@ -346,6 +346,7 @@ dbmdb_close(struct ldbminfo *li, int dbmode)
+     }
+
+     return_value |= dbmdb_post_close(li, dbmode);
++    shutdown_mdbtxn();
+
+     return return_value;
+ }
+diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.h b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.h
+index 96e02bf74..fe230d60e 100644
+--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.h
++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.h
+@@ -506,6 +506,7 @@ void dbmdb_free_stats(dbmdb_stats_t **stats);
+ int dbmdb_reset_vlv_file(backend *be, const char *filename);
+
+ /* mdb_txn.c */
++void shutdown_mdbtxn(void);
+ int dbmdb_start_txn(const char *funcname, dbi_txn_t *parent_txn, int flags, dbi_txn_t **txn);
+ int dbmdb_end_txn(const char *funcname, int rc, dbi_txn_t **txn);
+ void init_mdbtxn(dbmdb_ctx_t *ctx);
+diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_txn.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_txn.c
+index 21c53dd8c..74088db89 100644
+--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_txn.c
++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_txn.c
+@@ -41,8 +41,10 @@ cleanup_mdbtxn_stack(void *arg)
+     dbmdb_txn_t *txn2;
+
+     *anchor = NULL;
++    if (anchor == (dbmdb_txn_t **) PR_GetThreadPrivate(thread_private_mdb_txn_stack)) {
++        PR_SetThreadPrivate(thread_private_mdb_txn_stack, NULL);
++    }
+     slapi_ch_free((void**)&anchor);
+-    PR_SetThreadPrivate(thread_private_mdb_txn_stack, NULL);
+     while (txn) {
+         txn2 = txn->parent;
+         TXN_ABORT(TXN(txn));
+@@ -68,6 +70,14 @@ static dbmdb_txn_t **get_mdbtxnanchor(void)
+     return anchor;
+ }
+
++void shutdown_mdbtxn(void)
++{
++    dbmdb_txn_t **anchor = (dbmdb_txn_t **) PR_GetThreadPrivate(thread_private_mdb_txn_stack);
++    if (anchor) {
++        PR_SetThreadPrivate(thread_private_mdb_txn_stack, NULL);
++    }
++}
++
+ static void push_mdbtxn(dbmdb_txn_t *txn)
+ {
+     dbmdb_txn_t **anchor = get_mdbtxnanchor();
+diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
+index 30cd0c76a..46421ef3b 100644
+--- a/ldap/servers/slapd/back-ldbm/dblayer.c
++++ b/ldap/servers/slapd/back-ldbm/dblayer.c
+@@ -454,8 +454,12 @@
+ int 
dblayer_close(struct ldbminfo *li, int dbmode) + { + dblayer_private *priv = (dblayer_private *)li->li_dblayer_private; +- +- return priv->dblayer_close_fn(li, dbmode); ++ int rc = priv->dblayer_close_fn(li, dbmode); ++ if (rc == 0) { ++ /* Clean thread specific data */ ++ dblayer_destroy_txn_stack(); ++ } ++ return rc; + } + + /* Routines for opening and closing random files in the dbi_env_t. +@@ -627,6 +631,9 @@ dblayer_erase_index_file(backend *be, struct attrinfo *a, PRBool use_lock, int n + return 0; + } + struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private; ++ if (NULL == li) { ++ return 0; ++ } + dblayer_private *priv = (dblayer_private *)li->li_dblayer_private; + + return priv->dblayer_rm_db_file_fn(be, a, use_lock, no_force_chkpt); +@@ -1374,6 +1381,17 @@ dblayer_pop_pvt_txn(void) + return; + } + ++void ++dblayer_destroy_txn_stack(void) ++{ ++ /* ++ * Cleanup for the main thread to avoid false/positive leaks from libasan ++ * Note: data is freed because PR_SetThreadPrivate calls the ++ * dblayer_cleanup_txn_stack callback ++ */ ++ PR_SetThreadPrivate(thread_private_txn_stack, NULL); ++} ++ + const char * + dblayer_get_db_suffix(Slapi_Backend *be) + { +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c +index 07f3058a3..30bfd1349 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c +@@ -54,7 +54,7 @@ attrinfo_delete(struct attrinfo **pp) + idl_release_private(*pp); + (*pp)->ai_key_cmp_fn = NULL; + slapi_ch_free((void **)&((*pp)->ai_type)); +- slapi_ch_free((void **)(*pp)->ai_index_rules); ++ charray_free((*pp)->ai_index_rules); + slapi_ch_free((void **)&((*pp)->ai_attrcrypt)); + attr_done(&((*pp)->ai_sattr)); + attrinfo_delete_idlistinfo(&(*pp)->ai_idlistinfo); +diff --git a/ldap/servers/slapd/back-ldbm/matchrule.c b/ldap/servers/slapd/back-ldbm/matchrule.c +index 5d516b9f8..5365e8acf 100644 +--- a/ldap/servers/slapd/back-ldbm/matchrule.c ++++ b/ldap/servers/slapd/back-ldbm/matchrule.c +@@ -107,7 +107,7 @@ destroy_matchrule_indexer(Slapi_PBlock *pb) + * is destroyed + */ + int +-matchrule_values_to_keys(Slapi_PBlock *pb, struct berval **input_values, struct berval ***output_values) ++matchrule_values_to_keys(Slapi_PBlock *pb, Slapi_Value **input_values, struct berval ***output_values) + { + IFP mrINDEX = NULL; + +@@ -135,10 +135,8 @@ matchrule_values_to_keys_sv(Slapi_PBlock *pb, Slapi_Value **input_values, Slapi_ + slapi_pblock_get(pb, SLAPI_PLUGIN_MR_INDEX_SV_FN, &mrINDEX); + if (NULL == mrINDEX) { /* old school - does not have SV function */ + int rc; +- struct berval **bvi = NULL, **bvo = NULL; +- valuearray_get_bervalarray(input_values, &bvi); +- rc = matchrule_values_to_keys(pb, bvi, &bvo); +- ber_bvecfree(bvi); ++ struct berval **bvo = NULL; ++ rc = matchrule_values_to_keys(pb, input_values, &bvo); + /* note - the indexer owns bvo and will free it when destroyed */ + valuearray_init_bervalarray(bvo, output_values); + /* store output values in SV form - caller expects SLAPI_PLUGIN_MR_KEYS is Slapi_Value** */ +diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +index a1e57c172..5c26d44a2 100644 +--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h ++++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +@@ -88,6 +88,7 @@ int dblayer_erase_index_file(backend *be, struct attrinfo *a, PRBool use_lock, i + int dblayer_get_id2entry(backend *be, dbi_db_t **ppDB); + int dblayer_get_changelog(backend *be, dbi_db_t ** 
ppDB, int create);
+ int dblayer_release_id2entry(backend *be, dbi_db_t *pDB);
++void dblayer_destroy_txn_stack(void);
+ int dblayer_txn_init(struct ldbminfo *li, back_txn *txn);
+ int dblayer_txn_begin(backend *be, back_txnid parent_txn, back_txn *txn);
+ int dblayer_txn_begin_ext(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock);
+@@ -541,7 +542,7 @@ int compute_allids_limit(Slapi_PBlock *pb, struct ldbminfo *li);
+ */
+ int create_matchrule_indexer(Slapi_PBlock **pb, char *matchrule, char *type);
+ int destroy_matchrule_indexer(Slapi_PBlock *pb);
+-int matchrule_values_to_keys(Slapi_PBlock *pb, struct berval **input_values, struct berval ***output_values);
++int matchrule_values_to_keys(Slapi_PBlock *pb, Slapi_Value **input_values, struct berval ***output_values);
+ int matchrule_values_to_keys_sv(Slapi_PBlock *pb, Slapi_Value **input_values, Slapi_Value ***output_values);
+
+ /*
+diff --git a/ldap/servers/slapd/back-ldbm/sort.c b/ldap/servers/slapd/back-ldbm/sort.c
+index 65af5dcf9..f6a77b39a 100644
+--- a/ldap/servers/slapd/back-ldbm/sort.c
++++ b/ldap/servers/slapd/back-ldbm/sort.c
+@@ -528,30 +528,18 @@ compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s, baggage_carrier *bc, int *e
+ valuearray_get_bervalarray(valueset_get_valuearray(&attr_b->a_present_values), &value_b);
+ } else {
+ /* Match rule case */
+- struct berval **actual_value_a = NULL;
+- struct berval **actual_value_b = NULL;
+- struct berval **temp_value = NULL;
++ Slapi_Value **va_a = valueset_get_valuearray(&attr_a->a_present_values);
++ Slapi_Value **va_b = valueset_get_valuearray(&attr_b->a_present_values);
+
+- valuearray_get_bervalarray(valueset_get_valuearray(&attr_a->a_present_values), &actual_value_a);
+- valuearray_get_bervalarray(valueset_get_valuearray(&attr_b->a_present_values), &actual_value_b);
+- matchrule_values_to_keys(this_one->mr_pb, actual_value_a, &temp_value);
+- /* Now copy it, so the second call doesn't crap on it */
+- value_a = slapi_ch_bvecdup(temp_value); /* Really, we'd prefer to not call the chXXX variant...*/
+- matchrule_values_to_keys(this_one->mr_pb, actual_value_b, &value_b);
++ matchrule_values_to_keys(this_one->mr_pb, va_a, &value_a);
++ /* Plugin owns the memory ==> duplicate the key before the next call garbles it */
++ value_a = slapi_ch_bvecdup(value_a);
++ matchrule_values_to_keys(this_one->mr_pb, va_b, &value_b);
+
+- if ((actual_value_a && !value_a) ||
+- (actual_value_b && !value_b)) {
+- ber_bvecfree(actual_value_a);
+- ber_bvecfree(actual_value_b);
+- CACHE_RETURN(&inst->inst_cache, &a);
+- CACHE_RETURN(&inst->inst_cache, &b);
+- *error = 1;
+- return 0;
++ if ((va_a && !value_a) || (va_b && !value_b)) {
++ result = 0;
++ goto bail;
+ }
+- if (actual_value_a)
+- ber_bvecfree(actual_value_a);
+- if (actual_value_b)
+- ber_bvecfree(actual_value_b);
+ }
+ /* Compare them */
+ if (!order) {
+@@ -574,9 +562,10 @@ compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s, baggage_carrier *bc, int *e
+ }
+ /* If so, proceed to the next attribute for comparison */
+ }
++ *error = 0;
++bail:
+ CACHE_RETURN(&inst->inst_cache, &a);
+ CACHE_RETURN(&inst->inst_cache, &b);
+- *error = 0;
+ return result;
+ }
+
+diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c
+index 2a7747dfa..4e40d1a41 100644
+--- a/ldap/servers/slapd/back-ldbm/vlv.c
++++ b/ldap/servers/slapd/back-ldbm/vlv.c
+@@ -705,7 +705,7 @@ vlv_getindices(IFP callback_fn, void *param, backend *be)
+ * generate the same composite key, so we append the EntryID
+ * to ensure the
uniqueness of the key. + * +- * Always creates a key. Never returns NULL. ++ * May return NULL in case of errors (typically in some configuration error cases) + */ + static struct vlv_key * + vlv_create_key(struct vlvIndex *p, struct backentry *e) +@@ -759,10 +759,8 @@ vlv_create_key(struct vlvIndex *p, struct backentry *e) + /* Matching rule. Do the magic mangling. Plugin owns the memory. */ + if (p->vlv_mrpb[sortattr] != NULL) { + /* xxxPINAKI */ +- struct berval **bval = NULL; + Slapi_Value **va = valueset_get_valuearray(&attr->a_present_values); +- valuearray_get_bervalarray(va, &bval); +- matchrule_values_to_keys(p->vlv_mrpb[sortattr], bval, &value); ++ matchrule_values_to_keys(p->vlv_mrpb[sortattr], va, &value); + } + } + +@@ -882,6 +880,13 @@ do_vlv_update_index(back_txn *txn, struct ldbminfo *li, Slapi_PBlock *pb, struct + } + + key = vlv_create_key(pIndex, entry); ++ if (key == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "vlv_create_key", "Unable to generate vlv %s index key." ++ " There may be a configuration issue.\n", pIndex->vlv_name); ++ dblayer_release_index_file(be, pIndex->vlv_attrinfo, db); ++ return rc; ++ } ++ + if (NULL != txn) { + db_txn = txn->back_txn_txn; + } else { +@@ -1073,11 +1078,11 @@ vlv_create_matching_rule_value(Slapi_PBlock *pb, struct berval *original_value) + struct berval **value = NULL; + if (pb != NULL) { + struct berval **outvalue = NULL; +- struct berval *invalue[2]; +- invalue[0] = original_value; /* jcm: cast away const */ +- invalue[1] = NULL; ++ Slapi_Value v_in = {0}; ++ Slapi_Value *va_in[2] = { &v_in, NULL }; ++ slapi_value_init_berval(&v_in, original_value); + /* The plugin owns the memory it returns in outvalue */ +- matchrule_values_to_keys(pb, invalue, &outvalue); ++ matchrule_values_to_keys(pb, va_in, &outvalue); + if (outvalue != NULL) { + value = slapi_ch_bvecdup(outvalue); + } +@@ -1726,11 +1731,8 @@ retry: + PRBool needFree = PR_FALSE; + + if (sort_control->mr_pb != NULL) { +- struct berval **tmp_entry_value = NULL; +- +- valuearray_get_bervalarray(csn_value, &tmp_entry_value); + /* Matching rule. Do the magic mangling. Plugin owns the memory. 
*/ +- matchrule_values_to_keys(sort_control->mr_pb, /* xxxPINAKI needs modification attr->a_vals */ tmp_entry_value, &entry_value); ++ matchrule_values_to_keys(sort_control->mr_pb, csn_value, &entry_value); + } else { + valuearray_get_bervalarray(csn_value, &entry_value); + needFree = PR_TRUE; /* entry_value is a copy */ +diff --git a/ldap/servers/slapd/back-ldbm/vlv_srch.c b/ldap/servers/slapd/back-ldbm/vlv_srch.c +index 6b55fa7fd..5978af48f 100644 +--- a/ldap/servers/slapd/back-ldbm/vlv_srch.c ++++ b/ldap/servers/slapd/back-ldbm/vlv_srch.c +@@ -190,6 +190,9 @@ vlvSearch_delete(struct vlvSearch **ppvs) + { + if (ppvs != NULL && *ppvs != NULL) { + struct vlvIndex *pi, *ni; ++ if ((*ppvs)->vlv_e) { ++ slapi_entry_free((struct slapi_entry *)((*ppvs)->vlv_e)); ++ } + slapi_sdn_free(&((*ppvs)->vlv_dn)); + slapi_ch_free((void **)&((*ppvs)->vlv_name)); + slapi_sdn_free(&((*ppvs)->vlv_base)); +@@ -204,7 +207,6 @@ vlvSearch_delete(struct vlvSearch **ppvs) + pi = ni; + } + slapi_ch_free((void **)ppvs); +- *ppvs = NULL; + } + } + +diff --git a/ldap/servers/slapd/generation.c b/ldap/servers/slapd/generation.c +index c4f20f793..89f097322 100644 +--- a/ldap/servers/slapd/generation.c ++++ b/ldap/servers/slapd/generation.c +@@ -93,9 +93,13 @@ get_server_dataversion() + lenstr *l = NULL; + Slapi_Backend *be; + char *cookie; ++ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + ++ /* Serialize to avoid race condition */ ++ pthread_mutex_lock(&mutex); + /* we already cached the copy - just return it */ + if (server_dataversion_id != NULL) { ++ pthread_mutex_unlock(&mutex); + return server_dataversion_id; + } + +@@ -130,5 +134,6 @@ get_server_dataversion() + server_dataversion_id = slapi_ch_strdup(l->ls_buf); + } + lenstr_free(&l); ++ pthread_mutex_unlock(&mutex); + return server_dataversion_id; + } +diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c +index aeba95dc7..b262820c5 100644 +--- a/ldap/servers/slapd/plugin_mr.c ++++ b/ldap/servers/slapd/plugin_mr.c +@@ -392,29 +392,18 @@ mr_wrap_mr_index_sv_fn(Slapi_PBlock *pb) + return rc; + } + +-/* this function takes SLAPI_PLUGIN_MR_VALUES as struct berval ** and ++/* this function takes SLAPI_PLUGIN_MR_VALUES as Slapi_Value ** and + returns SLAPI_PLUGIN_MR_KEYS as struct berval ** + */ + static int + mr_wrap_mr_index_fn(Slapi_PBlock *pb) + { + int rc = -1; +- struct berval **in_vals = NULL; + struct berval **out_vals = NULL; + struct mr_private *mrpriv = NULL; +- Slapi_Value **in_vals_sv = NULL; + Slapi_Value **out_vals_sv = NULL; + +- slapi_pblock_get(pb, SLAPI_PLUGIN_MR_VALUES, &in_vals); /* get bervals */ +- /* convert bervals to sv ary */ +- valuearray_init_bervalarray(in_vals, &in_vals_sv); +- slapi_pblock_set(pb, SLAPI_PLUGIN_MR_VALUES, in_vals_sv); /* use sv */ + rc = mr_wrap_mr_index_sv_fn(pb); +- /* clean up in_vals_sv */ +- valuearray_free(&in_vals_sv); +- /* restore old in_vals */ +- /* coverity[var_deref_model] */ +- slapi_pblock_set(pb, SLAPI_PLUGIN_MR_VALUES, in_vals); + /* get result sv keys */ + slapi_pblock_get(pb, SLAPI_PLUGIN_MR_KEYS, &out_vals_sv); + /* convert to bvec */ +diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py +index 0ed00a4a7..1319fa0cd 100644 +--- a/src/lib389/lib389/backend.py ++++ b/src/lib389/lib389/backend.py +@@ -1105,6 +1105,16 @@ class Backends(DSLdapObjects): + for be in sorted(self.list(), key=lambda be: len(be.get_suffix()), reverse=True): + be.delete() + ++ def get_backend(self, suffix): ++ """ ++ Return the backend associated with the provided suffix. 
++ """ ++ suffix_l = suffix.lower() ++ for be in self.list(): ++ if be.get_attr_val_utf8_l('nsslapd-suffix') == suffix_l: ++ return be ++ return None ++ + + class DatabaseConfig(DSLdapObject): + """Backend Database configuration +-- +2.48.1 + diff --git a/SOURCES/0024-Issue-6485-Fix-double-free-in-USN-cleanup-task.patch b/SOURCES/0024-Issue-6485-Fix-double-free-in-USN-cleanup-task.patch new file mode 100644 index 0000000..eed98ee --- /dev/null +++ b/SOURCES/0024-Issue-6485-Fix-double-free-in-USN-cleanup-task.patch @@ -0,0 +1,52 @@ +From 6286aee26537f4e1e7dd42af538c75ccd7cf9af8 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 8 Jan 2025 12:57:52 -0500 +Subject: [PATCH] Issue 6485 - Fix double free in USN cleanup task + +Description: + +ASAN report shows double free of bind dn in the USN cleanup task data. The bind +dn was passed as a reference so it should never have to be freed by the cleanup +task. + +Relates: https://github.com/389ds/389-ds-base/issues/6485 + +Reviewed by: tbordaz(Thanks!) +--- + ldap/servers/plugins/usn/usn_cleanup.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +diff --git a/ldap/servers/plugins/usn/usn_cleanup.c b/ldap/servers/plugins/usn/usn_cleanup.c +index 8b5938350..fcc6ebfb4 100644 +--- a/ldap/servers/plugins/usn/usn_cleanup.c ++++ b/ldap/servers/plugins/usn/usn_cleanup.c +@@ -240,7 +240,7 @@ usn_cleanup_add(Slapi_PBlock *pb, + char *suffix = NULL; + char *backend_str = NULL; + char *maxusn = NULL; +- char *bind_dn; ++ char *bind_dn = NULL; + struct usn_cleanup_data *cleanup_data = NULL; + int rv = SLAPI_DSE_CALLBACK_OK; + Slapi_Task *task = NULL; +@@ -323,8 +323,7 @@ usn_cleanup_add(Slapi_PBlock *pb, + suffix = NULL; /* don't free in this function */ + cleanup_data->maxusn_to_delete = maxusn; + maxusn = NULL; /* don't free in this function */ +- cleanup_data->bind_dn = bind_dn; +- bind_dn = NULL; /* don't free in this function */ ++ cleanup_data->bind_dn = slapi_ch_strdup(bind_dn); + slapi_task_set_data(task, cleanup_data); + + /* start the USN tombstone cleanup task as a separate thread */ +@@ -363,7 +362,6 @@ usn_cleanup_task_destructor(Slapi_Task *task) + slapi_ch_free_string(&mydata->suffix); + slapi_ch_free_string(&mydata->maxusn_to_delete); + slapi_ch_free_string(&mydata->bind_dn); +- /* Need to cast to avoid a compiler warning */ + slapi_ch_free((void **)&mydata); + } + } +-- +2.48.1 + diff --git a/SOURCES/0025-Issue-6427-fix-various-memory-leaks.patch b/SOURCES/0025-Issue-6427-fix-various-memory-leaks.patch new file mode 100644 index 0000000..98c3b08 --- /dev/null +++ b/SOURCES/0025-Issue-6427-fix-various-memory-leaks.patch @@ -0,0 +1,219 @@ +From 1b48c4174857db5c7c1dc1aa6ad3d0a5155c72c5 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 21 Nov 2024 16:56:45 -0500 +Subject: [PATCH] Issue 6427 - fix various memory leaks + +Description: + +This was all on main branch + +- Display attributes were leaked on searches +- Work thread indexes were leaked at shutdown +- Accept thread was leaked at shutdown +- When we reused a search pblock, we did not free the "operation" and "search +attributes" before overwriting them with freshly allocated structures. + +Fixes: https://github.com/389ds/389-ds-base/issues/6427 + +Reviewed by: progier (Thanks!) 
+--- + ldap/servers/slapd/connection.c | 30 ++++++++++++++----------- + ldap/servers/slapd/daemon.c | 7 ++++-- + ldap/servers/slapd/plugin_internal_op.c | 18 +++++++++++++++ + ldap/servers/slapd/proto-slap.h | 1 + + ldap/servers/slapd/result.c | 5 ++--- + 5 files changed, 43 insertions(+), 18 deletions(-) + +diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c +index 07d629475..bb4fcd77f 100644 +--- a/ldap/servers/slapd/connection.c ++++ b/ldap/servers/slapd/connection.c +@@ -40,20 +40,22 @@ static void log_ber_too_big_error(const Connection *conn, + + static PRStack *op_stack; /* stack of Slapi_Operation * objects so we don't have to malloc/free every time */ + static PRInt32 op_stack_size; /* size of op_stack */ +- + struct Slapi_op_stack + { + PRStackElem stackelem; /* must be first in struct for PRStack to work */ + Slapi_Operation *op; + }; + +-static void add_work_q(work_q_item *, struct Slapi_op_stack *); +-static work_q_item *get_work_q(struct Slapi_op_stack **); ++/* worker threads */ ++static int32_t max_threads = 0; ++static int32_t *threads_indexes = NULL; + + /* + * We maintain a global work queue of items that have not yet + * been handed off to an operation thread. + */ ++static void add_work_q(work_q_item *, struct Slapi_op_stack *); ++static work_q_item *get_work_q(struct Slapi_op_stack **); + struct Slapi_work_q + { + PRStackElem stackelem; /* must be first in struct for PRStack to work */ +@@ -430,9 +432,7 @@ void + init_op_threads() + { + pthread_condattr_t condAttr; +- int32_t max_threads = config_get_threadnumber(); + int32_t rc; +- int32_t *threads_indexes; + + /* Initialize the locks and cv */ + if ((rc = pthread_mutex_init(&work_q_lock, NULL)) != 0) { +@@ -463,13 +463,13 @@ init_op_threads() + op_stack = PR_CreateStack("connection_operation"); + alloc_per_thread_snmp_vars(max_threads); + init_thread_private_snmp_vars(); +- + ++ max_threads = config_get_threadnumber(); + threads_indexes = (int32_t *) slapi_ch_calloc(max_threads, sizeof(int32_t)); + for (size_t i = 0; i < max_threads; i++) { + threads_indexes[i] = i + 1; /* idx 0 is reserved for global snmp_vars */ + } +- ++ + /* start the operation threads */ + for (size_t i = 0; i < max_threads; i++) { + PR_SetConcurrency(4); +@@ -486,10 +486,14 @@ init_op_threads() + g_incr_active_threadcnt(); + } + } +- /* Here we should free thread_indexes, but because of the dynamic of the new +- * threads (connection_threadmain) we are not sure when it can be freed. +- * Let's accept that unique initialization leak (typically 32 to 64 bytes) +- */ ++ /* We will free threads_indexes at the very end of slapd_daemon() */ ++} ++ ++/* Called at shutdown to silence ASAN and friends */ ++void ++free_worker_thread_indexes() ++{ ++ slapi_ch_free((void **)&threads_indexes); + } + + static void +@@ -1227,7 +1231,7 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int * + /* Process HAProxy header */ + if (conn->c_haproxyheader_read == 0) { + conn->c_haproxyheader_read = 1; +- /* ++ /* + * We only check for HAProxy header if nsslapd-haproxy-trusted-ip is configured. + * If it is we proceed with the connection only if it's comming from trusted + * proxy server with correct and complete header. +@@ -1253,7 +1257,7 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int * + /* Now, reset RC and set it to 0 only if a match is found */ + haproxy_rc = -1; + +- /* ++ /* + * We need to allow a configuration where DS instance and HAProxy are on the same machine. 
+ * In this case, we need to check if + * the HAProxy client IP (which will be a loopback address) matches one of the the trusted IP addresses, +diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c +index 1a20796b9..c09e54c7f 100644 +--- a/ldap/servers/slapd/daemon.c ++++ b/ldap/servers/slapd/daemon.c +@@ -1323,8 +1323,11 @@ slapd_daemon(daemon_ports_t *ports) + slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", "Failed to remove pid file %s\n", get_pid_file()); + } + } +-} + ++ /* final cleanup for ASAN and other analyzers */ ++ PR_JoinThread(accept_thread_p); ++ free_worker_thread_indexes(); ++} + + void + ct_thread_cleanup(void) +@@ -1654,7 +1657,7 @@ handle_pr_read_ready(Connection_Table *ct, int list_num, PRIntn num_poll __attri + continue; + } + +- /* Try to get connection mutex, if not available just skip the connection and ++ /* Try to get connection mutex, if not available just skip the connection and + * process other connections events. May generates cpu load for listening thread + * if connection mutex is held for a long time + */ +diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c +index 0d553d1ba..0164a70be 100644 +--- a/ldap/servers/slapd/plugin_internal_op.c ++++ b/ldap/servers/slapd/plugin_internal_op.c +@@ -249,6 +249,12 @@ slapi_search_internal_set_pb(Slapi_PBlock *pb, const char *base, int scope, cons + return; + } + ++ /* Free op just in case this pb is being reused */ ++ slapi_pblock_get(pb, SLAPI_OPERATION, &op); ++ operation_free(&op, NULL); ++ slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &tmp_attrs); ++ slapi_ch_array_free(tmp_attrs); ++ + op = internal_operation_new(SLAPI_OPERATION_SEARCH, operation_flags); + slapi_pblock_set(pb, SLAPI_OPERATION, op); + slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET_DN, (void *)base); +@@ -276,6 +282,12 @@ slapi_search_internal_set_pb_ext(Slapi_PBlock *pb, Slapi_DN *sdn, int scope, con + return; + } + ++ /* Free op/attrs just in case this pb is being reused */ ++ slapi_pblock_get(pb, SLAPI_OPERATION, &op); ++ operation_free(&op, NULL); ++ slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &tmp_attrs); ++ slapi_ch_array_free(tmp_attrs); ++ + op = internal_operation_new(SLAPI_OPERATION_SEARCH, operation_flags); + slapi_pblock_set(pb, SLAPI_OPERATION, op); + slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET_DN, +@@ -305,6 +317,12 @@ slapi_seq_internal_set_pb(Slapi_PBlock *pb, char *base, int type, char *attrname + return; + } + ++ /* Free op/attrs just in case this pb is being reused */ ++ slapi_pblock_get(pb, SLAPI_OPERATION, &op); ++ operation_free(&op, NULL); ++ slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &tmp_attrs); ++ slapi_ch_array_free(tmp_attrs); ++ + op = internal_operation_new(SLAPI_OPERATION_SEARCH, operation_flags); + slapi_pblock_set(pb, SLAPI_OPERATION, op); + slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET_DN, (void *)base); +diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h +index 214f2e609..2031cfb7d 100644 +--- a/ldap/servers/slapd/proto-slap.h ++++ b/ldap/servers/slapd/proto-slap.h +@@ -1543,6 +1543,7 @@ int connection_is_free(Connection *conn, int user_lock); + int connection_is_active_nolock(Connection *conn); + ber_slen_t openldap_read_function(Sockbuf_IO_Desc *sbiod, void *buf, ber_len_t len); + int32_t connection_has_psearch(Connection *c); ++void free_worker_thread_indexes(void); + + /* + * saslbind.c +diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c +index 97af5a2b8..9772e8213 100644 +--- a/ldap/servers/slapd/result.c ++++ 
b/ldap/servers/slapd/result.c +@@ -1392,9 +1392,8 @@ exit: + if (NULL != typelist) { + slapi_vattr_attrs_free(&typelist, typelist_flags); + } +- if (NULL != default_attrs) { +- slapi_ch_free((void **)&default_attrs); +- } ++ slapi_ch_array_free(default_attrs); ++ + return rc; + } + +-- +2.48.1 + diff --git a/SOURCES/0026-Issue-6442-Fix-latest-covscan-memory-leaks.patch b/SOURCES/0026-Issue-6442-Fix-latest-covscan-memory-leaks.patch new file mode 100644 index 0000000..175870b --- /dev/null +++ b/SOURCES/0026-Issue-6442-Fix-latest-covscan-memory-leaks.patch @@ -0,0 +1,1002 @@ +From 0e958c2482c37e7de90c4b7bbc821c0c0acc05a0 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 10 Dec 2024 11:49:27 -0500 +Subject: [PATCH] Issue 6442 - Fix latest covscan memory leaks + +Description: + +Fixed a majority of the memory leaks (except ACL leaks). + +Fixes: https://github.com/389ds/389-ds-base/issues/6442 + +Reviewed by: progier(Thanks!) +--- + dirsrvtests/tests/suites/basic/basic_test.py | 3 +- + ldap/servers/plugins/acctpolicy/acct_plugin.c | 1 - + ldap/servers/plugins/acl/aclgroup.c | 13 +++++--- + ldap/servers/plugins/acl/acllas.c | 7 +++-- + .../plugins/chainingdb/cb_conn_stateless.c | 9 +++--- + ldap/servers/plugins/replication/cl5_api.c | 16 +++++++++- + ldap/servers/plugins/replication/cl5_init.c | 5 +-- + .../plugins/replication/repl5_connection.c | 4 +-- + .../plugins/replication/repl5_replica.c | 7 +++-- + .../plugins/replication/repl5_tot_protocol.c | 10 +++--- + .../plugins/replication/repl_cleanallruv.c | 31 ++++++++++++------- + .../plugins/replication/repl_controls.c | 10 ++++-- + ldap/servers/plugins/retrocl/retrocl_trim.c | 4 +-- + ldap/servers/plugins/syntaxes/inchain.c | 3 +- + .../slapd/back-ldbm/db-bdb/bdb_import.c | 2 +- + .../back-ldbm/db-bdb/bdb_import_threads.c | 3 +- + .../slapd/back-ldbm/db-bdb/bdb_layer.c | 18 ++++++----- + ldap/servers/slapd/back-ldbm/index.c | 3 +- + ldap/servers/slapd/back-ldbm/init.c | 2 ++ + ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 2 +- + ldap/servers/slapd/back-ldbm/vlv.c | 7 ++--- + ldap/servers/slapd/daemon.c | 1 + + ldap/servers/slapd/entry.c | 3 ++ + ldap/servers/slapd/generation.c | 5 +++ + ldap/servers/slapd/libglobs.c | 12 +++++-- + ldap/servers/slapd/main.c | 3 +- + ldap/servers/slapd/passwd_extop.c | 5 ++- + ldap/servers/slapd/plugin_internal_op.c | 17 ++-------- + ldap/servers/slapd/proto-slap.h | 2 +- + ldap/servers/slapd/result.c | 9 +++++- + ldap/servers/slapd/task.c | 5 +-- + ldap/servers/slapd/tools/ldclt/ldclt.c | 1 + + ldap/servers/slapd/vattr.c | 1 + + lib/libsi18n/reshash.c | 23 ++++++-------- + src/rewriters/adfilter.c | 1 + + 35 files changed, 152 insertions(+), 96 deletions(-) + +diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py +index a7efcc825..02cb13b10 100644 +--- a/dirsrvtests/tests/suites/basic/basic_test.py ++++ b/dirsrvtests/tests/suites/basic/basic_test.py +@@ -545,8 +545,7 @@ def test_basic_import_export(topology_st, import_example_ldif): + time.sleep(0.5) + + # Good as place as any to quick test the task has some expected attributes +- if ds_is_newer('1.4.1.2'): +- assert import_task.present('nstaskcreated') ++ assert import_task.present('nstaskcreated') + assert import_task.present('nstasklog') + assert import_task.present('nstaskcurrentitem') + assert import_task.present('nstasktotalitems') +diff --git a/ldap/servers/plugins/acctpolicy/acct_plugin.c b/ldap/servers/plugins/acctpolicy/acct_plugin.c +index af566b934..36d7ba19b 100644 +--- 
a/ldap/servers/plugins/acctpolicy/acct_plugin.c ++++ b/ldap/servers/plugins/acctpolicy/acct_plugin.c +@@ -272,7 +272,6 @@ acct_update_login_history(const char *dn, char *timestr) + } + /* first time round */ + } else if (cfg->login_history_size > 0) { +- login_hist = (char **)slapi_ch_calloc(2, sizeof(char *)); + /* alloc new array and append latest value */ + login_hist = (char **)slapi_ch_realloc((char *)login_hist, sizeof(char *) * (num_entries + 2)); + login_hist[num_entries] = slapi_ch_smprintf("%s", timestr); +diff --git a/ldap/servers/plugins/acl/aclgroup.c b/ldap/servers/plugins/acl/aclgroup.c +index 54bb23bc9..a5b0426f9 100644 +--- a/ldap/servers/plugins/acl/aclgroup.c ++++ b/ldap/servers/plugins/acl/aclgroup.c +@@ -263,11 +263,13 @@ aclg_get_usersGroup(struct acl_pblock *aclpb, char *n_dn) + return NULL; + } + +- if (aclpb->aclpb_groupinfo) +- return aclpb->aclpb_groupinfo; +- + ACLG_LOCK_GROUPCACHE_WRITE(); + ++ if (aclpb->aclpb_groupinfo) { ++ ACLG_ULOCK_GROUPCACHE_WRITE(); ++ return aclpb->aclpb_groupinfo; ++ } ++ + /* try it one more time. We might have one in the meantime */ + aclg_init_userGroup(aclpb, n_dn, 1 /* got the lock */); + if (aclpb->aclpb_groupinfo) { +@@ -337,11 +339,12 @@ aclg_get_usersGroup(struct acl_pblock *aclpb, char *n_dn) + + aclUserGroups->aclg_num_userGroups++; + ++ /* Now hang on to it */ ++ aclpb->aclpb_groupinfo = u_group; ++ + /* Put it in the queue */ + ACLG_ULOCK_GROUPCACHE_WRITE(); + +- /* Now hang on to it */ +- aclpb->aclpb_groupinfo = u_group; + return u_group; + } + +diff --git a/ldap/servers/plugins/acl/acllas.c b/ldap/servers/plugins/acl/acllas.c +index 9f643ea87..792216cca 100644 +--- a/ldap/servers/plugins/acl/acllas.c ++++ b/ldap/servers/plugins/acl/acllas.c +@@ -4305,6 +4305,8 @@ acllas_replace_attr_macro(char *rule, lasInfo *lasinfo) + * list which replaces the old one. + */ + l = acl_strstr(&str[0], ")"); ++ /* coverity false positive: l + 2 will always be positive */ ++ /* coverity[integer_overflow] */ + macro_str = slapi_ch_malloc(l + 2); + strncpy(macro_str, &str[0], l + 1); + macro_str[l + 1] = '\0'; +@@ -4320,18 +4322,19 @@ acllas_replace_attr_macro(char *rule, lasInfo *lasinfo) + + str++; /* skip the . 
*/ + l = acl_strstr(&str[0], ")"); +- if (l == -1){ ++ if (l == -1) { + slapi_log_err(SLAPI_LOG_ERR, plugin_name, + "acllas_replace_attr_macro - Invalid macro str \"%s\".", str); + slapi_ch_free_string(¯o_str); + charray_free(working_list); + return NULL; + } ++ /* coverity false positive: l + 1 will always be positive */ ++ /* coverity[integer_overflow] */ + macro_attr_name = slapi_ch_malloc(l + 1); + strncpy(macro_attr_name, &str[0], l); + macro_attr_name[l] = '\0'; + +- + slapi_entry_attr_find(e, macro_attr_name, &attr); + if (NULL == attr) { + +diff --git a/ldap/servers/plugins/chainingdb/cb_conn_stateless.c b/ldap/servers/plugins/chainingdb/cb_conn_stateless.c +index c5866f51a..7deead3aa 100644 +--- a/ldap/servers/plugins/chainingdb/cb_conn_stateless.c ++++ b/ldap/servers/plugins/chainingdb/cb_conn_stateless.c +@@ -943,10 +943,10 @@ cb_update_failed_conn_cpt(cb_backend_instance *cb) + { + /* if the chaining BE is already unavailable, we do nothing*/ + time_t now; ++ ++ slapi_lock_mutex(cb->monitor_availability.cpt_lock); + if (cb->monitor_availability.farmserver_state == FARMSERVER_AVAILABLE) { +- slapi_lock_mutex(cb->monitor_availability.cpt_lock); + cb->monitor_availability.cpt++; +- slapi_unlock_mutex(cb->monitor_availability.cpt_lock); + if (cb->monitor_availability.cpt >= CB_NUM_CONN_BEFORE_UNAVAILABILITY) { + /* we reach the limit of authorized failed connections => we setup the chaining BE state to unavailable */ + now = slapi_current_rel_time_t(); +@@ -958,21 +958,22 @@ cb_update_failed_conn_cpt(cb_backend_instance *cb) + "cb_update_failed_conn_cpt - Farm server unavailable"); + } + } ++ slapi_unlock_mutex(cb->monitor_availability.cpt_lock); + } + + void + cb_reset_conn_cpt(cb_backend_instance *cb) + { ++ slapi_lock_mutex(cb->monitor_availability.cpt_lock); + if (cb->monitor_availability.cpt > 0) { +- slapi_lock_mutex(cb->monitor_availability.cpt_lock); + cb->monitor_availability.cpt = 0; + if (cb->monitor_availability.farmserver_state == FARMSERVER_UNAVAILABLE) { + cb->monitor_availability.farmserver_state = FARMSERVER_AVAILABLE; + slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM, + "cb_reset_conn_cpt - Farm server is back"); + } +- slapi_unlock_mutex(cb->monitor_availability.cpt_lock); + } ++ slapi_unlock_mutex(cb->monitor_availability.cpt_lock); + } + + int +diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c +index 92fb776dc..0b3dee86b 100644 +--- a/ldap/servers/plugins/replication/cl5_api.c ++++ b/ldap/servers/plugins/replication/cl5_api.c +@@ -558,7 +558,15 @@ cl5ImportLDIF(const char *clDir, const char *ldifFile, Replica *replica) + + /* Set changelog state to import */ + pthread_mutex_lock(&(cldb->stLock)); ++ ++ if (cldb->dbState == CL5_STATE_IMPORT) { ++ pthread_mutex_unlock(&(cldb->stLock)); ++ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, ++ "cl5ImportLDIF - changelog import already in progress\n"); ++ return CL5_IGNORE_OP; ++ } + cldb->dbState = CL5_STATE_IMPORT; ++ + pthread_mutex_unlock(&(cldb->stLock)); + + /* Wait for all the threads to stop */ +@@ -1363,6 +1371,7 @@ cldb_SetReplicaDB(Replica *replica, void *arg) + if (rc != CL5_SUCCESS) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, + "cldb_SetReplicaDB - failed to configure changelog trimming\n"); ++ changelog5_config_done(&config); + return CL5_BAD_DATA; + } + +@@ -1578,7 +1587,12 @@ _cl5Entry2DBData(const CL5Entry *entry, char **data, PRUint32 *len, void *clcryp + /* write change type */ + (*pos) = (unsigned char)op->operation_type; + pos++; +- 
/* write time */ ++ /* ++ * write time ++ * --> Y2K38 - will hopefully be fixed by a future C standard htonll() ++ * function. Then we can move "t" and "entry->time" to be uint64_t ++ * to fix the issue. ++ */ + t = PR_htonl((PRUint32)entry->time); + memcpy(pos, &t, sizeof(t)); + pos += sizeof(t); +diff --git a/ldap/servers/plugins/replication/cl5_init.c b/ldap/servers/plugins/replication/cl5_init.c +index b48e8e8a1..ef7f23ef0 100644 +--- a/ldap/servers/plugins/replication/cl5_init.c ++++ b/ldap/servers/plugins/replication/cl5_init.c +@@ -42,7 +42,7 @@ changelog5_upgrade(void) + + if (config.dir == NULL) { + /* we do not have a valid legacy config, nothing to upgrade */ +- return rc; ++ goto done; + } + + replica_enumerate_replicas(_cl5_upgrade_replica, (void *)&config); +@@ -51,6 +51,7 @@ changelog5_upgrade(void) + + rc |= _cl5_upgrade_removeconfig(); + ++done: + changelog5_config_done(&config); + + return rc; +@@ -122,7 +123,7 @@ _cl5_upgrade_replica_config(Replica *replica, changelog5Config *config) + slapi_entry_add_string(config_entry, CONFIG_CHANGELOG_TRIM_ATTRIBUTE, gen_duration(config->trimInterval)); + } + +- /* if changelog encryption is enabled then in the upgrade mode all backends will have ++ /* if changelog encryption is enabled then in the upgrade mode all backends will have + * an encrypted changelog, store the encryption attrs */ + if (config->encryptionAlgorithm) { + slapi_entry_add_string(config_entry, CONFIG_CHANGELOG_ENCRYPTION_ALGORITHM, config->encryptionAlgorithm); +diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c +index bcc71834a..690241ebd 100644 +--- a/ldap/servers/plugins/replication/repl5_connection.c ++++ b/ldap/servers/plugins/replication/repl5_connection.c +@@ -679,8 +679,8 @@ check_flow_control_tot_init(Repl_Connection *conn, int optype, const char *extop + static ConnResult + conn_is_available(Repl_Connection *conn) + { +- time_t poll_timeout_sec = 1; /* Polling for 1sec */ +- time_t yield_delay_msec = 100; /* Delay to wait */ ++ int32_t poll_timeout_sec = 1; /* Polling for 1sec */ ++ int32_t yield_delay_msec = 100; /* Delay to wait */ + time_t start_time = slapi_current_rel_time_t(); + time_t time_now; + ConnResult return_value = CONN_OPERATION_SUCCESS; +diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c +index f2f7086eb..1d5caf7d0 100644 +--- a/ldap/servers/plugins/replication/repl5_replica.c ++++ b/ldap/servers/plugins/replication/repl5_replica.c +@@ -35,7 +35,7 @@ struct replica + ReplicaUpdateDNList updatedn_list; /* list of dns with which a supplier should bind to update this replica */ + Slapi_ValueSet *updatedn_groups; /* set of groups whose memebers are allowed to update replica */ + ReplicaUpdateDNList groupdn_list; /* exploded listof dns from update group */ +- uint32_t updatedn_group_last_check; /* the time of the last group check */ ++ time_t updatedn_group_last_check; /* the time of the last group check */ + int64_t updatedn_group_check_interval; /* the group check interval */ + ReplicaType repl_type; /* is this replica read-only ? 
*/ + ReplicaId repl_rid; /* replicaID */ +@@ -219,7 +219,7 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, + * during replica initialization + */ + rc = _replica_update_entry(r, e, errortext); +- /* add changelog config entry to config ++ /* add changelog config entry to config + * this is only needed for replicas logging changes, + * but for now let it exist for all replicas. Makes handling + * of changing replica flags easier +@@ -2544,7 +2544,7 @@ _replica_get_config_dn(const Slapi_DN *root) + return dn; + } + /* when a replica is added the changelog config entry is created +- * it will only the container entry, specifications for trimming ++ * it will only the container entry, specifications for trimming + * or encyrption need to be added separately + */ + static int +@@ -2876,6 +2876,7 @@ replica_update_state(time_t when __attribute__((unused)), void *arg) + "replica_update_state - Failed to get the config dn for %s\n", + slapi_sdn_get_dn(r->repl_root)); + replica_unlock(r->repl_lock); ++ slapi_mod_done(&smod); + return; + } + pb = slapi_pblock_new(); +diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c +index 83b37ef84..881b252ff 100644 +--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c ++++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c +@@ -43,7 +43,7 @@ typedef struct callback_data + Private_Repl_Protocol *prp; + int rc; + unsigned long num_entries; +- time_t sleep_on_busy; ++ uint32_t sleep_on_busy; + time_t last_busy; + pthread_mutex_t lock; /* Lock to protect access to this structure, the message id list and to force memory barriers */ + PRThread *result_tid; /* The async result thread */ +@@ -481,7 +481,7 @@ retry: + cb_data.prp = prp; + cb_data.rc = 0; + cb_data.num_entries = 1UL; +- cb_data.sleep_on_busy = 0UL; ++ cb_data.sleep_on_busy = 0; + cb_data.last_busy = slapi_current_rel_time_t(); + cb_data.flowcontrol_detection = 0; + pthread_mutex_init(&(cb_data.lock), NULL); +@@ -540,7 +540,7 @@ retry: + cb_data.prp = prp; + cb_data.rc = 0; + cb_data.num_entries = 0UL; +- cb_data.sleep_on_busy = 0UL; ++ cb_data.sleep_on_busy = 0; + cb_data.last_busy = slapi_current_rel_time_t(); + cb_data.flowcontrol_detection = 0; + pthread_mutex_init(&(cb_data.lock), NULL); +@@ -803,7 +803,7 @@ send_entry(Slapi_Entry *e, void *cb_data) + BerElement *bere; + struct berval *bv; + unsigned long *num_entriesp; +- time_t *sleep_on_busyp; ++ uint32_t *sleep_on_busyp; + time_t *last_busyp; + int message_id = 0; + int retval = 0; +@@ -899,7 +899,7 @@ send_entry(Slapi_Entry *e, void *cb_data) + *last_busyp = now; + + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, +- "send_entry - Replica \"%s\" is busy. Waiting %lds while" ++ "send_entry - Replica \"%s\" is busy. 
Waiting %ds while" + " it finishes processing its current import queue\n", + agmt_get_long_name(prp->agmt), *sleep_on_busyp); + DS_Sleep(PR_SecondsToInterval(*sleep_on_busyp)); +diff --git a/ldap/servers/plugins/replication/repl_cleanallruv.c b/ldap/servers/plugins/replication/repl_cleanallruv.c +index b62c2fae0..4052d98fd 100644 +--- a/ldap/servers/plugins/replication/repl_cleanallruv.c ++++ b/ldap/servers/plugins/replication/repl_cleanallruv.c +@@ -2174,19 +2174,26 @@ replica_cleanallruv_thread(void *arg) + "Waiting to process all the updates from the deleted replica..."); + ruv_obj = replica_get_ruv(data->replica); + ruv = object_get_data(ruv_obj); +- while (data->maxcsn && !is_task_aborted(data->rid) && !is_cleaned_rid(data->rid) && !slapi_is_shutting_down()) { +- struct timespec current_time = {0}; +- if (csn_get_replicaid(data->maxcsn) == 0 || +- ruv_covers_csn_cleanallruv(ruv, data->maxcsn) || +- strcasecmp(data->force, "yes") == 0) { +- /* We are caught up, now we can clean the ruv's */ +- break; ++ if (data->maxcsn) { ++ while (!is_task_aborted(data->rid) && ++ !is_cleaned_rid(data->rid) && ++ !slapi_is_shutting_down()) ++ { ++ struct timespec current_time = {0}; ++ ++ if (csn_get_replicaid(data->maxcsn) == 0 || ++ ruv_covers_csn_cleanallruv(ruv, data->maxcsn) || ++ strcasecmp(data->force, "yes") == 0) ++ { ++ /* We are caught up, now we can clean the ruv's */ ++ break; ++ } ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += CLEANALLRUV_SLEEP; ++ pthread_mutex_lock(¬ify_lock); ++ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); ++ pthread_mutex_unlock(¬ify_lock); + } +- clock_gettime(CLOCK_MONOTONIC, ¤t_time); +- current_time.tv_sec += CLEANALLRUV_SLEEP; +- pthread_mutex_lock(¬ify_lock); +- pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); +- pthread_mutex_unlock(¬ify_lock); + } + object_release(ruv_obj); + /* +diff --git a/ldap/servers/plugins/replication/repl_controls.c b/ldap/servers/plugins/replication/repl_controls.c +index 8d50633d6..238f78eb0 100644 +--- a/ldap/servers/plugins/replication/repl_controls.c ++++ b/ldap/servers/plugins/replication/repl_controls.c +@@ -161,7 +161,7 @@ decode_NSDS50ReplUpdateInfoControl(LDAPControl **controlsp, + struct berval superior_uuid_val = {0}; + struct berval csn_val = {0}; + BerElement *tmp_bere = NULL; +- Slapi_Mods modrdn_smods; ++ Slapi_Mods modrdn_smods = {0}; + PRBool got_modrdn_mods = PR_FALSE; + ber_len_t len; + +@@ -232,11 +232,13 @@ decode_NSDS50ReplUpdateInfoControl(LDAPControl **controlsp, + + if (NULL != modrdn_mods && got_modrdn_mods) { + *modrdn_mods = slapi_mods_get_ldapmods_passout(&modrdn_smods); ++ } else { ++ slapi_mods_done(&modrdn_smods); + } +- slapi_mods_done(&modrdn_smods); + + rc = 1; + } else { ++ /* Control not present */ + rc = 0; + } + loser: +@@ -258,6 +260,10 @@ loser: + ldap_memfree(csn_val.bv_val); + csn_val.bv_val = NULL; + } ++ if (rc < 1) { ++ /* no control or error, free smods */ ++ slapi_mods_done(&modrdn_smods); ++ } + return rc; + } + +diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c +index 158f801cc..8fcd3d32b 100644 +--- a/ldap/servers/plugins/retrocl/retrocl_trim.c ++++ b/ldap/servers/plugins/retrocl/retrocl_trim.c +@@ -239,7 +239,7 @@ trim_changelog(void) + time_t now_maxage; /* used for checking if the changelog entry can be trimmed */ + changeNumber first_in_log = 0, last_in_log = 0; + int num_deleted = 0; +- int max_age, last_trim, trim_interval; ++ time_t max_age, last_trim, trim_interval; + + now_interval = 
slapi_current_rel_time_t(); /* monotonic time for interval */ + +@@ -297,7 +297,7 @@ trim_changelog(void) + } + } + } else { +- slapi_log_err(SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME, "Not yet time to trim: %ld < (%d+%d)\n", ++ slapi_log_err(SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME, "Not yet time to trim: %ld < (%ld+%ld)\n", + now_interval, last_trim, trim_interval); + } + PR_Lock(ts.ts_s_trim_mutex); +diff --git a/ldap/servers/plugins/syntaxes/inchain.c b/ldap/servers/plugins/syntaxes/inchain.c +index 0a6a04e9f..07e58ea84 100644 +--- a/ldap/servers/plugins/syntaxes/inchain.c ++++ b/ldap/servers/plugins/syntaxes/inchain.c +@@ -341,7 +341,6 @@ inchain_values2keys(Slapi_PBlock *pb, Slapi_Value **vals, Slapi_Value ***ivals, + strncpy(value, v->bv.bv_val, v->bv.bv_len); + value[v->bv.bv_len] = '\0'; + slapi_log_err(SLAPI_LOG_FILTER, "inchain", " groupvals = %s\n", value); +- + } + + #if 1 +@@ -365,6 +364,8 @@ inchain_values2keys(Slapi_PBlock *pb, Slapi_Value **vals, Slapi_Value ***ivals, + groupvals.nsuniqueid_vals = NULL; + } + *ivals = result; ++ slapi_sdn_free(&member_sdn); ++ + return(0); + #else + return (string_values2keys(pb, vals, ivals, SYNTAX_CIS | SYNTAX_DN, +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c +index c13ba18c7..bc4ae5e2b 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c +@@ -1749,7 +1749,7 @@ bdb_import_push_progress_history(ImportJob *job, ID current_id, time_t current_t + } + + static void +-bdb_import_calc_rate(ImportWorkerInfo *info, int time_interval) ++bdb_import_calc_rate(ImportWorkerInfo *info, time_t time_interval) + { + size_t ids = info->last_ID_processed - info->previous_ID_counted; + double rate = (double)ids / time_interval; +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c +index b5774a143..ae6cd37da 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c +@@ -356,7 +356,7 @@ bdb_import_producer(void *param) + int fd, curr_file, curr_lineno = 0; + char *curr_filename = NULL; + int idx; +- ldif_context c; ++ ldif_context c = {0}; + int my_version = 0; + size_t newesize = 0; + Slapi_Attr *attr = NULL; +@@ -761,6 +761,7 @@ bdb_import_producer(void *param) + error: + slapi_value_free(&(job->usn_value)); + info->state = ABORTED; ++ bdb_import_free_ldif(&c); + } + + static int +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +index 44f25098f..20d4091f9 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +@@ -2034,13 +2034,15 @@ bdb_pre_close(struct ldbminfo *li) + conf = (bdb_config *)li->li_dblayer_config; + bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env; + +- if (conf->bdb_stop_threads || !pEnv) /* already stopped. do nothing... */ +- return; ++ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); ++ ++ if (conf->bdb_stop_threads || !pEnv) { ++ /* already stopped. do nothing... 
*/ ++ goto timeout_escape; ++ } + + /* first, see if there are any housekeeping threads running */ +- pthread_mutex_lock(&pEnv->bdb_thread_count_lock); + threadcount = pEnv->bdb_thread_count; +- pthread_mutex_unlock(&pEnv->bdb_thread_count_lock); + + if (threadcount) { + PRIntervalTime cvwaittime = PR_MillisecondsToInterval(DBLAYER_SLEEP_INTERVAL * 100); +@@ -2048,7 +2050,7 @@ bdb_pre_close(struct ldbminfo *li) + /* Print handy-dandy log message */ + slapi_log_err(SLAPI_LOG_INFO, "bdb_pre_close", "Waiting for %d database threads to stop\n", + threadcount); +- pthread_mutex_lock(&pEnv->bdb_thread_count_lock); ++ + /* Tell them to stop - we wait until the last possible moment to invoke + this. If we do this much sooner than this, we could find ourselves + in a situation where the threads see the stop_threads and exit before +@@ -2080,7 +2082,7 @@ bdb_pre_close(struct ldbminfo *li) + /* else just a spurious interrupt */ + } + } +- pthread_mutex_unlock(&pEnv->bdb_thread_count_lock); ++ + if (timedout) { + slapi_log_err(SLAPI_LOG_ERR, + "bdb_pre_close", "Timeout after [%d] milliseconds; leave %d database thread(s)...\n", +@@ -2090,7 +2092,9 @@ bdb_pre_close(struct ldbminfo *li) + } + } + slapi_log_err(SLAPI_LOG_INFO, "bdb_pre_close", "All database threads now stopped\n"); ++ + timeout_escape: ++ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock); + return; + } + +@@ -3870,7 +3874,7 @@ bdb_checkpoint_threadmain(void *param) + time_t checkpoint_interval_update = 0; + time_t compactdb_interval = 0; + time_t checkpoint_interval = 0; +- int32_t compactdb_time = 0; ++ uint64_t compactdb_time = 0; + + PR_ASSERT(NULL != param); + li = (struct ldbminfo *)param; +diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c +index fc3e4d30f..90129b682 100644 +--- a/ldap/servers/slapd/back-ldbm/index.c ++++ b/ldap/servers/slapd/back-ldbm/index.c +@@ -1665,7 +1665,7 @@ index_range_read_ext( + /* exit the loop when we either run off the end of the table, + * fail to read a key, or read a key that's out of range. 
+ */ +- IDList *tmp; ++ IDList *tmp = NULL; + /* + char encbuf [BUFSIZ]; + slapi_log_err(SLAPI_LOG_FILTER, " cur_key=%s(%li bytes)\n", +@@ -1716,6 +1716,7 @@ index_range_read_ext( + retry_count < IDL_FETCH_RETRY_COUNT; + retry_count++) { + *err = NEW_IDL_DEFAULT; ++ idl_free(&tmp); + tmp = idl_fetch_ext(be, db, &cur_key, NULL, ai, err, allidslimit); + if (*err == DBI_RC_RETRY) { + ldbm_nasty("index_range_read_ext", "Retrying transaction", 1090, *err); +diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c +index e124b40a9..37bf08ac8 100644 +--- a/ldap/servers/slapd/back-ldbm/init.c ++++ b/ldap/servers/slapd/back-ldbm/init.c +@@ -160,6 +160,8 @@ ldbm_back_init(Slapi_PBlock *pb) + return (0); + + fail: ++ ++ objset_delete(&li->li_instance_set); + ldbm_config_destroy(li); + slapi_pblock_set(pb, SLAPI_PLUGIN_PRIVATE, NULL); + return (-1); +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c +index e3d765893..1047b4b94 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c +@@ -2112,7 +2112,7 @@ _entryrdn_replace_suffix_id(entryrdn_db_ctx_t *ctx, dbi_val_t *key, dbi_val_t *a + * DBI_RC_NOTFOUND means that redirect db is corrupted + */ + _ENTRYRDN_DEBUG_GOTO_BAIL(); +- goto bail; ++ goto bail0; + } + } + dblayer_value_set_buffer(ctx->be, &moddata, childelem, _entryrdn_rdn_elem_size(childelem)); +diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c +index 4e40d1a41..c2c2c5cda 100644 +--- a/ldap/servers/slapd/back-ldbm/vlv.c ++++ b/ldap/servers/slapd/back-ldbm/vlv.c +@@ -503,23 +503,22 @@ vlv_init(ldbm_instance *inst) + + /* Initialize lock first time through */ + if (be->vlvSearchList_lock == NULL) { +- char *rwlockname = slapi_ch_smprintf("vlvSearchList_%s", inst->inst_name); + be->vlvSearchList_lock = slapi_new_rwlock(); +- slapi_ch_free((void **)&rwlockname); + } ++ ++ slapi_rwlock_wrlock(be->vlvSearchList_lock); + if (NULL != (struct vlvSearch *)be->vlvSearchList) { + struct vlvSearch *t = NULL; + struct vlvSearch *nt = NULL; + /* vlvSearchList is modified; need Wlock */ +- slapi_rwlock_wrlock(be->vlvSearchList_lock); + for (t = (struct vlvSearch *)be->vlvSearchList; NULL != t;) { + nt = t->vlv_next; + vlvSearch_delete(&t); + t = nt; + } + be->vlvSearchList = NULL; +- slapi_rwlock_unlock(be->vlvSearchList_lock); + } ++ slapi_rwlock_unlock(be->vlvSearchList_lock); + + { + basedn = slapi_create_dn_string("cn=%s,cn=%s,cn=plugins,cn=config", +diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c +index c09e54c7f..298eb2122 100644 +--- a/ldap/servers/slapd/daemon.c ++++ b/ldap/servers/slapd/daemon.c +@@ -1327,6 +1327,7 @@ slapd_daemon(daemon_ports_t *ports) + /* final cleanup for ASAN and other analyzers */ + PR_JoinThread(accept_thread_p); + free_worker_thread_indexes(); ++ free_server_dataversion(); + } + + void +diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c +index 358c802a7..658b9b279 100644 +--- a/ldap/servers/slapd/entry.c ++++ b/ldap/servers/slapd/entry.c +@@ -1502,6 +1502,9 @@ entry2str_internal_put_value(const char *attrtype, const CSN *attrcsn, CSNType a + strcpy(p, attrtype); + p += attrtypelen; + if (attrcsn != NULL) { ++ /* coverity false positive: there is no alloc involved in this case ++ * because p is not NULL */ ++ /* coverity[leaked_storage] */ + csn_as_attr_option_string(attrcsntype, attrcsn, p); + p += attrcsnlen; + } +diff --git a/ldap/servers/slapd/generation.c 
b/ldap/servers/slapd/generation.c +index 89f097322..1d98df3ac 100644 +--- a/ldap/servers/slapd/generation.c ++++ b/ldap/servers/slapd/generation.c +@@ -87,6 +87,11 @@ set_database_dataversion(const char *dn, const char *dataversion) + + static char *server_dataversion_id = NULL; + ++void ++free_server_dataversion(void) { ++ slapi_ch_free_string(&server_dataversion_id); ++} ++ + const char * + get_server_dataversion() + { +diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c +index f8f6096c7..57228eafa 100644 +--- a/ldap/servers/slapd/libglobs.c ++++ b/ldap/servers/slapd/libglobs.c +@@ -9753,7 +9753,9 @@ validate_num_config_reservedescriptors(void) + for (be = slapi_get_first_backend(&cookie); be != NULL; be = slapi_get_next_backend(cookie)) { + entry_str = slapi_create_dn_string("cn=%s,cn=ldbm database,cn=plugins,cn=config", be->be_name); + if (NULL == entry_str) { +- slapi_log_err(SLAPI_LOG_ERR, "validate_num_config_reservedescriptors", "Failed to create backend dn string"); ++ slapi_log_err(SLAPI_LOG_ERR, "validate_num_config_reservedescriptors", ++ "Failed to create backend dn string\n"); ++ slapi_ch_free_string(&cookie); + return -1; + } + slapi_sdn_init_dn_byref(&sdn, entry_str); +@@ -9776,7 +9778,9 @@ validate_num_config_reservedescriptors(void) + for (be = slapi_get_first_backend(&cookie); be; be = slapi_get_next_backend(cookie)) { + entry_str = slapi_create_dn_string("cn=index,cn=%s,cn=ldbm database,cn=plugins,cn=config", be->be_name); + if (NULL == entry_str) { +- slapi_log_err(SLAPI_LOG_ERR, "validate_num_config_reservedescriptors", "Failed to create index dn string"); ++ slapi_log_err(SLAPI_LOG_ERR, "validate_num_config_reservedescriptors", ++ "Failed to create index dn string\n"); ++ slapi_ch_free_string(&cookie); + return -1; + } + slapi_sdn_init_dn_byref(&sdn, entry_str); +@@ -9841,7 +9845,9 @@ validate_num_config_reservedescriptors(void) + for (be = slapi_get_first_backend(&cookie); be; be = slapi_get_next_backend(cookie)) { + entry_str = slapi_create_dn_string("cn=%s,cn=chaining database,cn=plugins,cn=config", be->be_name); + if (NULL == entry_str) { +- slapi_log_err(SLAPI_LOG_ERR, "validate_num_config_reservedescriptors", "Failed to create chaining be dn string"); ++ slapi_log_err(SLAPI_LOG_ERR, "validate_num_config_reservedescriptors", ++ "Failed to create chaining be dn string\n"); ++ slapi_ch_free_string(&cookie); + return -1; + } + slapi_sdn_init_dn_byref(&sdn, entry_str); +diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c +index 5c841c528..adfa6731c 100644 +--- a/ldap/servers/slapd/main.c ++++ b/ldap/servers/slapd/main.c +@@ -647,7 +647,7 @@ main(int argc, char **argv) + * consideration of rust etc) + */ + slapi_td_init(); +- ++ + /* Init the global counters */ + alloc_global_snmp_vars(); + +@@ -2790,6 +2790,7 @@ static struct slapd_debug_level_entry + {LDAP_DEBUG_TIMING, "timing", 0}, + {LDAP_DEBUG_ACLSUMMARY, "accesscontrolsummary", 0}, + {LDAP_DEBUG_BACKLDBM, "backend", 0}, ++ {LDAP_DEBUG_PWDPOLICY, "pwpolicy", 0}, + {LDAP_DEBUG_ALL_LEVELS, "ALL", 0}, + {0, NULL, 0}}; + +diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c +index 4a89483cf..9f19d5060 100644 +--- a/ldap/servers/slapd/passwd_extop.c ++++ b/ldap/servers/slapd/passwd_extop.c +@@ -287,7 +287,9 @@ passwd_modify_generate_policy_passwd(passwdPolicy *pwpolicy, + + /* if only minalphas is set, divide it into minuppers and minlowers. 
*/ + if (pwpolicy->pw_minalphas > 0 && +- (my_policy[idx_minuppers] == 0 && my_policy[idx_minlowers] == 0)) { ++ (my_policy[idx_minuppers] == 0 && my_policy[idx_minlowers] == 0)) ++ { ++ /* coverity[store_truncates_time_t] */ + unsigned int x = (unsigned int)time(NULL); + my_policy[idx_minuppers] = slapi_rand_r(&x) % pwpolicy->pw_minalphas; + my_policy[idx_minlowers] = pwpolicy->pw_minalphas - my_policy[idx_minuppers]; +@@ -346,6 +348,7 @@ passwd_modify_generate_policy_passwd(passwdPolicy *pwpolicy, + /* if password length is longer the sum of my_policy's, + let them share the burden */ + if (passlen > tmplen) { ++ /* coverity[store_truncates_time_t] */ + unsigned int x = (unsigned int)time(NULL); + int delta = passlen - tmplen; + for (i = 0; i < delta; i++) { +diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c +index 0164a70be..4f87ad3cf 100644 +--- a/ldap/servers/slapd/plugin_internal_op.c ++++ b/ldap/servers/slapd/plugin_internal_op.c +@@ -249,11 +249,12 @@ slapi_search_internal_set_pb(Slapi_PBlock *pb, const char *base, int scope, cons + return; + } + +- /* Free op just in case this pb is being reused */ + slapi_pblock_get(pb, SLAPI_OPERATION, &op); + operation_free(&op, NULL); ++ slapi_pblock_set(pb, SLAPI_OPERATION, NULL); + slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &tmp_attrs); + slapi_ch_array_free(tmp_attrs); ++ slapi_pblock_set(pb, SLAPI_SEARCH_ATTRS, NULL); + + op = internal_operation_new(SLAPI_OPERATION_SEARCH, operation_flags); + slapi_pblock_set(pb, SLAPI_OPERATION, op); +@@ -261,7 +262,7 @@ slapi_search_internal_set_pb(Slapi_PBlock *pb, const char *base, int scope, cons + slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &scope); + slapi_pblock_set(pb, SLAPI_SEARCH_STRFILTER, (void *)filter); + slapi_pblock_set(pb, SLAPI_CONTROLS_ARG, controls); +- /* forbidden attrs could be removed in slapi_pblock_set. 
*/ ++ /* forbidden attrs could be removed in slapi_pblock_set */ + tmp_attrs = slapi_ch_array_dup(attrs); + slapi_pblock_set(pb, SLAPI_SEARCH_ATTRS, tmp_attrs); + slapi_pblock_set(pb, SLAPI_SEARCH_ATTRSONLY, &attrsonly); +@@ -282,12 +283,6 @@ slapi_search_internal_set_pb_ext(Slapi_PBlock *pb, Slapi_DN *sdn, int scope, con + return; + } + +- /* Free op/attrs just in case this pb is being reused */ +- slapi_pblock_get(pb, SLAPI_OPERATION, &op); +- operation_free(&op, NULL); +- slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &tmp_attrs); +- slapi_ch_array_free(tmp_attrs); +- + op = internal_operation_new(SLAPI_OPERATION_SEARCH, operation_flags); + slapi_pblock_set(pb, SLAPI_OPERATION, op); + slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET_DN, +@@ -317,12 +312,6 @@ slapi_seq_internal_set_pb(Slapi_PBlock *pb, char *base, int type, char *attrname + return; + } + +- /* Free op/attrs just in case this pb is being reused */ +- slapi_pblock_get(pb, SLAPI_OPERATION, &op); +- operation_free(&op, NULL); +- slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &tmp_attrs); +- slapi_ch_array_free(tmp_attrs); +- + op = internal_operation_new(SLAPI_OPERATION_SEARCH, operation_flags); + slapi_pblock_set(pb, SLAPI_OPERATION, op); + slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET_DN, (void *)base); +diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h +index 2031cfb7d..175a39a6c 100644 +--- a/ldap/servers/slapd/proto-slap.h ++++ b/ldap/servers/slapd/proto-slap.h +@@ -1374,11 +1374,11 @@ void g_set_global_mrl(struct matchingRuleList *newglobalmrl); + /* + * generation.c + */ ++void free_server_dataversion(void); + + /* + * factory.c + */ +- + int factory_register_type(const char *name, size_t offset); + void *factory_create_extension(int type, void *object, void *parent); + void factory_destroy_extension(int type, void *object, void *parent, void **extension); +diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c +index 9772e8213..4e351eff5 100644 +--- a/ldap/servers/slapd/result.c ++++ b/ldap/servers/slapd/result.c +@@ -2106,7 +2106,7 @@ log_op_stat(Slapi_PBlock *pb, uint64_t connid, int32_t op_id, int32_t op_interna + key_info->id_lookup_cnt); + } + } +- ++ + /* total elapsed time */ + slapi_timespec_diff(&op_stat->search_stat->keys_lookup_end, &op_stat->search_stat->keys_lookup_start, &duration); + snprintf(stat_etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)duration.tv_sec, (int64_t)duration.tv_nsec); +@@ -2173,6 +2173,13 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries + + + operation_notes = slapi_pblock_get_operation_notes(pb); ++ if (0 == operation_notes) { ++ notes_str = ""; ++ } else { ++ notes_str = notes_buf; ++ *notes_buf = ' '; ++ notes2str(operation_notes, notes_buf + 1, sizeof(notes_buf) - 1); ++ } + + if (0 == operation_notes) { + notes_str = ""; +diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c +index 7e75b2082..b7fe19d9f 100644 +--- a/ldap/servers/slapd/task.c ++++ b/ldap/servers/slapd/task.c +@@ -1962,6 +1962,7 @@ task_upgradedb_add(Slapi_PBlock *pb __attribute__((unused)), + const char *database_type = "ldbm database"; + const char *my_database_type = NULL; + char *cookie = NULL; ++ char *seq_val = NULL; + + *returncode = LDAP_SUCCESS; + if (slapi_entry_attr_get_ref(e, "cn") == NULL) { +@@ -2030,7 +2031,7 @@ task_upgradedb_add(Slapi_PBlock *pb __attribute__((unused)), + int32_t seq_type = SLAPI_UPGRADEDB_FORCE; /* force; reindex all regardless the dbversion */ + slapi_pblock_set(mypb, SLAPI_SEQ_TYPE, 
+     }
+-    char *seq_val = slapi_ch_strdup(archive_dir);
++    seq_val = slapi_ch_strdup(archive_dir);
+     slapi_pblock_set(pb, SLAPI_BACKEND_TASK, task);
+     int32_t task_flags = SLAPI_TASK_RUNNING_AS_TASK;
+     slapi_pblock_set(mypb, SLAPI_TASK_FLAGS, &task_flags);
+@@ -2044,7 +2045,7 @@ task_upgradedb_add(Slapi_PBlock *pb __attribute__((unused)),
+     }
+ 
+ out:
+-    slapi_ch_free((void **)&seq_val);
++    slapi_ch_free_string(&seq_val);
+     if (rv != 0) {
+         if (task)
+             destroy_task(1, task);
+diff --git a/ldap/servers/slapd/tools/ldclt/ldclt.c b/ldap/servers/slapd/tools/ldclt/ldclt.c
+index b0e3bedae..466622e92 100644
+--- a/ldap/servers/slapd/tools/ldclt/ldclt.c
++++ b/ldap/servers/slapd/tools/ldclt/ldclt.c
+@@ -1179,6 +1179,7 @@ basicInit(void)
+      * Find the attribute name
+      */
+     for (i = 0; (i < strlen(mctx.attrpl)) && (mctx.attrpl[i] != ':'); i++);
++    free(mctx.attrplName);
+     mctx.attrplName = (char *)calloc(1, i + 1);
+     if (mctx.attrplName == NULL) {
+         printf("Error: unable to allocate memory for attrplName\n");
+diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c
+index da7d244c2..0c3d312dc 100644
+--- a/ldap/servers/slapd/vattr.c
++++ b/ldap/servers/slapd/vattr.c
+@@ -198,6 +198,7 @@ vattr_check_thread(void *arg)
+                 }
+             }
+             slapi_free_search_results_internal(search_pb);
++            slapi_pblock_init(search_pb);
+         } /* check_suffix */
+     }     /* suffix */
+     be = (backend *) slapi_get_next_backend(cookie);
+diff --git a/lib/libsi18n/reshash.c b/lib/libsi18n/reshash.c
+index 657287b8a..5e610e9d9 100644
+--- a/lib/libsi18n/reshash.c
++++ b/lib/libsi18n/reshash.c
+@@ -221,37 +221,32 @@ ResHashCreate(char *name)
+     ResHash *pResHash;
+ 
+     /* Create hash table */
+-    pResHash = (ResHash *)malloc(sizeof(ResHash));
++    pResHash = (ResHash *)calloc(1, sizeof(ResHash));
+     if (pResHash == NULL)
+-        goto error;
+-
+-    memset(pResHash, 0, sizeof(ResHash));
++        return NULL;
+ 
+     if (name)
+         pResHash->name = strdup(name);
+ 
+     /* Create initial tree item and it's valuelist to hash table */
+-    pResHash->treelist = (TreeNode *)malloc(sizeof(TreeNode));
++    pResHash->treelist = (TreeNode *)calloc(1, sizeof(TreeNode));
+     if (pResHash->treelist == NULL)
+         goto error;
+ 
+-    memset(pResHash->treelist, 0, sizeof(TreeNode));
+-
+-    pResHash->treelist->vlist = (ValueNode *)malloc(sizeof(ValueNode));
++    pResHash->treelist->vlist = (ValueNode *)calloc(1, sizeof(ValueNode));
+     if (pResHash->treelist->vlist == NULL)
+         goto error;
+ 
+-    memset(pResHash->treelist->vlist, 0, sizeof(ValueNode));
+-
+     goto done;
+ 
+ error:
+-    if (pResHash && pResHash->treelist && pResHash->treelist->vlist)
++    if (pResHash->treelist) {
+         free(pResHash->treelist->vlist);
+-    if (pResHash && pResHash->treelist)
+         free(pResHash->treelist);
+-    if (pResHash)
+-        free(pResHash);
++    }
++    free(pResHash->name);
++    free(pResHash);
++
+     return NULL;
+ 
+ done:
+diff --git a/src/rewriters/adfilter.c b/src/rewriters/adfilter.c
+index 84582228f..bd77e54e4 100644
+--- a/src/rewriters/adfilter.c
++++ b/src/rewriters/adfilter.c
+@@ -83,6 +83,7 @@ dom_sid_to_bin_sid(dom_sid_t *dom_sid, bin_sid_t *res)
+ 
+     for (size_t i = 0; i < dom_sid->num_auths; i++) {
+         if ((p + sizeof(uint32_t)) > bin_sid.length) {
++            slapi_ch_free((void **)&bin_sid.sid);
+             return -1;
+         }
+         val = htole32(dom_sid->sub_auths[i]);
+-- 
+2.48.1
+
diff --git a/SOURCES/0027-Issue-6442-Fix-latest-covscan-memory-leaks-part-2.patch b/SOURCES/0027-Issue-6442-Fix-latest-covscan-memory-leaks-part-2.patch
new file mode 100644
index 0000000..9f90901
--- /dev/null
+++ b/SOURCES/0027-Issue-6442-Fix-latest-covscan-memory-leaks-part-2.patch
@@ -0,0 +1,37 @@
+From 984c653218cf386e449dc4db89b085a49b535f8d Mon Sep 17 00:00:00 2001
+From: Mark Reynolds
+Date: Wed, 18 Dec 2024 16:55:06 -0500
+Subject: [PATCH] Issue 6442 - Fix latest covscan memory leaks (part 2)
+
+Description:
+
+Missed part of the fix because of a rebase issue
+
+Fixes: https://github.com/389ds/389-ds-base/issues/6442
+
+Reviewed by: progier & spichugi (Thanks!!)
+---
+ ldap/servers/slapd/plugin_internal_op.c | 7 -------
+ 1 file changed, 7 deletions(-)
+
+diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c
+index 4f87ad3cf..96e705471 100644
+--- a/ldap/servers/slapd/plugin_internal_op.c
++++ b/ldap/servers/slapd/plugin_internal_op.c
+@@ -249,13 +249,6 @@ slapi_search_internal_set_pb(Slapi_PBlock *pb, const char *base, int scope, cons
+         return;
+     }
+ 
+-    slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+-    operation_free(&op, NULL);
+-    slapi_pblock_set(pb, SLAPI_OPERATION, NULL);
+-    slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &tmp_attrs);
+-    slapi_ch_array_free(tmp_attrs);
+-    slapi_pblock_set(pb, SLAPI_SEARCH_ATTRS, NULL);
+-
+     op = internal_operation_new(SLAPI_OPERATION_SEARCH, operation_flags);
+     slapi_pblock_set(pb, SLAPI_OPERATION, op);
+     slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET_DN, (void *)base);
+-- 
+2.48.1
+
diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec
index 2218ab7..2a59451 100644
--- a/SPECS/389-ds-base.spec
+++ b/SPECS/389-ds-base.spec
@@ -47,7 +47,7 @@ ExcludeArch: i686
 Summary: 389 Directory Server (base)
 Name: 389-ds-base
 Version: 2.5.2
-Release: 5%{?dist}
+Release: 8%{?dist}
 License: GPL-3.0-or-later AND (0BSD OR Apache-2.0 OR MIT) AND (Apache-2.0 OR Apache-2.0 WITH LLVM-exception OR MIT) AND (Apache-2.0 OR BSL-1.0) AND (Apache-2.0 OR MIT OR Zlib) AND (Apache-2.0 OR MIT) AND (CC-BY-4.0 AND MIT) AND (MIT OR Apache-2.0) AND Unicode-DFS-2016 AND (MIT OR CC0-1.0) AND (MIT OR Unlicense) AND 0BSD AND Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MIT AND ISC AND MPL-2.0 AND PSF-2.0
 URL: https://www.port389.org
 Conflicts: selinux-policy-base < 3.9.8
@@ -478,6 +478,24 @@ Patch: 0006-Issue-6390-Adjust-cleanAllRUV-max-per-txn-and-interv.patc
 Patch: 0007-Issue-6284-BUG-freelist-ordering-causes-high-wtime-6.patch
 Patch: 0008-Issue-6296-basic_test.py-test_conn_limits-fails-in-m.patch
 Patch: 0009-Issue-5798-Fix-dsconf-config-multi-valued-attr-opera.patch
+Patch: 0010-Issue-6417-If-an-entry-RDN-is-identical-to-the-suffi.patch
+Patch: 0011-Issue-6417-2nd-If-an-entry-RDN-is-identical-to-the-s.patch
+Patch: 0012-Issue-6417-3rd-If-an-entry-RDN-is-identical-to-the-s.patch
+Patch: 0013-Issue-6432-Crash-during-bind-when-acct-policy-plugin.patch
+Patch: 0014-Issue-6386-backup-restore-broken-after-db-log-rotati.patch
+Patch: 0015-Issue-6446-on-replica-consumer-account-policy-plugin.patch
+Patch: 0016-Issue-6446-Fix-test_acct_policy_consumer-test-to-wai.patch
+Patch: 0017-Issue-6554-During-import-of-entries-without-nsUnique.patch
+Patch: 0018-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch
+Patch: 0019-Issue-6229-After-an-initial-failure-subsequent-onlin.patch
+Patch: 0020-Issue-6372-Deadlock-while-doing-online-backup-6475.patch
+Patch: 0021-Issue-6509-Race-condition-with-Paged-Result-searches.patch
+Patch: 0022-Issue-6436-MOD-on-a-large-group-slow-if-substring-in.patch
+Patch: 0023-Issue-6494-Various-errors-when-using-extended-matchi.patch
+Patch: 0024-Issue-6485-Fix-double-free-in-USN-cleanup-task.patch
+Patch: 0025-Issue-6427-fix-various-memory-leaks.patch
+Patch: 0026-Issue-6442-Fix-latest-covscan-memory-leaks.patch
+Patch: 0027-Issue-6442-Fix-latest-covscan-memory-leaks-part-2.patch
 
 %description
 389 Directory Server is an LDAPv3 compliant server. The base package includes
@@ -920,6 +938,25 @@ exit 0
 %endif
 
 %changelog
+* Wed Mar 05 2025 Viktor Ashirov - 2.5.2-8
+- Fix changelog
+
+* Tue Mar 04 2025 Viktor Ashirov - 2.5.2-6
+- Resolves: RHEL-69825 - "Duplicated DN detected" errors when creating indexes or importing entries. [rhel-9.5.z]
+- Resolves: RHEL-70126 - Crash in attrlist_find() when the Account Policy plugin is enabled. [rhel-9.5.z]
+- Resolves: RHEL-74152 - backup/restore broken [rhel-9.5.z]
+- Resolves: RHEL-74157 - If an entry RDN is identical to the suffix, then Entryrdn gets broken during a reindex [rhel-9.5.z]
+- Resolves: RHEL-74167 - On replica consumer, account policy plugin fails to manage the last login history [rhel-9.5.z]
+- Resolves: RHEL-78343 - During import of entries without nsUniqueId, a supplier generates duplicate nsUniqueId (LMDB only) [rhel-9.5.z]
+- Resolves: RHEL-79497 - Failed to set sslversionmax to TLS1.3 in FIPS mode with dsconf $INSTANCE security set --tls-protocol-max TLS1.3 [rhel-9.5.z]
+- Resolves: RHEL-81102 - After an initial failure, subsequent online backups will not work. [rhel-9.5.z]
+- Resolves: RHEL-81107 - Online backup hangs sporadically. [rhel-9.5.z]
+- Resolves: RHEL-81113 - IPA LDAP error code T3 when no exceeded time limit from a paged search result [rhel-9.5.z]
+- Resolves: RHEL-81139 - Healthcheck tool should warn admin about creating a substring index on membership attribute [rhel-9.5.z]
+- Resolves: RHEL-81146 - 389DirectoryServer Process Stops When Setting up Sorted VLV Index [rhel-9.5.z]
+- Resolves: RHEL-81157 - AddressSanitizer: double-free [rhel-9.5.z]
+- Resolves: RHEL-81171 - leaked_storage: Variable "childelems" going out of scope leaks the storage it points to. [rhel-9.5.z]
+
 * Fri Jan 24 2025 Viktor Ashirov - 2.5.2-5
 - Resolves: RHEL-74350 - Some nsslapd-haproxy-trusted-ip values are discarded upon a restart. [rhel-9.5.z]