From b1fa2c887de16fbda29e1c08eb025a3d0722dc9a Mon Sep 17 00:00:00 2001 From: eabdullin Date: Mon, 15 Sep 2025 11:39:15 +0000 Subject: [PATCH] import CS 389-ds-base-2.7.0-5.el9 --- .389-ds-base.metadata | 3 +- .gitignore | 3 +- ...e-6377-syntax-error-in-setup.py-6378.patch | 40 + ...uilding-for-older-versions-of-Python.patch | 60 - ...-log-rotation-refresh-the-FD-pointer.patch | 146 -- ...9-replica.py-is-using-nonexistent-da.patch | 37 + ...pd-mdb-max-dbs-autotuning-doesn-t-wo.patch | 311 --- ...stance-read-only-mode-is-broken-6681.patch | 351 ++++ ...ix-dbscan-options-and-man-pages-6315.patch | 894 --------- ...N-Access-Control-Plugin-with-wildcar.patch | 125 ++ ...ronise-accept_thread-with-slapd_daem.patch | 50 + ...ugin-failure-to-handle-a-modrdn-for-.patch | 70 - ...ate-race-condition-in-paged_results_.patch | 43 - ...ue-6782-Improve-paged-result-locking.patch | 127 ++ ...-an-initial-failure-subsequent-onlin.patch | 566 ------ ...nd-creation-cleanup-and-Database-UI-.patch | 488 +++++ ...g-import-of-entries-without-nsUnique.patch | 165 -- ...iq-allow-specifying-match-rules-in-t.patch | 45 + ...6561-TLS-1.2-stickiness-in-FIPS-mode.patch | 38 - ...I-Properly-handle-disabled-NDN-cache.patch | 1201 ++++++++++++ ...Issue-6090-dbscan-use-bdb-by-default.patch | 44 - ...ilter-is-not-fully-applying-matching.patch | 399 ++++ ...essed-log-rotation-creates-files-wit.patch | 163 ++ ...nt-repeated-disconnect-logs-during-s.patch | 116 ++ ...f-Replicas-with-the-consumer-role-al.patch | 67 + ...ser-that-is-updated-during-password-.patch | 143 ++ ...-if-repl-keep-alive-entry-can-not-be.patch | 98 + ...est-for-entryUSN-overflow-on-failed-.patch | 352 ++++ ...est-for-numSubordinates-replication-.patch | 172 ++ ...k-password-hashes-in-audit-logs-6885.patch | 814 ++++++++ ...isk-monitoring-test-failures-and-imp.patch | 1721 +++++++++++++++++ ...ss-Coverity-scan-issues-in-memberof-.patch | 63 + ...6468-CLI-Fix-default-error-log-level.patch | 31 + ...13-6886-6250-Adjust-xfail-marks-6914.patch | 222 +++ ...llow-system-to-manage-uid-gid-at-sta.patch | 32 + ...y-leak-in-roles_cache_create_object_.patch | 92 + ...y-leak-in-roles_cache_create_object_.patch | 262 +++ ...essSanitizer-memory-leak-in-mdb_init.patch | 65 + ...8-AddressSanitizer-leak-in-do_search.patch | 58 + ...ssSanitizer-leak-in-agmt_update_init.patch | 58 + ...apd-crashes-when-a-referral-is-added.patch | 97 + SOURCES/Cargo-2.7.0-1.lock | 1018 ++++++++++ SPECS/389-ds-base.spec | 395 ++-- 43 files changed, 8645 insertions(+), 2600 deletions(-) create mode 100644 SOURCES/0001-Issue-6377-syntax-error-in-setup.py-6378.patch delete mode 100644 SOURCES/0001-Issue-6468-Fix-building-for-older-versions-of-Python.patch delete mode 100644 SOURCES/0002-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch create mode 100644 SOURCES/0002-Issue-6838-lib389-replica.py-is-using-nonexistent-da.patch delete mode 100644 SOURCES/0003-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch create mode 100644 SOURCES/0003-Issue-6680-instance-read-only-mode-is-broken-6681.patch delete mode 100644 SOURCES/0004-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch create mode 100644 SOURCES/0004-Issue-6825-RootDN-Access-Control-Plugin-with-wildcar.patch create mode 100644 SOURCES/0005-Issue-6119-Synchronise-accept_thread-with-slapd_daem.patch delete mode 100644 SOURCES/0005-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch delete mode 100644 SOURCES/0006-Issue-6258-Mitigate-race-condition-in-paged_results_.patch create mode 100644 
SOURCES/0006-Issue-6782-Improve-paged-result-locking.patch delete mode 100644 SOURCES/0007-Issue-6229-After-an-initial-failure-subsequent-onlin.patch create mode 100644 SOURCES/0007-Issue-6822-Backend-creation-cleanup-and-Database-UI-.patch delete mode 100644 SOURCES/0008-Issue-6554-During-import-of-entries-without-nsUnique.patch create mode 100644 SOURCES/0008-Issue-6857-uiduniq-allow-specifying-match-rules-in-t.patch delete mode 100644 SOURCES/0009-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch create mode 100644 SOURCES/0009-Issue-6756-CLI-UI-Properly-handle-disabled-NDN-cache.patch delete mode 100644 SOURCES/0010-Issue-6090-dbscan-use-bdb-by-default.patch create mode 100644 SOURCES/0010-Issue-6859-str2filter-is-not-fully-applying-matching.patch create mode 100644 SOURCES/0011-Issue-6872-compressed-log-rotation-creates-files-wit.patch create mode 100644 SOURCES/0012-Issue-6878-Prevent-repeated-disconnect-logs-during-s.patch create mode 100644 SOURCES/0013-Issue-6772-dsconf-Replicas-with-the-consumer-role-al.patch create mode 100644 SOURCES/0014-Issue-6893-Log-user-that-is-updated-during-password-.patch create mode 100644 SOURCES/0015-Issue-6895-Crash-if-repl-keep-alive-entry-can-not-be.patch create mode 100644 SOURCES/0016-Issue-6250-Add-test-for-entryUSN-overflow-on-failed-.patch create mode 100644 SOURCES/0017-Issue-6594-Add-test-for-numSubordinates-replication-.patch create mode 100644 SOURCES/0018-Issue-6884-Mask-password-hashes-in-audit-logs-6885.patch create mode 100644 SOURCES/0019-Issue-6897-Fix-disk-monitoring-test-failures-and-imp.patch create mode 100644 SOURCES/0020-Issue-6339-Address-Coverity-scan-issues-in-memberof-.patch create mode 100644 SOURCES/0021-Issue-6468-CLI-Fix-default-error-log-level.patch create mode 100644 SOURCES/0022-Issues-6913-6886-6250-Adjust-xfail-marks-6914.patch create mode 100644 SOURCES/0023-Issue-6181-RFE-Allow-system-to-manage-uid-gid-at-sta.patch create mode 100644 SOURCES/0024-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch create mode 100644 SOURCES/0025-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch create mode 100644 SOURCES/0026-Issue-6850-AddressSanitizer-memory-leak-in-mdb_init.patch create mode 100644 SOURCES/0027-Issue-6848-AddressSanitizer-leak-in-do_search.patch create mode 100644 SOURCES/0028-Issue-6865-AddressSanitizer-leak-in-agmt_update_init.patch create mode 100644 SOURCES/0029-Issue-6768-ns-slapd-crashes-when-a-referral-is-added.patch create mode 100644 SOURCES/Cargo-2.7.0-1.lock diff --git a/.389-ds-base.metadata b/.389-ds-base.metadata index 43ee160..a112433 100644 --- a/.389-ds-base.metadata +++ b/.389-ds-base.metadata @@ -1,2 +1,3 @@ -25969f6e65d79aa29671eff7185e4307ff3c08a0 SOURCES/389-ds-base-2.6.1.tar.bz2 +e9ce5b0affef3f7a319958610c5382152f1b559f SOURCES/389-ds-base-2.7.0.tar.bz2 1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2 +b183c1ebee9c1d81d4b394df6de6521a8b333cbc SOURCES/vendor-2.7.0-1.tar.gz diff --git a/.gitignore b/.gitignore index 13fa011..af6149d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ -SOURCES/389-ds-base-2.6.1.tar.bz2 +SOURCES/389-ds-base-2.7.0.tar.bz2 SOURCES/jemalloc-5.3.0.tar.bz2 +SOURCES/vendor-2.7.0-1.tar.gz diff --git a/SOURCES/0001-Issue-6377-syntax-error-in-setup.py-6378.patch b/SOURCES/0001-Issue-6377-syntax-error-in-setup.py-6378.patch new file mode 100644 index 0000000..84ff8d0 --- /dev/null +++ b/SOURCES/0001-Issue-6377-syntax-error-in-setup.py-6378.patch @@ -0,0 +1,40 @@ +From 5903fac2334f984d18aea663735fb260d6b100ed Mon Sep 17 
00:00:00 2001 +From: progier389 +Date: Tue, 22 Oct 2024 17:26:46 +0200 +Subject: [PATCH] Issue 6377 - syntax error in setup.py (#6378) + +Syntax error due to badly nested quotes in dblib.py cause trouble in setup.py and dsconf dblib b2b2mdb/mdb2dbd +Fix bit using double quotes in the f-expression and quotes for the embedded strings. + +Issue: #6377 + +Reviewed by: @tbordaz, @droideck (Thank!) +--- + src/lib389/lib389/cli_ctl/dblib.py | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/src/lib389/lib389/cli_ctl/dblib.py b/src/lib389/lib389/cli_ctl/dblib.py +index ff81f0e19..3f6e7b456 100644 +--- a/src/lib389/lib389/cli_ctl/dblib.py ++++ b/src/lib389/lib389/cli_ctl/dblib.py +@@ -183,7 +183,7 @@ def export_changelog(be, dblib): + return False + try: + cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname'] +- _log.info(f'Exporting changelog {cl5dbname} to {be['cl5name']}') ++ _log.info(f"Exporting changelog {cl5dbname} to {be['cl5name']}") + run_dbscan(['-D', dblib, '-f', cl5dbname, '-X', be['cl5name']]) + return True + except subprocess.CalledProcessError as e: +@@ -194,7 +194,7 @@ def import_changelog(be, dblib): + # import backend changelog + try: + cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname'] +- _log.info(f'Importing changelog {cl5dbname} from {be['cl5name']}') ++ _log.info(f"Importing changelog {cl5dbname} from {be['cl5name']}") + run_dbscan(['-D', dblib, '-f', cl5dbname, '--import', be['cl5name'], '--do-it']) + return True + except subprocess.CalledProcessError as e: +-- +2.49.0 + diff --git a/SOURCES/0001-Issue-6468-Fix-building-for-older-versions-of-Python.patch b/SOURCES/0001-Issue-6468-Fix-building-for-older-versions-of-Python.patch deleted file mode 100644 index b3e87c5..0000000 --- a/SOURCES/0001-Issue-6468-Fix-building-for-older-versions-of-Python.patch +++ /dev/null @@ -1,60 +0,0 @@ -From 0921400a39b61687db2bc55ebd5021eef507e960 Mon Sep 17 00:00:00 2001 -From: Viktor Ashirov -Date: Tue, 28 Jan 2025 21:05:49 +0100 -Subject: [PATCH] Issue 6468 - Fix building for older versions of Python - -Bug Description: -Structural Pattern Matching has been added in Python 3.10, older version -do not support it. - -Fix Description: -Replace `match` and `case` statements with `if-elif`. - -Relates: https://github.com/389ds/389-ds-base/issues/6468 - -Reviewed by: @droideck (Thanks!) 
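For illustration only (not part of either lib389 patch above; the values below are made up): f-strings cannot reuse the surrounding quote character before Python 3.12, and match/case needs Python 3.10, which is why the fixes above switch to double-quoted f-strings and an if/elif chain to stay compatible with Python 3.9.

    # Minimal sketch of both portability points; `be` and `logtype` are placeholders.
    be = {'cl5name': '/tmp/changelog.dump'}
    # f'Exporting to {be['cl5name']}' is a SyntaxError before Python 3.12;
    # double quotes outside and single quotes inside work on every version:
    print(f"Exporting to {be['cl5name']}")

    logtype = "error"
    # Structural pattern matching (match/case) requires Python 3.10+,
    # so an equivalent if/elif chain keeps the code importable on 3.9:
    if logtype == "access":
        attr_map = "ACCESS_ATTR_MAP"
    elif logtype == "error":
        attr_map = "ERROR_ATTR_MAP"
    else:
        raise ValueError(f"Unknown logtype: {logtype}")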
---- - src/lib389/lib389/cli_conf/logging.py | 27 ++++++++++++++------------- - 1 file changed, 14 insertions(+), 13 deletions(-) - -diff --git a/src/lib389/lib389/cli_conf/logging.py b/src/lib389/lib389/cli_conf/logging.py -index 2e86f2de8..d1e32822c 100644 ---- a/src/lib389/lib389/cli_conf/logging.py -+++ b/src/lib389/lib389/cli_conf/logging.py -@@ -234,19 +234,20 @@ def get_log_config(inst, basedn, log, args): - attr_map = {} - levels = {} - -- match args.logtype: -- case "access": -- attr_map = ACCESS_ATTR_MAP -- levels = ACCESS_LEVELS -- case "error": -- attr_map = ERROR_ATTR_MAP -- levels = ERROR_LEVELS -- case "security": -- attr_map = SECURITY_ATTR_MAP -- case "audit": -- attr_map = AUDIT_ATTR_MAP -- case "auditfail": -- attr_map = AUDITFAIL_ATTR_MAP -+ if args.logtype == "access": -+ attr_map = ACCESS_ATTR_MAP -+ levels = ACCESS_LEVELS -+ elif args.logtype == "error": -+ attr_map = ERROR_ATTR_MAP -+ levels = ERROR_LEVELS -+ elif args.logtype == "security": -+ attr_map = SECURITY_ATTR_MAP -+ elif args.logtype == "audit": -+ attr_map = AUDIT_ATTR_MAP -+ elif args.logtype == "auditfail": -+ attr_map = AUDITFAIL_ATTR_MAP -+ else: -+ raise ValueError(f"Unknown logtype: {args.logtype}") - - sorted_results = [] - for attr, value in attrs.items(): --- -2.48.0 - diff --git a/SOURCES/0002-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch b/SOURCES/0002-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch deleted file mode 100644 index 42ca433..0000000 --- a/SOURCES/0002-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch +++ /dev/null @@ -1,146 +0,0 @@ -From 12f9bf81e834549db02b1243ecf769b511c9f69f Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Fri, 31 Jan 2025 08:54:27 -0500 -Subject: [PATCH] Issue 6489 - After log rotation refresh the FD pointer - -Description: - -When flushing a log buffer we get a FD for log prior to checking if the -log should be rotated. If the log is rotated that FD reference is now -invalid, and it needs to be refrehed before proceeding - -Relates: https://github.com/389ds/389-ds-base/issues/6489 - -Reviewed by: tbordaz(Thanks!) ---- - .../suites/logging/log_flush_rotation_test.py | 81 +++++++++++++++++++ - ldap/servers/slapd/log.c | 18 +++++ - 2 files changed, 99 insertions(+) - create mode 100644 dirsrvtests/tests/suites/logging/log_flush_rotation_test.py - -diff --git a/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py b/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py -new file mode 100644 -index 000000000..b33a622e1 ---- /dev/null -+++ b/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py -@@ -0,0 +1,81 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2025 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. -+# --- END COPYRIGHT BLOCK --- -+# -+import os -+import logging -+import time -+import pytest -+from lib389._constants import DEFAULT_SUFFIX, PW_DM -+from lib389.tasks import ImportTask -+from lib389.idm.user import UserAccounts -+from lib389.topologies import topology_st as topo -+ -+ -+log = logging.getLogger(__name__) -+ -+ -+def test_log_flush_and_rotation_crash(topo): -+ """Make sure server does not crash whening flushing a buffer and rotating -+ the log at the same time -+ -+ :id: d4b0af2f-48b2-45f5-ae8b-f06f692c3133 -+ :setup: Standalone Instance -+ :steps: -+ 1. Enable all logs -+ 2. Enable log buffering for all logs -+ 3. Set rotation time unit to 1 minute -+ 4. 
Make sure server is still running after 1 minute -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ """ -+ -+ inst = topo.standalone -+ -+ # Enable logging and buffering -+ inst.config.set("nsslapd-auditlog-logging-enabled", "on") -+ inst.config.set("nsslapd-accesslog-logbuffering", "on") -+ inst.config.set("nsslapd-auditlog-logbuffering", "on") -+ inst.config.set("nsslapd-errorlog-logbuffering", "on") -+ inst.config.set("nsslapd-securitylog-logbuffering", "on") -+ -+ # Set rotation policy to trigger rotation asap -+ inst.config.set("nsslapd-accesslog-logrotationtimeunit", "minute") -+ inst.config.set("nsslapd-auditlog-logrotationtimeunit", "minute") -+ inst.config.set("nsslapd-errorlog-logrotationtimeunit", "minute") -+ inst.config.set("nsslapd-securitylog-logrotationtimeunit", "minute") -+ -+ # -+ # Performs ops to populate all the logs -+ # -+ # Access & audit log -+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) -+ user = users.create_test_user() -+ user.set("userPassword", PW_DM) -+ # Security log -+ user.bind(PW_DM) -+ # Error log -+ import_task = ImportTask(inst) -+ import_task.import_suffix_from_ldif(ldiffile="/not/here", -+ suffix=DEFAULT_SUFFIX) -+ -+ # Wait a minute and make sure the server did not crash -+ log.info("Sleep until logs are flushed and rotated") -+ time.sleep(61) -+ -+ assert inst.status() -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main(["-s", CURRENT_FILE]) -+ -diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c -index 8352f4abd..c1260a203 100644 ---- a/ldap/servers/slapd/log.c -+++ b/ldap/servers/slapd/log.c -@@ -6746,6 +6746,23 @@ log_refresh_state(int32_t log_type) - return 0; - } - } -+static LOGFD -+log_refresh_fd(int32_t log_type) -+{ -+ switch (log_type) { -+ case SLAPD_ACCESS_LOG: -+ return loginfo.log_access_fdes; -+ case SLAPD_SECURITY_LOG: -+ return loginfo.log_security_fdes; -+ case SLAPD_AUDIT_LOG: -+ return loginfo.log_audit_fdes; -+ case SLAPD_AUDITFAIL_LOG: -+ return loginfo.log_auditfail_fdes; -+ case SLAPD_ERROR_LOG: -+ return loginfo.log_error_fdes; -+ } -+ return NULL; -+} - - /* this function assumes the lock is already acquired */ - /* if sync_now is non-zero, data is flushed to physical storage */ -@@ -6857,6 +6874,7 @@ log_flush_buffer(LogBufferInfo *lbi, int log_type, int sync_now, int locked) - rotationtime_secs); - } - log_state = log_refresh_state(log_type); -+ fd = log_refresh_fd(log_type); - } - - if (log_state & LOGGING_NEED_TITLE) { --- -2.48.0 - diff --git a/SOURCES/0002-Issue-6838-lib389-replica.py-is-using-nonexistent-da.patch b/SOURCES/0002-Issue-6838-lib389-replica.py-is-using-nonexistent-da.patch new file mode 100644 index 0000000..5a04986 --- /dev/null +++ b/SOURCES/0002-Issue-6838-lib389-replica.py-is-using-nonexistent-da.patch @@ -0,0 +1,37 @@ +From a91c2641646824e44ef3b31a7eea238e3f55e5c3 Mon Sep 17 00:00:00 2001 +From: Viktor Ashirov +Date: Tue, 1 Jul 2025 12:44:04 +0200 +Subject: [PATCH] Issue 6838 - lib389/replica.py is using nonexistent + datetime.UTC in Python 3.9 + +Bug Description: +389-ds-base-2.x is supposed to be used with Python 3.9. +But lib389/replica.py is using `datetime.UTC`, which is an alias +to `datetime.timezone.utc` was added only in Python 3.11. + +Fix Description: +Use `datetime.timezone.utc` instead. + +Fixes: https://github.com/389ds/389-ds-base/issues/6838 + +Reviewed by: @mreynolds389 (Thanks!) 
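For illustration only (the timestamp is a made-up CSN prefix; the formatting call mirrors the patched line in replica.py): datetime.UTC exists only on Python 3.11+, while datetime.timezone.utc is also available on Python 3.9.

    import datetime
    timestamp = 0x5f000000  # hypothetical CSN timestamp prefix
    # datetime.UTC raises AttributeError on Python 3.9; timezone.utc works everywhere.
    print(datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
          .strftime('%Y-%m-%d %H:%M:%S'))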
+--- + src/lib389/lib389/replica.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py +index 8791f7f4c..78d6eb4eb 100644 +--- a/src/lib389/lib389/replica.py ++++ b/src/lib389/lib389/replica.py +@@ -917,7 +917,7 @@ class RUV(object): + ValueError("Wrong CSN value was supplied") + + timestamp = int(csn[:8], 16) +- time_str = datetime.datetime.fromtimestamp(timestamp, datetime.UTC).strftime('%Y-%m-%d %H:%M:%S') ++ time_str = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc).strftime('%Y-%m-%d %H:%M:%S') + # We are parsing shorter CSN which contains only timestamp + if len(csn) == 8: + return time_str +-- +2.49.0 + diff --git a/SOURCES/0003-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch b/SOURCES/0003-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch deleted file mode 100644 index 44a0cb1..0000000 --- a/SOURCES/0003-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch +++ /dev/null @@ -1,311 +0,0 @@ -From f077f9692d1625a1bc2dc6ee02a4fca71ee30b03 Mon Sep 17 00:00:00 2001 -From: progier389 -Date: Wed, 13 Nov 2024 15:31:35 +0100 -Subject: [PATCH] Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work - properly (#6400) - -* Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work properly - -Several issues: - -After restarting the server nsslapd-mdb-max-dbs may not be high enough to add a new backend -because the value computation is wrong. -dbscan fails to open the database if nsslapd-mdb-max-dbs has been increased. -dbscan crashes when closing the database (typically when using -S) -When starting the instance the nsslapd-mdb-max-dbs parameter is increased to ensure that a new backend may be added. -When dse.ldif path is not specified, the db environment is now open using the INFO.mdb data instead of using the default values. -synchronization between thread closure and database context destruction is hardened -Issue: #6374 - -Reviewed by: @tbordaz , @vashirov (Thanks!) 
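For illustration only (hypothetical counts and margin; only the shape of the formula comes from the mdb_config.c hunk further down in this patch): the corrected autotuning reserves enough database slots per suffix so that one more backend can still be added after a restart.

    # Each suffix needs 4 core databases (long-entryrdn, replication_changelog,
    # id2entry, ancestorid), plus one slot per index and two per VLV index,
    # plus a safety margin.
    DBMDB_DBS_MARGIN = 10                      # placeholder, not the real constant
    nbsuffixes, nbindexes, nbvlvs = 2, 30, 1   # hypothetical counts
    min_dbs = 4 * nbsuffixes + nbindexes + 2 * nbvlvs + DBMDB_DBS_MARGIN
    print(f"nsslapd-mdb-max-dbs lower bound: {min_dbs}")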
- -(cherry picked from commit 56cd3389da608a3f6eeee58d20dffbcd286a8033) ---- - .../tests/suites/config/config_test.py | 86 +++++++++++++++++++ - ldap/servers/slapd/back-ldbm/back-ldbm.h | 2 + - .../slapd/back-ldbm/db-mdb/mdb_config.c | 17 ++-- - .../back-ldbm/db-mdb/mdb_import_threads.c | 9 +- - .../slapd/back-ldbm/db-mdb/mdb_instance.c | 8 ++ - ldap/servers/slapd/back-ldbm/dbimpl.c | 2 +- - ldap/servers/slapd/back-ldbm/import.c | 14 ++- - 7 files changed, 128 insertions(+), 10 deletions(-) - -diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py -index 57b155af7..34dac36b6 100644 ---- a/dirsrvtests/tests/suites/config/config_test.py -+++ b/dirsrvtests/tests/suites/config/config_test.py -@@ -17,6 +17,7 @@ from lib389.topologies import topology_m2, topology_st as topo - from lib389.utils import * - from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX, DEFAULT_BENAME - from lib389._mapped_object import DSLdapObjects -+from lib389.agreement import Agreements - from lib389.cli_base import FakeArgs - from lib389.cli_conf.backend import db_config_set - from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES -@@ -27,6 +28,8 @@ from lib389.cos import CosPointerDefinitions, CosTemplates - from lib389.backend import Backends, DatabaseConfig - from lib389.monitor import MonitorLDBM, Monitor - from lib389.plugins import ReferentialIntegrityPlugin -+from lib389.replica import BootstrapReplicationManager, Replicas -+from lib389.passwd import password_generate - - pytestmark = pytest.mark.tier0 - -@@ -36,6 +39,8 @@ PSTACK_CMD = '/usr/bin/pstack' - logging.getLogger(__name__).setLevel(logging.INFO) - log = logging.getLogger(__name__) - -+DEBUGGING = os.getenv("DEBUGGING", default=False) -+ - @pytest.fixture(scope="module") - def big_file(): - TEMP_BIG_FILE = '' -@@ -811,6 +816,87 @@ def test_numlisteners_limit(topo): - assert numlisteners[0] == '4' - - -+def bootstrap_replication(inst_from, inst_to, creds): -+ manager = BootstrapReplicationManager(inst_to) -+ rdn_val = 'replication manager' -+ if manager.exists(): -+ manager.delete() -+ manager.create(properties={ -+ 'cn': rdn_val, -+ 'uid': rdn_val, -+ 'userPassword': creds -+ }) -+ for replica in Replicas(inst_to).list(): -+ replica.remove_all('nsDS5ReplicaBindDNGroup') -+ replica.replace('nsDS5ReplicaBindDN', manager.dn) -+ for agmt in Agreements(inst_from).list(): -+ agmt.replace('nsDS5ReplicaBindDN', manager.dn) -+ agmt.replace('nsDS5ReplicaCredentials', creds) -+ -+ -+@pytest.mark.skipif(get_default_db_lib() != "mdb", reason="This test requires lmdb") -+def test_lmdb_autotuned_maxdbs(topology_m2, request): -+ """Verify that after restart, nsslapd-mdb-max-dbs is large enough to add a new backend. -+ -+ :id: 0272d432-9080-11ef-8f40-482ae39447e5 -+ :setup: Two suppliers configuration -+ :steps: -+ 1. loop 20 times -+ 3. In 1 loop: restart instance -+ 3. In 1 loop: add a new backend -+ 4. In 1 loop: check that instance is still alive -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. Success -+ """ -+ -+ s1 = topology_m2.ms["supplier1"] -+ s2 = topology_m2.ms["supplier2"] -+ -+ backends = Backends(s1) -+ db_config = DatabaseConfig(s1) -+ # Generate the teardown finalizer -+ belist = [] -+ creds=password_generate() -+ bootstrap_replication(s2, s1, creds) -+ bootstrap_replication(s1, s2, creds) -+ -+ def fin(): -+ s1.start() -+ for be in belist: -+ be.delete() -+ -+ if not DEBUGGING: -+ request.addfinalizer(fin) -+ -+ # 1. 
Set autotuning (off-line to be able to decrease the value) -+ s1.stop() -+ dse_ldif = DSEldif(s1) -+ dse_ldif.replace(db_config.dn, 'nsslapd-mdb-max-dbs', '0') -+ os.remove(f'{s1.dbdir}/data.mdb') -+ s1.start() -+ -+ # 2. Reinitialize the db: -+ log.info("Bulk import...") -+ agmt = Agreements(s2).list()[0] -+ agmt.begin_reinit() -+ (done, error) = agmt.wait_reinit() -+ log.info(f'Bulk importresult is ({done}, {error})') -+ assert done is True -+ assert error is False -+ -+ # 3. loop 20 times -+ for idx in range(20): -+ s1.restart() -+ log.info(f'Adding backend test{idx}') -+ belist.append(backends.create(properties={'cn': f'test{idx}', -+ 'nsslapd-suffix': f'dc=test{idx}'})) -+ assert s1.status() -+ -+ -+ - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode -diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h -index 8fea63e35..35d0ece04 100644 ---- a/ldap/servers/slapd/back-ldbm/back-ldbm.h -+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h -@@ -896,4 +896,6 @@ typedef struct _back_search_result_set - ((L)->size == (R)->size && !memcmp((L)->data, (R)->data, (L)->size)) - - typedef int backend_implement_init_fn(struct ldbminfo *li, config_info *config_array); -+ -+pthread_mutex_t *get_import_ctx_mutex(); - #endif /* _back_ldbm_h_ */ -diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c -index 351f54037..1f7b71442 100644 ---- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c -+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c -@@ -83,7 +83,7 @@ dbmdb_compute_limits(struct ldbminfo *li) - uint64_t total_space = 0; - uint64_t avail_space = 0; - uint64_t cur_dbsize = 0; -- int nbchangelogs = 0; -+ int nbvlvs = 0; - int nbsuffixes = 0; - int nbindexes = 0; - int nbagmt = 0; -@@ -99,8 +99,8 @@ dbmdb_compute_limits(struct ldbminfo *li) - * But some tunable may be autotuned. 
- */ - if (dbmdb_count_config_entries("(objectClass=nsMappingTree)", &nbsuffixes) || -- dbmdb_count_config_entries("(objectClass=nsIndex)", &nbsuffixes) || -- dbmdb_count_config_entries("(&(objectClass=nsds5Replica)(nsDS5Flags=1))", &nbchangelogs) || -+ dbmdb_count_config_entries("(objectClass=nsIndex)", &nbindexes) || -+ dbmdb_count_config_entries("(objectClass=vlvIndex)", &nbvlvs) || - dbmdb_count_config_entries("(objectClass=nsds5replicationagreement)", &nbagmt)) { - /* error message is already logged */ - return 1; -@@ -120,8 +120,15 @@ dbmdb_compute_limits(struct ldbminfo *li) - - info->pagesize = sysconf(_SC_PAGE_SIZE); - limits->min_readers = config_get_threadnumber() + nbagmt + DBMDB_READERS_MARGIN; -- /* Default indexes are counted in "nbindexes" so we should always have enough resource to add 1 new suffix */ -- limits->min_dbs = nbsuffixes + nbindexes + nbchangelogs + DBMDB_DBS_MARGIN; -+ /* -+ * For each suffix there are 4 databases instances: -+ * long-entryrdn, replication_changelog, id2entry and ancestorid -+ * then the indexes and the vlv and vlv cache -+ * -+ * Default indexes are counted in "nbindexes" so we should always have enough -+ * resource to add 1 new suffix -+ */ -+ limits->min_dbs = 4*nbsuffixes + nbindexes + 2*nbvlvs + DBMDB_DBS_MARGIN; - - total_space = ((uint64_t)(buf.f_blocks)) * ((uint64_t)(buf.f_bsize)); - avail_space = ((uint64_t)(buf.f_bavail)) * ((uint64_t)(buf.f_bsize)); -diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c -index 8c879da31..707a110c5 100644 ---- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c -+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c -@@ -4312,9 +4312,12 @@ dbmdb_import_init_writer(ImportJob *job, ImportRole_t role) - void - dbmdb_free_import_ctx(ImportJob *job) - { -- if (job->writer_ctx) { -- ImportCtx_t *ctx = job->writer_ctx; -- job->writer_ctx = NULL; -+ ImportCtx_t *ctx = NULL; -+ pthread_mutex_lock(get_import_ctx_mutex()); -+ ctx = job->writer_ctx; -+ job->writer_ctx = NULL; -+ pthread_mutex_unlock(get_import_ctx_mutex()); -+ if (ctx) { - pthread_mutex_destroy(&ctx->workerq.mutex); - pthread_cond_destroy(&ctx->workerq.cv); - slapi_ch_free((void**)&ctx->workerq.slots); -diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c -index 6386ecf06..05f1e348d 100644 ---- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c -+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c -@@ -287,6 +287,13 @@ int add_dbi(dbi_open_ctx_t *octx, backend *be, const char *fname, int flags) - slapi_ch_free((void**)&treekey.dbname); - return octx->rc; - } -+ if (treekey.dbi >= ctx->dsecfg.max_dbs) { -+ octx->rc = MDB_DBS_FULL; -+ slapi_log_err(SLAPI_LOG_ERR, "add_dbi", "Failed to open database instance %s slots: %d/%d. 
Error is %d: %s.\n", -+ treekey.dbname, treekey.dbi, ctx->dsecfg.max_dbs, octx->rc, mdb_strerror(octx->rc)); -+ slapi_ch_free((void**)&treekey.dbname); -+ return octx->rc; -+ } - if (octx->ai && octx->ai->ai_key_cmp_fn) { - octx->rc = dbmdb_update_dbi_cmp_fn(ctx, &treekey, octx->ai->ai_key_cmp_fn, octx->txn); - if (octx->rc) { -@@ -689,6 +696,7 @@ int dbmdb_make_env(dbmdb_ctx_t *ctx, int readOnly, mdb_mode_t mode) - rc = dbmdb_write_infofile(ctx); - } else { - /* No Config ==> read it from info file */ -+ ctx->dsecfg = ctx->startcfg; - } - if (rc) { - return rc; -diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c -index da4a4548e..42f4a0718 100644 ---- a/ldap/servers/slapd/back-ldbm/dbimpl.c -+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c -@@ -463,7 +463,7 @@ int dblayer_show_statistics(const char *dbimpl_name, const char *dbhome, FILE *f - li->li_plugin = be->be_database; - li->li_plugin->plg_name = (char*) "back-ldbm-dbimpl"; - li->li_plugin->plg_libpath = (char*) "libback-ldbm"; -- li->li_directory = (char*)dbhome; -+ li->li_directory = get_li_directory(dbhome); - - /* Initialize database plugin */ - rc = dbimpl_setup(li, dbimpl_name); -diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c -index 2bb8cb581..30ec462fa 100644 ---- a/ldap/servers/slapd/back-ldbm/import.c -+++ b/ldap/servers/slapd/back-ldbm/import.c -@@ -27,6 +27,9 @@ - #define NEED_DN_NORM_SP -25 - #define NEED_DN_NORM_BT -26 - -+/* Protect against import context destruction */ -+static pthread_mutex_t import_ctx_mutex = PTHREAD_MUTEX_INITIALIZER; -+ - - /********** routines to manipulate the entry fifo **********/ - -@@ -143,6 +146,14 @@ ldbm_back_wire_import(Slapi_PBlock *pb) - - /* Threads management */ - -+/* Return the mutex that protects against import context destruction */ -+pthread_mutex_t * -+get_import_ctx_mutex() -+{ -+ return &import_ctx_mutex; -+} -+ -+ - /* tell all the threads to abort */ - void - import_abort_all(ImportJob *job, int wait_for_them) -@@ -151,7 +162,7 @@ import_abort_all(ImportJob *job, int wait_for_them) - - /* tell all the worker threads to abort */ - job->flags |= FLAG_ABORT; -- -+ pthread_mutex_lock(&import_ctx_mutex); - for (worker = job->worker_list; worker; worker = worker->next) - worker->command = ABORT; - -@@ -167,6 +178,7 @@ import_abort_all(ImportJob *job, int wait_for_them) - } - } - } -+ pthread_mutex_unlock(&import_ctx_mutex); - } - - --- -2.48.0 - diff --git a/SOURCES/0003-Issue-6680-instance-read-only-mode-is-broken-6681.patch b/SOURCES/0003-Issue-6680-instance-read-only-mode-is-broken-6681.patch new file mode 100644 index 0000000..ac0af7b --- /dev/null +++ b/SOURCES/0003-Issue-6680-instance-read-only-mode-is-broken-6681.patch @@ -0,0 +1,351 @@ +From 4eef34cec551582d1de23266bc6cde84a7e38b5d Mon Sep 17 00:00:00 2001 +From: progier389 +Date: Mon, 24 Mar 2025 10:43:21 +0100 +Subject: [PATCH] Issue 6680 - instance read-only mode is broken (#6681) + +Read only mode is broken because some plugins fails to starts as they are not able to create/updates some entries in the dse backend. +Solution is to allow interrnal operations to write in dse.backend but not modify the dse.ldif (except for the special case when trying to modify nsslapd-readonly flags (to be allowed to set/unset the readonly mode) + +Issue: #6680 + +Reviewed by: @droideck, @tbordaz (thanks!) 
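For illustration only (a condensed lib389 sketch of the behaviour exercised by the regression test below; `inst` is assumed to be an already-connected DirSrv handle): in read-only mode the nsslapd-readonly flag itself can still be toggled, while other config writes are rejected.

    import ldap
    inst.config.replace('nsslapd-readonly', 'on')     # switching read-only on is allowed
    try:
        inst.config.replace('nsslapd-errorlog-maxlogsize', '100')
    except ldap.UNWILLING_TO_PERFORM:
        pass                                          # other writes are rejected while read-only
    inst.config.replace('nsslapd-readonly', 'off')    # and the flag can be switched back off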
+--- + .../tests/suites/config/regression_test.py | 60 ++++++++++ + ldap/servers/slapd/dse.c | 110 +++++++++++++++++- + ldap/servers/slapd/mapping_tree.c | 90 ++++++++++++-- + 3 files changed, 247 insertions(+), 13 deletions(-) + +diff --git a/dirsrvtests/tests/suites/config/regression_test.py b/dirsrvtests/tests/suites/config/regression_test.py +index 8dbba8cd2..6e313ac8a 100644 +--- a/dirsrvtests/tests/suites/config/regression_test.py ++++ b/dirsrvtests/tests/suites/config/regression_test.py +@@ -28,6 +28,8 @@ CUSTOM_MEM = '9100100100' + IDLETIMEOUT = 5 + DN_TEST_USER = f'uid={TEST_USER_PROPERTIES["uid"]},ou=People,{DEFAULT_SUFFIX}' + ++RO_ATTR = 'nsslapd-readonly' ++ + + @pytest.fixture(scope="module") + def idletimeout_topo(topo, request): +@@ -190,3 +192,61 @@ def test_idletimeout(idletimeout_topo, dn, expected_result): + except ldap.SERVER_DOWN: + result = True + assert expected_result == result ++ ++ ++def test_instance_readonly_mode(topo): ++ """Check that readonly mode is supported ++ ++ :id: 34d2e28e-04d7-11f0-b0cf-482ae39447e5 ++ :setup: Standalone Instance ++ :steps: ++ 1. Set readonly mode ++ 2. Stop the instance ++ 3. Get dse.ldif modification time ++ 4. Start the instance ++ 5. Get dse.ldif modification time ++ 6. Check that modification time has not changed ++ 7. Check that readonly mode is set ++ 8. Try to modify another config attribute ++ 9. Unset readonly mode ++ 10. Restart the instance ++ 11. Check that modification time has not changed ++ 12. Check that modification time has changed ++ 13. Check that readonly mode is unset ++ 14. Try to modify another config attribute ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Success ++ 8. Should get ldap.UNWILLING_TO_PERFORM exception ++ 9. Success ++ 10. Success ++ 11. Success ++ 12. Success ++ 13. Success ++ 14. Success ++ """ ++ ++ inst = topo.standalone ++ dse_path = f'{topo.standalone.get_config_dir()}/dse.ldif' ++ inst.config.replace(RO_ATTR, 'on') ++ inst.stop() ++ dse_mtime = os.stat(dse_path).st_mtime ++ inst.start() ++ new_dse_mtime = os.stat(dse_path).st_mtime ++ assert dse_mtime == new_dse_mtime ++ assert inst.config.get_attr_val_utf8(RO_ATTR) == "on" ++ attr = 'nsslapd-errorlog-maxlogsize' ++ val = inst.config.get_attr_val_utf8(attr) ++ with pytest.raises(ldap.UNWILLING_TO_PERFORM): ++ inst.config.replace(attr, val) ++ inst.config.replace(RO_ATTR, 'off') ++ inst.restart() ++ new_dse_mtime = os.stat(dse_path).st_mtime ++ assert dse_mtime != new_dse_mtime ++ assert inst.config.get_attr_val_utf8(RO_ATTR) == "off" ++ inst.config.replace(attr, val) +diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c +index e3157c1ce..0f266f0d7 100644 +--- a/ldap/servers/slapd/dse.c ++++ b/ldap/servers/slapd/dse.c +@@ -1031,6 +1031,114 @@ dse_check_for_readonly_error(Slapi_PBlock *pb, struct dse *pdse) + return rc; /* no error */ + } + ++/* Trivial wrapper around slapi_re_comp to handle errors */ ++static Slapi_Regex * ++recomp(const char *regexp) ++{ ++ char *error = ""; ++ Slapi_Regex *re = slapi_re_comp(regexp, &error); ++ if (re == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "is_readonly_set_in_dse", ++ "Failed to compile '%s' regular expression. 
Error is %s\n", ++ regexp, error); ++ } ++ slapi_ch_free_string(&error); ++ return re; ++} ++ ++/* ++ * Check if "nsslapd-readonly: on" is in cn-config in dse.ldif file ++ * ( If the flag is set in memory but on in the file, the file should ++ * be written (to let dsconf able to modify the nsslapd-readonly flag) ++ */ ++static bool ++is_readonly_set_in_dse(const char *dsename) ++{ ++ Slapi_Regex *re_config = recomp("^dn:\\s+cn=config\\s*$"); ++ Slapi_Regex *re_isro = recomp("^" CONFIG_READONLY_ATTRIBUTE ":\\s+on\\s*$"); ++ Slapi_Regex *re_eoe = recomp("^$"); ++ bool isconfigentry = false; ++ bool isro = false; ++ FILE *fdse = NULL; ++ char line[128]; ++ char *error = NULL; ++ const char *regexp = ""; ++ ++ if (!dsename) { ++ goto done; ++ } ++ if (re_config == NULL || re_isro == NULL || re_eoe == NULL) { ++ goto done; ++ } ++ fdse = fopen(dsename, "r"); ++ if (fdse == NULL) { ++ /* No dse file, we need to write it */ ++ goto done; ++ } ++ while (fgets(line, (sizeof line), fdse)) { ++ /* Convert the read line to lowercase */ ++ for (char *pt=line; *pt; pt++) { ++ if (isalpha(*pt)) { ++ *pt = tolower(*pt); ++ } ++ } ++ if (slapi_re_exec_nt(re_config, line)) { ++ isconfigentry = true; ++ } ++ if (slapi_re_exec_nt(re_eoe, line)) { ++ if (isconfigentry) { ++ /* End of config entry ==> readonly flag is not set */ ++ break; ++ } ++ } ++ if (isconfigentry && slapi_re_exec_nt(re_isro, line)) { ++ /* Found readonly flag */ ++ isro = true; ++ break; ++ } ++ } ++done: ++ if (fdse) { ++ (void) fclose(fdse); ++ } ++ slapi_re_free(re_config); ++ slapi_re_free(re_isro); ++ slapi_re_free(re_eoe); ++ return isro; ++} ++ ++/* ++ * Check if dse.ldif can be written ++ * Beware that even in read-only mode dse.ldif file ++ * should still be written to change the nsslapd-readonly value ++ */ ++static bool ++check_if_readonly(struct dse *pdse) ++{ ++ static bool ro = false; ++ ++ if (pdse->dse_filename == NULL) { ++ return false; ++ } ++ if (!slapi_config_get_readonly()) { ++ ro = false; ++ return ro; ++ } ++ if (ro) { ++ /* read-only mode and dse is up to date ==> Do not modify it. */ ++ return ro; ++ } ++ /* First attempt to write the dse.ldif since readonly mode is enabled. ++ * Lets check if "nsslapd-readonly: on" is in cn=config entry ++ * and allow to write the dse.ldif if it is the case ++ */ ++ if (is_readonly_set_in_dse(pdse->dse_filename)) { ++ /* read-only mode and dse is up to date ==> Do not modify it. */ ++ ro = true; ++ } ++ /* Read only mode but nsslapd-readonly value is not up to date. */ ++ return ro; ++} + + /* + * Write the AVL tree of entries back to the LDIF file. +@@ -1041,7 +1149,7 @@ dse_write_file_nolock(struct dse *pdse) + FPWrapper fpw; + int rc = 0; + +- if (dont_ever_write_dse_files) { ++ if (dont_ever_write_dse_files || check_if_readonly(pdse)) { + return rc; + } + +diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c +index dd7b1af37..e51b3b948 100644 +--- a/ldap/servers/slapd/mapping_tree.c ++++ b/ldap/servers/slapd/mapping_tree.c +@@ -2058,6 +2058,82 @@ slapi_dn_write_needs_referral(Slapi_DN *target_sdn, Slapi_Entry **referral) + done: + return ret; + } ++ ++/* ++ * This function dermines if an operation should be rejected ++ * when readonly mode is enabled. 
++ * All operations are rejected except: ++ * - if they target a private backend that is not the DSE backend ++ * - if they are read operations (SEARCH, COMPARE, BIND, UNBIND) ++ * - if they are tombstone fixup operation (i.e: tombstone purging) ++ * - if they are internal operation that targets the DSE backend. ++ * (change will then be done in memory but not written in dse.ldif) ++ * - single modify modify operation on cn=config changing nsslapd-readonly ++ * (to allow "dsconf instance config replace nsslapd-readonly=xxx", ++ change will then be done both in memory and in dse.ldif) ++ */ ++static bool ++is_rejected_op(Slapi_Operation *op, Slapi_Backend *be) ++{ ++ const char *betype = slapi_be_gettype(be); ++ unsigned long be_op_type = operation_get_type(op); ++ int isdse = (betype && strcmp(betype, "DSE") == 0); ++ ++ /* Private backend operations are not rejected */ ++ ++ /* Read operations are not rejected */ ++ if ((be_op_type == SLAPI_OPERATION_SEARCH) || ++ (be_op_type == SLAPI_OPERATION_COMPARE) || ++ (be_op_type == SLAPI_OPERATION_BIND) || ++ (be_op_type == SLAPI_OPERATION_UNBIND)) { ++ return false; ++ } ++ ++ /* Tombstone fixup are not rejected. */ ++ if (operation_is_flag_set(op, OP_FLAG_TOMBSTONE_FIXUP)) { ++ return false; ++ } ++ ++ if (!isdse) { ++ /* write operation on readonly backends are rejected */ ++ if (be->be_readonly) { ++ return true; ++ } ++ ++ /* private backends (DSE excepted) are not backed on files ++ * so write operations are accepted. ++ * but other operations (not on DSE) are rejected. ++ */ ++ if (slapi_be_private(be)) { ++ return false; ++ } else { ++ return true; ++ } ++ } ++ ++ /* Allowed operations in dse backend are: ++ * - the internal operations and ++ * - modify of nsslapd-readonly flag in cn=config ++ */ ++ ++ if (operation_is_flag_set(op, OP_FLAG_INTERNAL)) { ++ return false; ++ } ++ if (be_op_type == SLAPI_OPERATION_MODIFY) { ++ Slapi_DN *sdn = operation_get_target_spec(op); ++ Slapi_DN config = {0}; ++ LDAPMod **mods = op->o_params.p.p_modify.modify_mods; ++ slapi_sdn_init_ndn_byref(&config, SLAPD_CONFIG_DN); ++ if (mods && mods[0] && !mods[1] && ++ slapi_sdn_compare(sdn, &config) == 0 && ++ strcasecmp(mods[0]->mod_type, CONFIG_READONLY_ATTRIBUTE) == 0) { ++ /* Single modifier impacting nsslapd-readonly */ ++ return false; ++ } ++ } ++ return true; ++} ++ + /* + * Description: + * The reason we have a mapping tree. 
This function selects a backend or +@@ -2095,7 +2171,6 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re + int ret; + int scope = LDAP_SCOPE_BASE; + int op_type; +- int fixup = 0; + + if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) { + /* shutdown detected */ +@@ -2112,7 +2187,6 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re + + /* Get the target for this op */ + target_sdn = operation_get_target_spec(op); +- fixup = operation_is_flag_set(op, OP_FLAG_TOMBSTONE_FIXUP); + + PR_ASSERT(mapping_tree_inited == 1); + +@@ -2161,22 +2235,14 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re + * or if the whole server is readonly AND backend is public (!private) + */ + if ((ret == LDAP_SUCCESS) && *be && !be_isdeleted(*be) && +- (((*be)->be_readonly && !fixup) || +- ((slapi_config_get_readonly() && !fixup) && +- !slapi_be_private(*be)))) { +- unsigned long be_op_type = operation_get_type(op); +- +- if ((be_op_type != SLAPI_OPERATION_SEARCH) && +- (be_op_type != SLAPI_OPERATION_COMPARE) && +- (be_op_type != SLAPI_OPERATION_BIND) && +- (be_op_type != SLAPI_OPERATION_UNBIND)) { ++ ((*be)->be_readonly || slapi_config_get_readonly()) && ++ is_rejected_op(op, *be)) { + if (errorbuf) { + PL_strncpyz(errorbuf, slapi_config_get_readonly() ? "Server is read-only" : "database is read-only", ebuflen); + } + ret = LDAP_UNWILLING_TO_PERFORM; + slapi_be_Unlock(*be); + *be = NULL; +- } + } + + return ret; +-- +2.49.0 + diff --git a/SOURCES/0004-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch b/SOURCES/0004-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch deleted file mode 100644 index 648eea5..0000000 --- a/SOURCES/0004-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch +++ /dev/null @@ -1,894 +0,0 @@ -From b53faa9e7289383bbc02fc260b1b34958a317fdd Mon Sep 17 00:00:00 2001 -From: progier389 -Date: Fri, 6 Sep 2024 14:45:06 +0200 -Subject: [PATCH] Issue 6090 - Fix dbscan options and man pages (#6315) - -* Issue 6090 - Fix dbscan options and man pages - -dbscan -d option is dangerously confusing as it removes a database instance while in db_stat it identify the database -(cf issue #5609 ). -This fix implements long options in dbscan, rename -d in --remove, and requires a new --do-it option for action that change the database content. -The fix should also align both the usage and the dbscan man page with the new set of options - -Issue: #6090 - -Reviewed by: @tbordaz, @droideck (Thanks!) - -(cherry picked from commit 25e1d16887ebd299dfe0088080b9ee0deec1e41f) ---- - dirsrvtests/tests/suites/clu/dbscan_test.py | 253 ++++++++++++++++++ - .../tests/suites/clu/repl_monitor_test.py | 4 +- - .../slapd/back-ldbm/db-bdb/bdb_layer.c | 12 +- - ldap/servers/slapd/back-ldbm/dbimpl.c | 50 +++- - ldap/servers/slapd/tools/dbscan.c | 182 ++++++++++--- - man/man1/dbscan.1 | 74 +++-- - src/lib389/lib389/__init__.py | 9 +- - src/lib389/lib389/cli_ctl/dblib.py | 13 +- - 8 files changed, 531 insertions(+), 66 deletions(-) - create mode 100644 dirsrvtests/tests/suites/clu/dbscan_test.py - -diff --git a/dirsrvtests/tests/suites/clu/dbscan_test.py b/dirsrvtests/tests/suites/clu/dbscan_test.py -new file mode 100644 -index 000000000..2c9a9651a ---- /dev/null -+++ b/dirsrvtests/tests/suites/clu/dbscan_test.py -@@ -0,0 +1,253 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2024 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. 
-+# --- END COPYRIGHT BLOCK --- -+# -+import logging -+import os -+import pytest -+import re -+import subprocess -+import sys -+ -+from lib389 import DirSrv -+from lib389._constants import DBSCAN -+from lib389.topologies import topology_m2 as topo_m2 -+from difflib import context_diff -+ -+pytestmark = pytest.mark.tier0 -+ -+logging.getLogger(__name__).setLevel(logging.DEBUG) -+log = logging.getLogger(__name__) -+ -+DEBUGGING = os.getenv("DEBUGGING", default=False) -+ -+ -+class CalledProcessUnexpectedReturnCode(subprocess.CalledProcessError): -+ def __init__(self, result, expected_rc): -+ super().__init__(cmd=result.args, returncode=result.returncode, output=result.stdout, stderr=result.stderr) -+ self.expected_rc = expected_rc -+ self.result = result -+ -+ def __str__(self): -+ return f'Command {self.result.args} returned {self.result.returncode} instead of {self.expected_rc}' -+ -+ -+class DbscanPaths: -+ @staticmethod -+ def list_instances(inst, dblib, dbhome): -+ # compute db instance pathnames -+ instances = dbscan(['-D', dblib, '-L', dbhome], inst=inst).stdout -+ dbis = [] -+ if dblib == 'bdb': -+ pattern = r'^ (.*) $' -+ prefix = f'{dbhome}/' -+ else: -+ pattern = r'^ (.*) flags:' -+ prefix = f'' -+ for match in re.finditer(pattern, instances, flags=re.MULTILINE): -+ dbis.append(prefix+match.group(1)) -+ return dbis -+ -+ @staticmethod -+ def list_options(inst): -+ # compute supported options -+ options = [] -+ usage = dbscan(['-h'], inst=inst, expected_rc=None).stdout -+ pattern = r'^\s+(?:(-[^-,]+), +)?(--[^ ]+).*$' -+ for match in re.finditer(pattern, usage, flags=re.MULTILINE): -+ for idx in range(1,3): -+ if match.group(idx) is not None: -+ options.append(match.group(idx)) -+ return options -+ -+ def __init__(self, inst): -+ dblib = inst.get_db_lib() -+ dbhome = inst.ds_paths.db_home_dir -+ self.inst = inst -+ self.dblib = dblib -+ self.dbhome = dbhome -+ self.options = DbscanPaths.list_options(inst) -+ self.dbis = DbscanPaths.list_instances(inst, dblib, dbhome) -+ self.ldif_dir = inst.ds_paths.ldif_dir -+ -+ def get_dbi(self, attr, backend='userroot'): -+ for dbi in self.dbis: -+ if f'{backend}/{attr}.'.lower() in dbi.lower(): -+ return dbi -+ raise KeyError(f'Unknown dbi {backend}/{attr}') -+ -+ def __repr__(self): -+ attrs = ['inst', 'dblib', 'dbhome', 'ldif_dir', 'options', 'dbis' ] -+ res = ", ".join(map(lambda x: f'{x}={self.__dict__[x]}', attrs)) -+ return f'DbscanPaths({res})' -+ -+ -+def dbscan(args, inst=None, expected_rc=0): -+ if inst is None: -+ prefix = os.environ.get('PREFIX', "") -+ prog = f'{prefix}/bin/dbscan' -+ else: -+ prog = os.path.join(inst.ds_paths.bin_dir, DBSCAN) -+ args.insert(0, prog) -+ output = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.STDOUT) -+ log.debug(f'{args} result is {output.returncode} output is {output.stdout}') -+ if expected_rc is not None and expected_rc != output.returncode: -+ raise CalledProcessUnexpectedReturnCode(output, expected_rc) -+ return output -+ -+ -+def log_export_file(filename): -+ with open(filename, 'r') as file: -+ log.debug(f'=========== Dump of {filename} ================') -+ for line in file: -+ log.debug(line.rstrip('\n')) -+ log.debug(f'=========== Enf of {filename} =================') -+ -+ -+@pytest.fixture(scope='module') -+def paths(topo_m2, request): -+ inst = topo_m2.ms["supplier1"] -+ if sys.version_info < (3,5): -+ pytest.skip('requires python version >= 3.5') -+ paths = DbscanPaths(inst) -+ if '--do-it' not in paths.options: -+ pytest.skip('Not supported with 
this dbscan version') -+ inst.stop() -+ return paths -+ -+ -+def test_dbscan_destructive_actions(paths, request): -+ """Test that dbscan remove/import actions -+ -+ :id: f40b0c42-660a-11ef-9544-083a88554478 -+ :setup: Stopped standalone instance -+ :steps: -+ 1. Export cn instance with dbscan -+ 2. Run dbscan --remove ... -+ 3. Check the error message about missing --do-it -+ 4. Check that cn instance is still present -+ 5. Run dbscan -I import_file ... -+ 6. Check it was properly imported -+ 7. Check that cn instance is still present -+ 8. Run dbscan --remove ... --doit -+ 9. Check the error message about missing --do-it -+ 10. Check that cn instance is still present -+ 11. Run dbscan -I import_file ... --do-it -+ 12. Check it was properly imported -+ 13. Check that cn instance is still present -+ 14. Export again the database -+ 15. Check that content of export files are the same -+ :expectedresults: -+ 1. Success -+ 2. dbscan return code should be 1 (error) -+ 3. Error message should be present -+ 4. cn instance should be present -+ 5. dbscan return code should be 1 (error) -+ 6. Error message should be present -+ 7. cn instance should be present -+ 8. dbscan return code should be 0 (success) -+ 9. Error message should not be present -+ 10. cn instance should not be present -+ 11. dbscan return code should be 0 (success) -+ 12. Error message should not be present -+ 13. cn instance should be present -+ 14. Success -+ 15. Export files content should be the same -+ """ -+ -+ # Export cn instance with dbscan -+ export_cn = f'{paths.ldif_dir}/dbscan_cn.data' -+ export_cn2 = f'{paths.ldif_dir}/dbscan_cn2.data' -+ cndbi = paths.get_dbi('replication_changelog') -+ inst = paths.inst -+ dblib = paths.dblib -+ exportok = False -+ def fin(): -+ if os.path.exists(export_cn): -+ # Restore cn if it was exported successfully but does not exists any more -+ if exportok and cndbi not in DbscanPaths.list_instances(inst, dblib, paths.dbhome): -+ dbscan(['-D', dblib, '-f', cndbi, '-I', export_cn, '--do-it'], inst=inst) -+ if not DEBUGGING: -+ os.remove(export_cn) -+ if os.path.exists(export_cn) and not DEBUGGING: -+ os.remove(export_cn2) -+ -+ fin() -+ request.addfinalizer(fin) -+ dbscan(['-D', dblib, '-f', cndbi, '-X', export_cn], inst=inst) -+ exportok = True -+ -+ expected_msg = "without specifying '--do-it' parameter." -+ -+ # Run dbscan --remove ... -+ result = dbscan(['-D', paths.dblib, '--remove', '-f', cndbi], -+ inst=paths.inst, expected_rc=1) -+ -+ # Check the error message about missing --do-it -+ assert expected_msg in result.stdout -+ -+ # Check that cn instance is still present -+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome) -+ assert cndbi in curdbis -+ -+ # Run dbscan -I import_file ... -+ result = dbscan(['-D', paths.dblib, '-f', cndbi, '-I', export_cn], -+ inst=paths.inst, expected_rc=1) -+ -+ # Check the error message about missing --do-it -+ assert expected_msg in result.stdout -+ -+ # Check that cn instance is still present -+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome) -+ assert cndbi in curdbis -+ -+ # Run dbscan --remove ... 
--doit -+ result = dbscan(['-D', paths.dblib, '--remove', '-f', cndbi, '--do-it'], -+ inst=paths.inst, expected_rc=0) -+ -+ # Check the error message about missing --do-it -+ assert expected_msg not in result.stdout -+ -+ # Check that cn instance is still present -+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome) -+ assert cndbi not in curdbis -+ -+ # Run dbscan -I import_file ... --do-it -+ result = dbscan(['-D', paths.dblib, '-f', cndbi, -+ '-I', export_cn, '--do-it'], -+ inst=paths.inst, expected_rc=0) -+ -+ # Check the error message about missing --do-it -+ assert expected_msg not in result.stdout -+ -+ # Check that cn instance is still present -+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome) -+ assert cndbi in curdbis -+ -+ # Export again the database -+ dbscan(['-D', dblib, '-f', cndbi, '-X', export_cn2], inst=inst) -+ -+ # Check that content of export files are the same -+ with open(export_cn) as f1: -+ f1lines = f1.readlines() -+ with open(export_cn2) as f2: -+ f2lines = f2.readlines() -+ diffs = list(context_diff(f1lines, f2lines)) -+ if len(diffs) > 0: -+ log.debug("Export file differences are:") -+ for d in diffs: -+ log.debug(d) -+ log_export_file(export_cn) -+ log_export_file(export_cn2) -+ assert diffs is None -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main("-s %s" % CURRENT_FILE) -diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -index d83416847..842dd96fd 100644 ---- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py -+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py -@@ -77,13 +77,13 @@ def get_hostnames_from_log(port1, port2): - # search for Supplier :hostname:port - # and use \D to insure there is no more number is after - # the matched port (i.e that 10 is not matching 101) -- regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)' -+ regexp = '(Supplier: )([^:]*)(:' + str(port1) + r'\D)' - match=re.search(regexp, logtext) - host_m1 = 'localhost.localdomain' - if (match is not None): - host_m1 = match.group(2) - # Same for supplier 2 -- regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)' -+ regexp = '(Supplier: )([^:]*)(:' + str(port2) + r'\D)' - match=re.search(regexp, logtext) - host_m2 = 'localhost.localdomain' - if (match is not None): -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c -index de6be0f42..4b30e8e87 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c -@@ -5820,8 +5820,16 @@ bdb_import_file_name(ldbm_instance *inst) - static char * - bdb_restore_file_name(struct ldbminfo *li) - { -- char *fname = slapi_ch_smprintf("%s/../.restore", li->li_directory); -- -+ char *pt = strrchr(li->li_directory, '/'); -+ char *fname = NULL; -+ if (pt == NULL) { -+ fname = slapi_ch_strdup(".restore"); -+ } else { -+ size_t len = pt-li->li_directory; -+ fname = slapi_ch_malloc(len+10); -+ strncpy(fname, li->li_directory, len); -+ strcpy(fname+len, "/.restore"); -+ } - return fname; - } - -diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c -index 42f4a0718..134d06480 100644 ---- a/ldap/servers/slapd/back-ldbm/dbimpl.c -+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c -@@ -397,7 +397,48 @@ const char *dblayer_op2str(dbi_op_t op) - return str[idx]; - } - --/* Open db env, db and db file privately */ -+/* 
Get the li_directory directory from the database instance name - -+ * Caller should free the returned value -+ */ -+static char * -+get_li_directory(const char *fname) -+{ -+ /* -+ * li_directory is an existing directory. -+ * it can be fname or its parent or its greatparent -+ * in case of problem returns the provided name -+ */ -+ char *lid = slapi_ch_strdup(fname); -+ struct stat sbuf = {0}; -+ char *pt = NULL; -+ for (int count=0; count<3; count++) { -+ if (stat(lid, &sbuf) == 0) { -+ if (S_ISDIR(sbuf.st_mode)) { -+ return lid; -+ } -+ /* Non directory existing file could be regular -+ * at the first iteration otherwise it is an error. -+ */ -+ if (count>0 || !S_ISREG(sbuf.st_mode)) { -+ break; -+ } -+ } -+ pt = strrchr(lid, '/'); -+ if (pt == NULL) { -+ slapi_ch_free_string(&lid); -+ return slapi_ch_strdup("."); -+ } -+ *pt = '\0'; -+ } -+ /* -+ * Error case. Returns a copy of the original string: -+ * and let dblayer_private_open_fn fail to open the database -+ */ -+ slapi_ch_free_string(&lid); -+ return slapi_ch_strdup(fname); -+} -+ -+/* Open db env, db and db file privately (for dbscan) */ - int dblayer_private_open(const char *plgname, const char *dbfilename, int rw, Slapi_Backend **be, dbi_env_t **env, dbi_db_t **db) - { - struct ldbminfo *li; -@@ -412,7 +453,7 @@ int dblayer_private_open(const char *plgname, const char *dbfilename, int rw, Sl - li->li_plugin = (*be)->be_database; - li->li_plugin->plg_name = (char*) "back-ldbm-dbimpl"; - li->li_plugin->plg_libpath = (char*) "libback-ldbm"; -- li->li_directory = slapi_ch_strdup(dbfilename); -+ li->li_directory = get_li_directory(dbfilename); - - /* Initialize database plugin */ - rc = dbimpl_setup(li, plgname); -@@ -439,7 +480,10 @@ int dblayer_private_close(Slapi_Backend **be, dbi_env_t **env, dbi_db_t **db) - } - slapi_ch_free((void**)&li->li_dblayer_private); - slapi_ch_free((void**)&li->li_dblayer_config); -- ldbm_config_destroy(li); -+ if (dblayer_is_lmdb(*be)) { -+ /* Generate use after free and double free in bdb case */ -+ ldbm_config_destroy(li); -+ } - slapi_ch_free((void**)&(*be)->be_database); - slapi_ch_free((void**)&(*be)->be_instance_info); - slapi_ch_free((void**)be); -diff --git a/ldap/servers/slapd/tools/dbscan.c b/ldap/servers/slapd/tools/dbscan.c -index 2d28dd951..12edf7c5b 100644 ---- a/ldap/servers/slapd/tools/dbscan.c -+++ b/ldap/servers/slapd/tools/dbscan.c -@@ -26,6 +26,7 @@ - #include - #include - #include -+#include - #include "../back-ldbm/dbimpl.h" - #include "../slapi-plugin.h" - #include "nspr.h" -@@ -85,6 +86,8 @@ - #define DB_BUFFER_SMALL ENOMEM - #endif - -+#define COUNTOF(array) ((sizeof(array))/sizeof(*(array))) -+ - #if defined(linux) - #include - #endif -@@ -130,9 +133,43 @@ long ind_cnt = 0; - long allids_cnt = 0; - long other_cnt = 0; - char *dump_filename = NULL; -+int do_it = 0; - - static Slapi_Backend *be = NULL; /* Pseudo backend used to interact with db */ - -+/* For Long options without shortcuts */ -+enum { -+ OPT_FIRST = 0x1000, -+ OPT_DO_IT, -+ OPT_REMOVE, -+}; -+ -+static const struct option options[] = { -+ /* Options without shortcut */ -+ { "do-it", no_argument, 0, OPT_DO_IT }, -+ { "remove", no_argument, 0, OPT_REMOVE }, -+ /* Options with shortcut */ -+ { "import", required_argument, 0, 'I' }, -+ { "export", required_argument, 0, 'X' }, -+ { "db-type", required_argument, 0, 'D' }, -+ { "dbi", required_argument, 0, 'f' }, -+ { "ascii", no_argument, 0, 'A' }, -+ { "raw", no_argument, 0, 'R' }, -+ { "truncate-entry", required_argument, 0, 't' }, -+ { "entry-id", 
required_argument, 0, 'K' }, -+ { "key", required_argument, 0, 'k' }, -+ { "list", required_argument, 0, 'L' }, -+ { "stats", required_argument, 0, 'S' }, -+ { "id-list-max-size", required_argument, 0, 'l' }, -+ { "id-list-min-size", required_argument, 0, 'G' }, -+ { "show-id-list-lenghts", no_argument, 0, 'n' }, -+ { "show-id-list", no_argument, 0, 'r' }, -+ { "summary", no_argument, 0, 's' }, -+ { "help", no_argument, 0, 'h' }, -+ { 0, 0, 0, 0 } -+}; -+ -+ - /** db_printf - functioning same as printf but a place for manipluating output. - */ - void -@@ -899,7 +936,7 @@ is_changelog(char *filename) - } - - static void --usage(char *argv0) -+usage(char *argv0, int error) - { - char *copy = strdup(argv0); - char *p0 = NULL, *p1 = NULL; -@@ -922,42 +959,52 @@ usage(char *argv0) - } - printf("\n%s - scan a db file and dump the contents\n", p0); - printf(" common options:\n"); -- printf(" -D specify db implementaion (may be: bdb or mdb)\n"); -- printf(" -f specify db file\n"); -- printf(" -A dump as ascii data\n"); -- printf(" -R dump as raw data\n"); -- printf(" -t entry truncate size (bytes)\n"); -+ printf(" -A, --ascii dump as ascii data\n"); -+ printf(" -D, --db-type specify db implementaion (may be: bdb or mdb)\n"); -+ printf(" -f, --dbi specify db instance\n"); -+ printf(" -R, --raw dump as raw data\n"); -+ printf(" -t, --truncate-entry entry truncate size (bytes)\n"); -+ - printf(" entry file options:\n"); -- printf(" -K lookup only a specific entry id\n"); -+ printf(" -K, --entry-id lookup only a specific entry id\n"); -+ - printf(" index file options:\n"); -- printf(" -k lookup only a specific key\n"); -- printf(" -L list all db files\n"); -- printf(" -S show statistics\n"); -- printf(" -l max length of dumped id list\n"); -- printf(" (default %" PRIu32 "; 40 bytes <= size <= 1048576 bytes)\n", MAX_BUFFER); -- printf(" -G only display index entries with more than ids\n"); -- printf(" -n display ID list lengths\n"); -- printf(" -r display the conents of ID list\n"); -- printf(" -s Summary of index counts\n"); -- printf(" -I file Import database content from file\n"); -- printf(" -X file Export database content in file\n"); -+ printf(" -G, --id-list-min-size only display index entries with more than ids\n"); -+ printf(" -I, --import file Import database instance from file.\n"); -+ printf(" -k, --key lookup only a specific key\n"); -+ printf(" -l, --id-list-max-size max length of dumped id list\n"); -+ printf(" (default %" PRIu32 "; 40 bytes <= size <= 1048576 bytes)\n", MAX_BUFFER); -+ printf(" -n, --show-id-list-lenghts display ID list lengths\n"); -+ printf(" --remove remove database instance\n"); -+ printf(" -r, --show-id-list display the conents of ID list\n"); -+ printf(" -S, --stats show statistics\n"); -+ printf(" -X, --export file export database instance in file\n"); -+ -+ printf(" other options:\n"); -+ printf(" -s, --summary summary of index counts\n"); -+ printf(" -L, --list list all db files\n"); -+ printf(" --do-it confirmation flags for destructive actions like --remove or --import\n"); -+ printf(" -h, --help display this usage\n"); -+ - printf(" sample usages:\n"); -- printf(" # list the db files\n"); -- printf(" %s -D mdb -L /var/lib/dirsrv/slapd-i/db/\n", p0); -- printf(" %s -f id2entry.db\n", p0); -+ printf(" # list the database instances\n"); -+ printf(" %s -L /var/lib/dirsrv/slapd-supplier1/db/\n", p0); - printf(" # dump the entry file\n"); - printf(" %s -f id2entry.db\n", p0); - printf(" # display index keys in cn.db4\n"); - printf(" %s -f cn.db4\n", p0); -+ 
printf(" # display index keys in cn on lmdb\n"); -+ printf(" %s -f /var/lib/dirsrv/slapd-supplier1/db/userroot/cn.db\n", p0); -+ printf(" (Note: Use 'dbscan -L db_home_dir' to get the db instance path)\n"); - printf(" # display index keys and the count of entries having the key in mail.db4\n"); - printf(" %s -r -f mail.db4\n", p0); - printf(" # display index keys and the IDs having more than 20 IDs in sn.db4\n"); - printf(" %s -r -G 20 -f sn.db4\n", p0); - printf(" # display summary of objectclass.db4\n"); -- printf(" %s -f objectclass.db4\n", p0); -+ printf(" %s -s -f objectclass.db4\n", p0); - printf("\n"); - free(copy); -- exit(1); -+ exit(error?1:0); - } - - void dump_ascii_val(const char *str, dbi_val_t *val) -@@ -1126,13 +1173,12 @@ importdb(const char *dbimpl_name, const char *filename, const char *dump_name) - dblayer_init_pvt_txn(); - - if (!dump) { -- printf("Failed to open dump file %s. Error %d: %s\n", dump_name, errno, strerror(errno)); -- fclose(dump); -+ printf("Error: Failed to open dump file %s. Error %d: %s\n", dump_name, errno, strerror(errno)); - return 1; - } - - if (dblayer_private_open(dbimpl_name, filename, 1, &be, &env, &db)) { -- printf("Can't initialize db plugin: %s\n", dbimpl_name); -+ printf("Error: Can't initialize db plugin: %s\n", dbimpl_name); - fclose(dump); - return 1; - } -@@ -1142,11 +1188,16 @@ importdb(const char *dbimpl_name, const char *filename, const char *dump_name) - !_read_line(dump, &keyword, &data) && keyword == 'v') { - ret = dblayer_db_op(be, db, txn.txn, DBI_OP_PUT, &key, &data); - } -+ if (ret !=0) { -+ printf("Error: failed to write record in database. Error %d: %s\n", ret, dblayer_strerror(ret)); -+ dump_ascii_val("Failing record key", &key); -+ dump_ascii_val("Failing record value", &data); -+ } - fclose(dump); - dblayer_value_free(be, &key); - dblayer_value_free(be, &data); - if (dblayer_private_close(&be, &env, &db)) { -- printf("Unable to shutdown the db plugin: %s\n", dblayer_strerror(1)); -+ printf("Error: Unable to shutdown the db plugin: %s\n", dblayer_strerror(1)); - return 1; - } - return ret; -@@ -1243,6 +1294,7 @@ removedb(const char *dbimpl_name, const char *filename) - return 1; - } - -+ db = NULL; /* Database is already closed by dblayer_db_remove */ - if (dblayer_private_close(&be, &env, &db)) { - printf("Unable to shutdown the db plugin: %s\n", dblayer_strerror(1)); - return 1; -@@ -1250,7 +1302,6 @@ removedb(const char *dbimpl_name, const char *filename) - return 0; - } - -- - int - main(int argc, char **argv) - { -@@ -1262,11 +1313,46 @@ main(int argc, char **argv) - int ret = 0; - char *find_key = NULL; - uint32_t entry_id = 0xffffffff; -- char *dbimpl_name = (char*) "bdb"; -- int c; -+ char *defdbimpl = getenv("NSSLAPD_DB_LIB"); -+ char *dbimpl_name = (char*) "mdb"; -+ int longopt_idx = 0; -+ int c = 0; -+ char optstring[2*COUNTOF(options)+1] = {0}; -+ -+ if (defdbimpl) { -+ if (strcasecmp(defdbimpl, "bdb") == 0) { -+ dbimpl_name = (char*) "bdb"; -+ } -+ if (strcasecmp(defdbimpl, "mdb") == 0) { -+ dbimpl_name = (char*) "mdb"; -+ } -+ } -+ -+ /* Compute getopt short option string */ -+ { -+ char *pt = optstring; -+ for (const struct option *opt = options; opt->name; opt++) { -+ if (opt->val>0 && opt->valval); -+ if (opt->has_arg == required_argument) { -+ *pt++ = ':'; -+ } -+ } -+ } -+ *pt = '\0'; -+ } - -- while ((c = getopt(argc, argv, "Af:RL:S:l:nG:srk:K:hvt:D:X:I:d")) != EOF) { -+ while ((c = getopt_long(argc, argv, optstring, options, &longopt_idx)) != EOF) { -+ if (c == 0) { -+ c = longopt_idx; -+ } - switch 
(c) { -+ case OPT_DO_IT: -+ do_it = 1; -+ break; -+ case OPT_REMOVE: -+ display_mode |= REMOVE; -+ break; - case 'A': - display_mode |= ASCIIDATA; - break; -@@ -1332,32 +1418,48 @@ main(int argc, char **argv) - display_mode |= IMPORT; - dump_filename = optarg; - break; -- case 'd': -- display_mode |= REMOVE; -- break; - case 'h': - default: -- usage(argv[0]); -+ usage(argv[0], 1); - } - } - -+ if (filename == NULL) { -+ fprintf(stderr, "PARAMETER ERROR! 'filename' parameter is missing.\n"); -+ usage(argv[0], 1); -+ } -+ - if (display_mode & EXPORT) { - return exportdb(dbimpl_name, filename, dump_filename); - } - - if (display_mode & IMPORT) { -+ if (!strstr(filename, "/id2entry") && !strstr(filename, "/replication_changelog")) { -+ /* schema is unknown in dbscan ==> duplicate keys sort order is unknown -+ * ==> cannot create dbi with duplicate keys -+ * ==> only id2entry and repl changelog is importable. -+ */ -+ fprintf(stderr, "ERROR: The only database instances that may be imported with dbscan are id2entry and replication_changelog.\n"); -+ exit(1); -+ } -+ -+ if (do_it == 0) { -+ fprintf(stderr, "PARAMETER ERROR! Trying to perform a destructive action (import)\n" -+ " without specifying '--do-it' parameter.\n"); -+ exit(1); -+ } - return importdb(dbimpl_name, filename, dump_filename); - } - - if (display_mode & REMOVE) { -+ if (do_it == 0) { -+ fprintf(stderr, "PARAMETER ERROR! Trying to perform a destructive action (remove)\n" -+ " without specifying '--do-it' parameter.\n"); -+ exit(1); -+ } - return removedb(dbimpl_name, filename); - } - -- if (filename == NULL) { -- fprintf(stderr, "PARAMETER ERROR! 'filename' parameter is missing.\n"); -- usage(argv[0]); -- } -- - if (display_mode & LISTDBS) { - dbi_dbslist_t *dbs = dblayer_list_dbs(dbimpl_name, filename); - if (dbs) { -diff --git a/man/man1/dbscan.1 b/man/man1/dbscan.1 -index 810608371..dfb6e8351 100644 ---- a/man/man1/dbscan.1 -+++ b/man/man1/dbscan.1 -@@ -31,50 +31,94 @@ Scans a Directory Server database index file and dumps the contents. - .\" respectively. - .SH OPTIONS - A summary of options is included below: -+.IP -+common options: -+.TP -+.B \fB\-A, \-\-ascii\fR -+dump as ascii data -+.TP -+.B \fB\-D, \-\-db\-type\fR -+specify db type: bdb or mdb - .TP --.B \fB\-f\fR --specify db file -+.B \fB\-f, \-\-dbi\fR -+specify db instance - .TP --.B \fB\-R\fR -+.B \fB\-R, \-\-raw\fR - dump as raw data - .TP --.B \fB\-t\fR -+.B \fB\-t, \-\-truncate\-entry\fR - entry truncate size (bytes) - .IP - entry file options: - .TP --.B \fB\-K\fR -+.B \fB\-K, \-\-entry\-id\fR - lookup only a specific entry id -+.IP - index file options: - .TP --.B \fB\-k\fR -+.B \fB\-G, \-\-id\-list\-min\-size\fR -+only display index entries with more than ids -+.TP -+.B \fB\-I, \-\-import\fR -+Import database instance from file. Requires \-\-do\-it parameter -+WARNING! Only the id2entry and replication_changelog database instances -+may be imported by dbscan. -+.TP -+.B \fB\-k, \-\-key\fR - lookup only a specific key - .TP --.B \fB\-l\fR -+.B \fB\-l, \-\-id\-list\-max\-size\fR - max length of dumped id list - (default 4096; 40 bytes <= size <= 1048576 bytes) - .TP --.B \fB\-G\fR --only display index entries with more than ids --.TP --.B \fB\-n\fR -+.B \fB\-n, \-\-show\-id\-list\-lenghts\fR - display ID list lengths - .TP --.B \fB\-r\fR -+.B \fB\-\-remove\fR -+remove a db instance. 
Requires \-\-do\-it parameter -+.TP -+.B \fB\-r, \-\-show\-id\-list\fR - display the contents of ID list - .TP --.B \fB\-s\fR -+.B \fB\-S, \-\-stats\fR -+display statistics -+.TP -+.B \fB\-X, \-\-export\fR -+Export database instance to file -+.IP -+other options: -+.TP -+.B \fB\-s, \-\-summary\fR - Summary of index counts -+.TP -+.B \fB\-L, \-\-list\fR -+List od database instances -+.TP -+.B \fB\-\-do\-it\fR -+confirmation required for actions that change the database contents -+.TP -+.B \fB\-h, \-\-help\-it\fR -+display the usage - .IP - .SH USAGE - Sample usages: - .TP -+List the database instances -+.B -+dbscan -L /var/lib/dirsrv/slapd-supplier1/db -+.TP - Dump the entry file: - .B - dbscan \fB\-f\fR id2entry.db4 - .TP - Display index keys in cn.db4: --.B dbscan \fB\-f\fR cn.db4 -+.B -+dbscan \fB\-f\fR cn.db4 -+.TP -+Display index keys in cn on lmdb: -+.B -+dbscan \fB\-f\fR /var/lib/dirsrv/slapd\-supplier1/db/userroot/cn.db -+ (Note: Use \fBdbscan \-L db_home_dir\R to get the db instance path) - .TP - Display index keys and the count of entries having the key in mail.db4: - .B -@@ -86,7 +130,7 @@ dbscan \fB\-r\fR \fB\-G\fR 20 \fB\-f\fR sn.db4 - .TP - Display summary of objectclass.db4: - .B --dbscan \fB\-f\fR objectclass.db4 -+dbscan \fB\-s \-f\fR objectclass.db4 - .br - .SH AUTHOR - dbscan was written by the 389 Project. -diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py -index e87582d9e..368741a66 100644 ---- a/src/lib389/lib389/__init__.py -+++ b/src/lib389/lib389/__init__.py -@@ -3039,14 +3039,17 @@ class DirSrv(SimpleLDAPObject, object): - return self._dbisupport - # check if -D and -L options are supported - try: -- cmd = ["%s/dbscan" % self.get_bin_dir(), "--help"] -+ cmd = ["%s/dbscan" % self.get_bin_dir(), "-h"] - self.log.debug("DEBUG: checking dbscan supported options %s" % cmd) - p = subprocess.Popen(cmd, stdout=subprocess.PIPE) - except subprocess.CalledProcessError: - pass - output, stderr = p.communicate() -- self.log.debug("is_dbi_supported output " + output.decode()) -- if "-D " in output.decode() and "-L " in output.decode(): -+ output = output.decode() -+ self.log.debug("is_dbi_supported output " + output) -+ if "-D " in output and "-L " in output: -+ self._dbisupport = True -+ elif "--db-type" in output and "--list" in output: - self._dbisupport = True - else: - self._dbisupport = False -diff --git a/src/lib389/lib389/cli_ctl/dblib.py b/src/lib389/lib389/cli_ctl/dblib.py -index e9269e340..82f09c70c 100644 ---- a/src/lib389/lib389/cli_ctl/dblib.py -+++ b/src/lib389/lib389/cli_ctl/dblib.py -@@ -158,6 +158,14 @@ def run_dbscan(args): - return output - - -+def does_dbscan_need_do_it(): -+ prefix = os.environ.get('PREFIX', "") -+ prog = f'{prefix}/bin/dbscan' -+ args = [ prog, '-h' ] -+ output = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.STDOUT) -+ return '--do-it' in output.stdout -+ -+ - def export_changelog(be, dblib): - # Export backend changelog - try: -@@ -172,7 +180,10 @@ def import_changelog(be, dblib): - # import backend changelog - try: - cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname'] -- run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name']]) -+ if does_dbscan_need_do_it(): -+ run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name'], '--do-it']) -+ else: -+ run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name']]) - return True - except subprocess.CalledProcessError as e: - return False --- -2.48.0 - diff --git 
a/SOURCES/0004-Issue-6825-RootDN-Access-Control-Plugin-with-wildcar.patch b/SOURCES/0004-Issue-6825-RootDN-Access-Control-Plugin-with-wildcar.patch new file mode 100644 index 0000000..67ee402 --- /dev/null +++ b/SOURCES/0004-Issue-6825-RootDN-Access-Control-Plugin-with-wildcar.patch @@ -0,0 +1,125 @@ +From 5613937623f0037a54490b22c60f7eb1aa52cf4e Mon Sep 17 00:00:00 2001 +From: James Chapman +Date: Wed, 25 Jun 2025 14:11:05 +0000 +Subject: [PATCH] =?UTF-8?q?Issue=206825=20-=20RootDN=20Access=20Control=20?= + =?UTF-8?q?Plugin=20with=20wildcards=20for=20IP=20addre=E2=80=A6=20(#6826)?= +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Bug description: +RootDN Access Control Plugin with wildcards for IP addresses fails withi +an error "Invalid IP address" + +socket.inet_aton() validates IPv4 IP addresses and does not support wildcards. + +Fix description: +Add a regex pattern to match wildcard IP addresses, check each octet is +between 0-255 + +Fixes: https://github.com/389ds/389-ds-base/issues/6825 + +Reviewed by: @droideck (Thank you) +--- + .../lib389/cli_conf/plugins/rootdn_ac.py | 16 +++----- + src/lib389/lib389/utils.py | 40 +++++++++++++++++++ + 2 files changed, 45 insertions(+), 11 deletions(-) + +diff --git a/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py b/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py +index 65486fff8..1456f5ebe 100644 +--- a/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py ++++ b/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py +@@ -8,7 +8,7 @@ + + import socket + from lib389.plugins import RootDNAccessControlPlugin +-from lib389.utils import is_valid_hostname ++from lib389.utils import is_valid_hostname, is_valid_ip + from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit + from lib389.cli_base import CustomHelpFormatter + +@@ -62,19 +62,13 @@ def validate_args(args): + + if args.allow_ip is not None: + for ip in args.allow_ip: +- if ip != "delete": +- try: +- socket.inet_aton(ip) +- except socket.error: +- raise ValueError(f"Invalid IP address ({ip}) for '--allow-ip'") ++ if ip != "delete" and not is_valid_ip(ip): ++ raise ValueError(f"Invalid IP address ({ip}) for '--allow-ip'") + + if args.deny_ip is not None and args.deny_ip != "delete": + for ip in args.deny_ip: +- if ip != "delete": +- try: +- socket.inet_aton(ip) +- except socket.error: +- raise ValueError(f"Invalid IP address ({ip}) for '--deny-ip'") ++ if ip != "delete" and not is_valid_ip(ip): ++ raise ValueError(f"Invalid IP address ({ip}) for '--deny-ip'") + + if args.allow_host is not None: + for hostname in args.allow_host: +diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py +index afc282e94..3937fc1a8 100644 +--- a/src/lib389/lib389/utils.py ++++ b/src/lib389/lib389/utils.py +@@ -31,6 +31,7 @@ import logging + import shutil + import ldap + import socket ++import ipaddress + import time + import stat + from datetime import (datetime, timedelta) +@@ -1707,6 +1708,45 @@ def is_valid_hostname(hostname): + allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(? +Date: Fri, 8 Mar 2024 16:15:52 +0000 +Subject: [PATCH] Issue 6119 - Synchronise accept_thread with slapd_daemon + (#6120) + +Bug Description: A corner cases exists, where the slapd_daemon has +begun its shutdown process but the accept_thread is still running +and capable of handling new connections. When this scenario occurs, +the connection subsystem has been partially deallocated and is in +an unstable state. 
A segfault is generated when attempting to get a +new connection from the connection table. + +Fix Description: The connection table is only deallocated when the +number of active threads is 0. Modify the accept_thread to adjust the +the active thread count during creation/destruction, meaning the connection +table can only be freed when the accept_thread has completed + +Relates: https://github.com/389ds/389-ds-base/issues/6119 + +Reviewed by: @tbordaz, @Firstyear , @mreynolds389 (Thank you) +--- + ldap/servers/slapd/daemon.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c +index 5d01a2526..a43fc9285 100644 +--- a/ldap/servers/slapd/daemon.c ++++ b/ldap/servers/slapd/daemon.c +@@ -868,6 +868,8 @@ accept_thread(void *vports) + slapi_ch_free((void **)&listener_idxs); + slapd_sockets_ports_free(ports); + slapi_ch_free((void **)&fds); ++ g_decr_active_threadcnt(); ++ slapi_log_err(SLAPI_LOG_INFO, "slapd_daemon", "slapd shutting down - accept_thread\n"); + } + + void +@@ -1158,6 +1160,8 @@ slapd_daemon(daemon_ports_t *ports) + slapi_log_err(SLAPI_LOG_EMERG, "slapd_daemon", "Unable to fd accept thread - Shutting Down (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)\n", + errorCode, slapd_pr_strerror(errorCode)); + g_set_shutdown(SLAPI_SHUTDOWN_EXIT); ++ } else{ ++ g_incr_active_threadcnt(); + } + + #ifdef WITH_SYSTEMD +-- +2.49.0 + diff --git a/SOURCES/0005-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch b/SOURCES/0005-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch deleted file mode 100644 index 8fea644..0000000 --- a/SOURCES/0005-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch +++ /dev/null @@ -1,70 +0,0 @@ -From de52853a3551f1d1876ea21b33a5242ad669fec1 Mon Sep 17 00:00:00 2001 -From: James Chapman -Date: Tue, 4 Feb 2025 15:40:16 +0000 -Subject: [PATCH] Issue 6566 - RI plugin failure to handle a modrdn for rename - of member of multiple groups (#6567) - -Bug description: -With AM and RI plugins enabled, the rename of a user that is part of multiple groups -fails with a "value exists" error. - -Fix description: -For a modrdn the RI plugin creates a new DN, before a modify is attempted check -if the new DN already exists in the attr being updated. - -Fixes: https://github.com/389ds/389-ds-base/issues/6566 - -Reviewed by: @progier389 , @tbordaz (Thank you) ---- - ldap/servers/plugins/referint/referint.c | 15 ++++++++++++--- - 1 file changed, 12 insertions(+), 3 deletions(-) - -diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c -index 468fdc239..218863ea5 100644 ---- a/ldap/servers/plugins/referint/referint.c -+++ b/ldap/servers/plugins/referint/referint.c -@@ -924,6 +924,7 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */ - { - Slapi_Mods *smods = NULL; - char *newDN = NULL; -+ struct berval bv = {0}; - char **dnParts = NULL; - char *sval = NULL; - char *newvalue = NULL; -@@ -1026,22 +1027,30 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */ - } - /* else: normalize_rc < 0) Ignore the DN normalization error for now. 
*/ - -+ bv.bv_val = newDN; -+ bv.bv_len = strlen(newDN); - p = PL_strstr(sval, slapi_sdn_get_ndn(origDN)); - if (p == sval) { - /* (case 1) */ - slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval); -- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN); -- -+ /* Add only if the attr value does not exist */ -+ if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) { -+ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN); -+ } - } else if (p) { - /* (case 2) */ - slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval); - *p = '\0'; - newvalue = slapi_ch_smprintf("%s%s", sval, newDN); -- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue); -+ /* Add only if the attr value does not exist */ -+ if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) { -+ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue); -+ } - slapi_ch_free_string(&newvalue); - } - /* else: value does not include the modified DN. Ignore it. */ - slapi_ch_free_string(&sval); -+ bv = (struct berval){0}; - } - rc = _do_modify(mod_pb, entrySDN, slapi_mods_get_ldapmods_byref(smods)); - if (rc) { --- -2.48.0 - diff --git a/SOURCES/0006-Issue-6258-Mitigate-race-condition-in-paged_results_.patch b/SOURCES/0006-Issue-6258-Mitigate-race-condition-in-paged_results_.patch deleted file mode 100644 index 2f66c4f..0000000 --- a/SOURCES/0006-Issue-6258-Mitigate-race-condition-in-paged_results_.patch +++ /dev/null @@ -1,43 +0,0 @@ -From a634756784056270773d67747061e26152d85469 Mon Sep 17 00:00:00 2001 -From: Masahiro Matsuya -Date: Wed, 5 Feb 2025 11:38:04 +0900 -Subject: [PATCH] Issue 6258 - Mitigate race condition in paged_results_test.py - (#6433) - -The regression test dirsrvtests/tests/suites/paged_results/paged_results_test.py::test_multi_suffix_search has a race condition causing it to fail due to multiple queries potentially writing their logs out of chronological order. - -This failure is mitigated by sorting the retrieved access_log_lines by their "op" value. This ensures the log lines are in chronological order, as expected by the assertions at the end of test_multi_suffix_search(). - -Helps fix: #6258 - -Reviewed by: @droideck , @progier389 (Thanks!) - -Co-authored-by: Anuar Beisembayev <111912342+abeisemb@users.noreply.github.com> ---- - dirsrvtests/tests/suites/paged_results/paged_results_test.py | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py -index eaf0e0da9..fca48db0f 100644 ---- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py -+++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py -@@ -7,6 +7,7 @@ - # --- END COPYRIGHT BLOCK --- - # - import socket -+import re - from random import sample, randrange - - import pytest -@@ -1126,6 +1127,8 @@ def test_multi_suffix_search(topology_st, create_user, new_suffixes): - topology_st.standalone.restart(timeout=10) - - access_log_lines = topology_st.standalone.ds_access_log.match('.*pr_cookie=.*') -+ # Sort access_log_lines by op number to mitigate race condition effects. 
-+ access_log_lines.sort(key=lambda x: int(re.search(r"op=(\d+) RESULT", x).group(1))) - pr_cookie_list = ([line.rsplit('=', 1)[-1] for line in access_log_lines]) - pr_cookie_list = [int(pr_cookie) for pr_cookie in pr_cookie_list] - log.info('Assert that last pr_cookie == -1 and others pr_cookie == 0') --- -2.48.0 - diff --git a/SOURCES/0006-Issue-6782-Improve-paged-result-locking.patch b/SOURCES/0006-Issue-6782-Improve-paged-result-locking.patch new file mode 100644 index 0000000..5c9558b --- /dev/null +++ b/SOURCES/0006-Issue-6782-Improve-paged-result-locking.patch @@ -0,0 +1,127 @@ +From 7943443bb92fca6676922349fb12503a527cb6b1 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 15 May 2025 10:35:27 -0400 +Subject: [PATCH] Issue 6782 - Improve paged result locking + +Description: + +When cleaning a slot, instead of mem setting everything to Zero and restoring +the mutex, manually reset all the values leaving the mutex pointer +intact. + +There is also a deadlock possibility when checking for abandoned PR search +in opshared.c, and we were checking a flag value outside of the per_conn +lock. + +Relates: https://github.com/389ds/389-ds-base/issues/6782 + +Reviewed by: progier & spichugi(Thanks!!) +--- + ldap/servers/slapd/opshared.c | 10 +++++++++- + ldap/servers/slapd/pagedresults.c | 27 +++++++++++++++++---------- + 2 files changed, 26 insertions(+), 11 deletions(-) + +diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c +index 7dc2d5983..14a7dcdfb 100644 +--- a/ldap/servers/slapd/opshared.c ++++ b/ldap/servers/slapd/opshared.c +@@ -592,6 +592,14 @@ op_shared_search(Slapi_PBlock *pb, int send_result) + int32_t tlimit; + slapi_pblock_get(pb, SLAPI_SEARCH_TIMELIMIT, &tlimit); + pagedresults_set_timelimit(pb_conn, operation, (time_t)tlimit, pr_idx); ++ /* When using this mutex in conjunction with the main paged ++ * result lock, you must do so in this order: ++ * ++ * --> pagedresults_lock() ++ * --> pagedresults_mutex ++ * <-- pagedresults_mutex ++ * <-- pagedresults_unlock() ++ */ + pagedresults_mutex = pageresult_lock_get_addr(pb_conn); + } + +@@ -717,11 +725,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result) + pr_search_result = pagedresults_get_search_result(pb_conn, operation, 1 /*locked*/, pr_idx); + if (pr_search_result) { + if (pagedresults_is_abandoned_or_notavailable(pb_conn, 1 /*locked*/, pr_idx)) { ++ pthread_mutex_unlock(pagedresults_mutex); + pagedresults_unlock(pb_conn, pr_idx); + /* Previous operation was abandoned and the simplepaged object is not in use. */ + send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL); + rc = LDAP_SUCCESS; +- pthread_mutex_unlock(pagedresults_mutex); + goto free_and_return; + } else { + slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, pr_search_result); +diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c +index 642aefb3d..c3f3aae01 100644 +--- a/ldap/servers/slapd/pagedresults.c ++++ b/ldap/servers/slapd/pagedresults.c +@@ -48,7 +48,6 @@ pageresult_lock_get_addr(Connection *conn) + static void + _pr_cleanup_one_slot(PagedResults *prp) + { +- PRLock *prmutex = NULL; + if (!prp) { + return; + } +@@ -56,13 +55,17 @@ _pr_cleanup_one_slot(PagedResults *prp) + /* sr is left; release it. */ + prp->pr_current_be->be_search_results_release(&(prp->pr_search_result_set)); + } +- /* clean up the slot */ +- if (prp->pr_mutex) { +- /* pr_mutex is reused; back it up and reset it. 
*/ +- prmutex = prp->pr_mutex; +- } +- memset(prp, '\0', sizeof(PagedResults)); +- prp->pr_mutex = prmutex; ++ ++ /* clean up the slot except the mutex */ ++ prp->pr_current_be = NULL; ++ prp->pr_search_result_set = NULL; ++ prp->pr_search_result_count = 0; ++ prp->pr_search_result_set_size_estimate = 0; ++ prp->pr_sort_result_code = 0; ++ prp->pr_timelimit_hr.tv_sec = 0; ++ prp->pr_timelimit_hr.tv_nsec = 0; ++ prp->pr_flags = 0; ++ prp->pr_msgid = 0; + } + + /* +@@ -1007,7 +1010,8 @@ op_set_pagedresults(Operation *op) + + /* + * pagedresults_lock/unlock -- introduced to protect search results for the +- * asynchronous searches. ++ * asynchronous searches. Do not call these functions while the PR conn lock ++ * is held (e.g. pageresult_lock_get_addr(conn)) + */ + void + pagedresults_lock(Connection *conn, int index) +@@ -1045,6 +1049,8 @@ int + pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int index) + { + PagedResults *prp; ++ int32_t result; ++ + if (!conn || (index < 0) || (index >= conn->c_pagedresults.prl_maxlen)) { + return 1; /* not abandoned, but do not want to proceed paged results op. */ + } +@@ -1052,10 +1058,11 @@ pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int inde + pthread_mutex_lock(pageresult_lock_get_addr(conn)); + } + prp = conn->c_pagedresults.prl_list + index; ++ result = prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED; + if (!locked) { + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); + } +- return prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED; ++ return result; + } + + int +-- +2.49.0 + diff --git a/SOURCES/0007-Issue-6229-After-an-initial-failure-subsequent-onlin.patch b/SOURCES/0007-Issue-6229-After-an-initial-failure-subsequent-onlin.patch deleted file mode 100644 index 0abffa3..0000000 --- a/SOURCES/0007-Issue-6229-After-an-initial-failure-subsequent-onlin.patch +++ /dev/null @@ -1,566 +0,0 @@ -From 769e71499880a0820424bf925c0f0fe793e11cc8 Mon Sep 17 00:00:00 2001 -From: progier389 -Date: Fri, 28 Jun 2024 18:56:49 +0200 -Subject: [PATCH] Issue 6229 - After an initial failure, subsequent online - backups fail (#6230) - -* Issue 6229 - After an initial failure, subsequent online backups will not work - -Several issues related to backup task error handling: -Backends stay busy after the failure -Exit code is 0 in some cases -Crash if failing to open the backup directory -And a more general one: -lib389 Task DN collision - -Solutions: -Always reset the busy flags that have been set -Ensure that 0 is not returned in error case -Avoid closing NULL directory descriptor -Use a timestamp having milliseconds precision to create the task DN - -Issue: #6229 - -Reviewed by: @droideck (Thanks!) 
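The lib389 Task DN collision called out above stems from second-granularity timestamps in task entry names: two tasks created within the same second receive identical DNs. As a minimal Python sketch of the millisecond-precision naming that the change below switches to (the helper and container names here are illustrative, not the exact lib389 API):

    from datetime import datetime

    def get_timestamp():
        # ISO-8601 with sub-second precision, e.g. 2024-06-28T18:56:49.123456,
        # so two tasks created within the same second no longer share a name.
        return datetime.now().isoformat()

    def task_dn(prefix, container="cn=backup,cn=tasks,cn=config"):
        # Build a task DN from the prefix plus the high-resolution timestamp.
        return "cn=%s%s,%s" % (prefix, get_timestamp(), container)

    if __name__ == "__main__":
        print(task_dn("backup_"))
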
- -(cherry picked from commit 04a0b6ac776a1d588ec2e10ff651e5015078ad21) ---- - ldap/servers/slapd/back-ldbm/archive.c | 45 +++++----- - .../slapd/back-ldbm/db-mdb/mdb_layer.c | 3 + - src/lib389/lib389/__init__.py | 10 +-- - src/lib389/lib389/tasks.py | 82 +++++++++---------- - 4 files changed, 70 insertions(+), 70 deletions(-) - -diff --git a/ldap/servers/slapd/back-ldbm/archive.c b/ldap/servers/slapd/back-ldbm/archive.c -index 0460a42f6..6658cc80a 100644 ---- a/ldap/servers/slapd/back-ldbm/archive.c -+++ b/ldap/servers/slapd/back-ldbm/archive.c -@@ -16,6 +16,8 @@ - #include "back-ldbm.h" - #include "dblayer.h" - -+#define NO_OBJECT ((Object*)-1) -+ - int - ldbm_temporary_close_all_instances(Slapi_PBlock *pb) - { -@@ -270,6 +272,7 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb) - int run_from_cmdline = 0; - Slapi_Task *task; - struct stat sbuf; -+ Object *last_busy_inst_obj = NO_OBJECT; - - slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li); - slapi_pblock_get(pb, SLAPI_SEQ_VAL, &rawdirectory); -@@ -380,13 +383,12 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb) - - /* to avoid conflict w/ import, do this check for commandline, as well */ - { -- Object *inst_obj, *inst_obj2; - ldbm_instance *inst = NULL; - - /* server is up -- mark all backends busy */ -- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj; -- inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) { -- inst = (ldbm_instance *)object_get_data(inst_obj); -+ for (last_busy_inst_obj = objset_first_obj(li->li_instance_set); last_busy_inst_obj; -+ last_busy_inst_obj = objset_next_obj(li->li_instance_set, last_busy_inst_obj)) { -+ inst = (ldbm_instance *)object_get_data(last_busy_inst_obj); - - /* check if an import/restore is already ongoing... */ - if (instance_set_busy(inst) != 0 || dblayer_in_import(inst) != 0) { -@@ -400,20 +402,6 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb) - "another task and cannot be disturbed.", - inst->inst_name); - } -- -- /* painfully, we have to clear the BUSY flags on the -- * backends we'd already marked... 
-- */ -- for (inst_obj2 = objset_first_obj(li->li_instance_set); -- inst_obj2 && (inst_obj2 != inst_obj); -- inst_obj2 = objset_next_obj(li->li_instance_set, -- inst_obj2)) { -- inst = (ldbm_instance *)object_get_data(inst_obj2); -- instance_set_not_busy(inst); -- } -- if (inst_obj2 && inst_obj2 != inst_obj) -- object_release(inst_obj2); -- object_release(inst_obj); - goto err; - } - } -@@ -427,18 +415,26 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb) - goto err; - } - -- if (!run_from_cmdline) { -+err: -+ /* Clear all BUSY flags that have been previously set */ -+ if (last_busy_inst_obj != NO_OBJECT) { - ldbm_instance *inst; - Object *inst_obj; - -- /* none of these backends are busy anymore */ -- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj; -+ for (inst_obj = objset_first_obj(li->li_instance_set); -+ inst_obj && (inst_obj != last_busy_inst_obj); - inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) { - inst = (ldbm_instance *)object_get_data(inst_obj); - instance_set_not_busy(inst); - } -+ if (last_busy_inst_obj != NULL) { -+ /* release last seen object for aborted objset_next_obj iterations */ -+ if (inst_obj != NULL) { -+ object_release(inst_obj); -+ } -+ object_release(last_busy_inst_obj); -+ } - } --err: - if (return_value) { - if (dir_bak) { - slapi_log_err(SLAPI_LOG_ERR, -@@ -727,7 +723,10 @@ ldbm_archive_config(char *bakdir, Slapi_Task *task) - } - - error: -- PR_CloseDir(dirhandle); -+ if (NULL != dirhandle) { -+ PR_CloseDir(dirhandle); -+ dirhandle = NULL; -+ } - dse_backup_unlock(); - slapi_ch_free_string(&backup_config_dir); - slapi_ch_free_string(&dse_file); -diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c -index 70a289bdb..de4161b0c 100644 ---- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c -+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c -@@ -983,6 +983,9 @@ dbmdb_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task) - if (ldbm_archive_config(dest_dir, task) != 0) { - slapi_log_err(SLAPI_LOG_ERR, "dbmdb_backup", - "Backup of config files failed or is incomplete\n"); -+ if (0 == return_value) { -+ return_value = -1; -+ } - } - - goto bail; -diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py -index 368741a66..cb372c138 100644 ---- a/src/lib389/lib389/__init__.py -+++ b/src/lib389/lib389/__init__.py -@@ -69,7 +69,7 @@ from lib389.utils import ( - get_user_is_root) - from lib389.paths import Paths - from lib389.nss_ssl import NssSsl --from lib389.tasks import BackupTask, RestoreTask -+from lib389.tasks import BackupTask, RestoreTask, Task - from lib389.dseldif import DSEldif - - # mixin -@@ -1424,7 +1424,7 @@ class DirSrv(SimpleLDAPObject, object): - name, self.ds_paths.prefix) - - # create the archive -- name = "backup_%s_%s.tar.gz" % (self.serverid, time.strftime("%m%d%Y_%H%M%S")) -+ name = "backup_%s_%s.tar.gz" % (self.serverid, Task.get_timestamp()) - backup_file = os.path.join(backup_dir, name) - tar = tarfile.open(backup_file, "w:gz") - tar.extraction_filter = (lambda member, path: member) -@@ -2810,7 +2810,7 @@ class DirSrv(SimpleLDAPObject, object): - else: - # No output file specified. 
Use the default ldif location/name - cmd.append('-a') -- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") -+ tnow = Task.get_timestamp() - if bename: - ldifname = os.path.join(self.ds_paths.ldif_dir, "%s-%s-%s.ldif" % (self.serverid, bename, tnow)) - else: -@@ -2881,7 +2881,7 @@ class DirSrv(SimpleLDAPObject, object): - - if archive_dir is None: - # Use the instance name and date/time as the default backup name -- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") -+ tnow = Task.get_timestamp() - archive_dir = os.path.join(self.ds_paths.backup_dir, "%s-%s" % (self.serverid, tnow)) - elif not archive_dir.startswith("/"): - # Relative path, append it to the bak directory -@@ -3506,7 +3506,7 @@ class DirSrv(SimpleLDAPObject, object): - - if archive is None: - # Use the instance name and date/time as the default backup name -- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") -+ tnow = Task.get_timestamp() - if self.serverid is not None: - backup_dir_name = "%s-%s" % (self.serverid, tnow) - else: -diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py -index 6c2adb5b2..6bf302862 100644 ---- a/src/lib389/lib389/tasks.py -+++ b/src/lib389/lib389/tasks.py -@@ -118,7 +118,7 @@ class Task(DSLdapObject): - return super(Task, self).create(rdn, properties, basedn) - - @staticmethod -- def _get_task_date(): -+ def get_timestamp(): - """Return a timestamp to use in naming new task entries.""" - - return datetime.now().isoformat() -@@ -132,7 +132,7 @@ class AutomemberRebuildMembershipTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'automember_rebuild_' + Task._get_task_date() -+ self.cn = 'automember_rebuild_' + Task.get_timestamp() - dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_REBUILD_TASK - - super(AutomemberRebuildMembershipTask, self).__init__(instance, dn) -@@ -147,7 +147,7 @@ class AutomemberAbortRebuildTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'automember_abort_' + Task._get_task_date() -+ self.cn = 'automember_abort_' + Task.get_timestamp() - dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_ABORT_REBUILD_TASK - - super(AutomemberAbortRebuildTask, self).__init__(instance, dn) -@@ -161,7 +161,7 @@ class FixupLinkedAttributesTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'fixup_linked_attrs_' + Task._get_task_date() -+ self.cn = 'fixup_linked_attrs_' + Task.get_timestamp() - dn = "cn=" + self.cn + "," + DN_FIXUP_LINKED_ATTIBUTES - - super(FixupLinkedAttributesTask, self).__init__(instance, dn) -@@ -175,7 +175,7 @@ class MemberUidFixupTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'memberUid_fixup_' + Task._get_task_date() -+ self.cn = 'memberUid_fixup_' + Task.get_timestamp() - dn = f"cn={self.cn},cn=memberuid task,cn=tasks,cn=config" - - super(MemberUidFixupTask, self).__init__(instance, dn) -@@ -190,7 +190,7 @@ class MemberOfFixupTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'memberOf_fixup_' + Task._get_task_date() -+ self.cn = 'memberOf_fixup_' + Task.get_timestamp() - dn = "cn=" + self.cn + "," + DN_MBO_TASK - - super(MemberOfFixupTask, self).__init__(instance, dn) -@@ -205,7 +205,7 @@ class USNTombstoneCleanupTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'usn_cleanup_' + Task._get_task_date() -+ self.cn = 'usn_cleanup_' + Task.get_timestamp() - dn = "cn=" + self.cn + ",cn=USN tombstone cleanup task," + DN_TASKS - - super(USNTombstoneCleanupTask, self).__init__(instance, dn) -@@ -225,7 +225,7 @@ class 
csngenTestTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'csngenTest_' + Task._get_task_date() -+ self.cn = 'csngenTest_' + Task.get_timestamp() - dn = "cn=" + self.cn + ",cn=csngen_test," + DN_TASKS - super(csngenTestTask, self).__init__(instance, dn) - -@@ -238,7 +238,7 @@ class EntryUUIDFixupTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'entryuuid_fixup_' + Task._get_task_date() -+ self.cn = 'entryuuid_fixup_' + Task.get_timestamp() - dn = "cn=" + self.cn + "," + DN_EUUID_TASK - super(EntryUUIDFixupTask, self).__init__(instance, dn) - self._must_attributes.extend(['basedn']) -@@ -252,7 +252,7 @@ class DBCompactTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'compact_db_' + Task._get_task_date() -+ self.cn = 'compact_db_' + Task.get_timestamp() - dn = "cn=" + self.cn + "," + DN_COMPACTDB_TASK - super(DBCompactTask, self).__init__(instance, dn) - -@@ -265,7 +265,7 @@ class SchemaReloadTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'schema_reload_' + Task._get_task_date() -+ self.cn = 'schema_reload_' + Task.get_timestamp() - dn = "cn=" + self.cn + ",cn=schema reload task," + DN_TASKS - super(SchemaReloadTask, self).__init__(instance, dn) - -@@ -278,7 +278,7 @@ class SyntaxValidateTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'syntax_validate_' + Task._get_task_date() -+ self.cn = 'syntax_validate_' + Task.get_timestamp() - dn = f"cn={self.cn},cn=syntax validate,cn=tasks,cn=config" - - super(SyntaxValidateTask, self).__init__(instance, dn) -@@ -295,7 +295,7 @@ class AbortCleanAllRUVTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'abortcleanallruv_' + Task._get_task_date() -+ self.cn = 'abortcleanallruv_' + Task.get_timestamp() - dn = "cn=" + self.cn + ",cn=abort cleanallruv," + DN_TASKS - - super(AbortCleanAllRUVTask, self).__init__(instance, dn) -@@ -312,7 +312,7 @@ class CleanAllRUVTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'cleanallruv_' + Task._get_task_date() -+ self.cn = 'cleanallruv_' + Task.get_timestamp() - dn = "cn=" + self.cn + ",cn=cleanallruv," + DN_TASKS - self._properties = None - -@@ -359,7 +359,7 @@ class ImportTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'import_' + Task._get_task_date() -+ self.cn = 'import_' + Task.get_timestamp() - dn = "cn=%s,%s" % (self.cn, DN_IMPORT_TASK) - self._properties = None - -@@ -388,7 +388,7 @@ class ExportTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'export_' + Task._get_task_date() -+ self.cn = 'export_' + Task.get_timestamp() - dn = "cn=%s,%s" % (self.cn, DN_EXPORT_TASK) - self._properties = None - -@@ -411,7 +411,7 @@ class BackupTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'backup_' + Task._get_task_date() -+ self.cn = 'backup_' + Task.get_timestamp() - dn = "cn=" + self.cn + ",cn=backup," + DN_TASKS - self._properties = None - -@@ -426,7 +426,7 @@ class RestoreTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'restore_' + Task._get_task_date() -+ self.cn = 'restore_' + Task.get_timestamp() - dn = "cn=" + self.cn + ",cn=restore," + DN_TASKS - self._properties = None - -@@ -513,7 +513,7 @@ class Tasks(object): - raise ValueError("Import file (%s) does not exist" % input_file) - - # Prepare the task entry -- cn = "import_" + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = "import_" + Task.get_timestamp() - dn = "cn=%s,%s" % (cn, 
DN_IMPORT_TASK) - entry = Entry(dn) - entry.setValues('objectclass', 'top', 'extensibleObject') -@@ -581,7 +581,7 @@ class Tasks(object): - raise ValueError("output_file is mandatory") - - # Prepare the task entry -- cn = "export_" + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = "export_" + Task.get_timestamp() - dn = "cn=%s,%s" % (cn, DN_EXPORT_TASK) - entry = Entry(dn) - entry.update({ -@@ -637,7 +637,7 @@ class Tasks(object): - raise ValueError("You must specify a backup directory.") - - # build the task entry -- cn = "backup_" + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = "backup_" + Task.get_timestamp() - dn = "cn=%s,%s" % (cn, DN_BACKUP_TASK) - entry = Entry(dn) - entry.update({ -@@ -694,7 +694,7 @@ class Tasks(object): - raise ValueError("Backup file (%s) does not exist" % backup_dir) - - # build the task entry -- cn = "restore_" + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = "restore_" + Task.get_timestamp() - dn = "cn=%s,%s" % (cn, DN_RESTORE_TASK) - entry = Entry(dn) - entry.update({ -@@ -789,7 +789,7 @@ class Tasks(object): - attrs.append(attr) - else: - attrs.append(attrname) -- cn = "index_vlv_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime())) -+ cn = "index_vlv_%s" % (Task.get_timestamp()) - dn = "cn=%s,%s" % (cn, DN_INDEX_TASK) - entry = Entry(dn) - entry.update({ -@@ -803,7 +803,7 @@ class Tasks(object): - # - # Reindex all attributes - gather them first... - # -- cn = "index_all_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime())) -+ cn = "index_all_%s" % (Task.get_timestamp()) - dn = ('cn=%s,cn=ldbm database,cn=plugins,cn=config' % backend) - try: - indexes = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, '(objectclass=nsIndex)') -@@ -815,7 +815,7 @@ class Tasks(object): - # - # Reindex specific attributes - # -- cn = "index_attrs_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime())) -+ cn = "index_attrs_%s" % (Task.get_timestamp()) - if isinstance(attrname, (tuple, list)): - # Need to guarantee this is a list (and not a tuple) - for attr in attrname: -@@ -903,8 +903,7 @@ class Tasks(object): - - suffix = ents[0].getValue(attr) - -- cn = "fixupmemberof_" + time.strftime("%m%d%Y_%H%M%S", -- time.localtime()) -+ cn = "fixupmemberof_" + Task.get_timestamp() - dn = "cn=%s,%s" % (cn, DN_MBO_TASK) - entry = Entry(dn) - entry.setValues('objectclass', 'top', 'extensibleObject') -@@ -965,8 +964,7 @@ class Tasks(object): - if len(ents) != 1: - raise ValueError("invalid backend name: %s" % bename) - -- cn = "fixupTombstone_" + time.strftime("%m%d%Y_%H%M%S", -- time.localtime()) -+ cn = "fixupTombstone_" + Task.get_timestamp() - dn = "cn=%s,%s" % (cn, DN_TOMB_FIXUP_TASK) - entry = Entry(dn) - entry.setValues('objectclass', 'top', 'extensibleObject') -@@ -1019,7 +1017,7 @@ class Tasks(object): - @return exit code - ''' - -- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = 'task-' + Task.get_timestamp() - dn = ('cn=%s,cn=automember rebuild membership,cn=tasks,cn=config' % cn) - - entry = Entry(dn) -@@ -1077,7 +1075,7 @@ class Tasks(object): - if not ldif_out: - raise ValueError("Missing ldif_out") - -- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = 'task-' + Task.get_timestamp() - dn = ('cn=%s,cn=automember export updates,cn=tasks,cn=config' % cn) - entry = Entry(dn) - entry.setValues('objectclass', 'top', 'extensibleObject') -@@ -1129,7 +1127,7 @@ class Tasks(object): - if not ldif_out or not ldif_in: - raise ValueError("Missing ldif_out and/or ldif_in") - -- cn = 'task-' + 
time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = 'task-' + Task.get_timestamp() - dn = ('cn=%s,cn=automember map updates,cn=tasks,cn=config' % cn) - - entry = Entry(dn) -@@ -1175,7 +1173,7 @@ class Tasks(object): - @return exit code - ''' - -- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = 'task-' + Task.get_timestamp() - dn = ('cn=%s,cn=fixup linked attributes,cn=tasks,cn=config' % cn) - entry = Entry(dn) - entry.setValues('objectclass', 'top', 'extensibleObject') -@@ -1219,7 +1217,7 @@ class Tasks(object): - @return exit code - ''' - -- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = 'task-' + Task.get_timestamp() - dn = ('cn=%s,cn=schema reload task,cn=tasks,cn=config' % cn) - entry = Entry(dn) - entry.setValues('objectclass', 'top', 'extensibleObject') -@@ -1264,7 +1262,7 @@ class Tasks(object): - @return exit code - ''' - -- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = 'task-' + Task.get_timestamp() - dn = ('cn=%s,cn=memberuid task,cn=tasks,cn=config' % cn) - entry = Entry(dn) - entry.setValues('objectclass', 'top', 'extensibleObject') -@@ -1311,7 +1309,7 @@ class Tasks(object): - @return exit code - ''' - -- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = 'task-' + Task.get_timestamp() - dn = ('cn=%s,cn=syntax validate,cn=tasks,cn=config' % cn) - entry = Entry(dn) - entry.setValues('objectclass', 'top', 'extensibleObject') -@@ -1358,7 +1356,7 @@ class Tasks(object): - @return exit code - ''' - -- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = 'task-' + Task.get_timestamp() - dn = ('cn=%s,cn=USN tombstone cleanup task,cn=tasks,cn=config' % cn) - entry = Entry(dn) - entry.setValues('objectclass', 'top', 'extensibleObject') -@@ -1413,7 +1411,7 @@ class Tasks(object): - if not configfile: - raise ValueError("Missing required paramter: configfile") - -- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = 'task-' + Task.get_timestamp() - dn = ('cn=%s,cn=sysconfig reload,cn=tasks,cn=config' % cn) - entry = Entry(dn) - entry.setValues('objectclass', 'top', 'extensibleObject') -@@ -1464,7 +1462,7 @@ class Tasks(object): - if not suffix: - raise ValueError("Missing required paramter: suffix") - -- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = 'task-' + Task.get_timestamp() - dn = ('cn=%s,cn=cleanallruv,cn=tasks,cn=config' % cn) - entry = Entry(dn) - entry.setValues('objectclass', 'top', 'extensibleObject') -@@ -1516,7 +1514,7 @@ class Tasks(object): - if not suffix: - raise ValueError("Missing required paramter: suffix") - -- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = 'task-' + Task.get_timestamp() - dn = ('cn=%s,cn=abort cleanallruv,cn=tasks,cn=config' % cn) - entry = Entry(dn) - entry.setValues('objectclass', 'top', 'extensibleObject') -@@ -1571,7 +1569,7 @@ class Tasks(object): - if not nsArchiveDir: - raise ValueError("Missing required paramter: nsArchiveDir") - -- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) -+ cn = 'task-' + Task.get_timestamp() - dn = ('cn=%s,cn=upgradedb,cn=tasks,cn=config' % cn) - entry = Entry(dn) - entry.setValues('objectclass', 'top', 'extensibleObject') -@@ -1616,6 +1614,6 @@ class LDAPIMappingReloadTask(Task): - """ - - def __init__(self, instance, dn=None): -- self.cn = 'reload-' + Task._get_task_date() -+ self.cn = 'reload-' + Task.get_timestamp() - dn = f'cn={self.cn},cn=reload ldapi mappings,cn=tasks,cn=config' - 
super(LDAPIMappingReloadTask, self).__init__(instance, dn) --- -2.48.0 - diff --git a/SOURCES/0007-Issue-6822-Backend-creation-cleanup-and-Database-UI-.patch b/SOURCES/0007-Issue-6822-Backend-creation-cleanup-and-Database-UI-.patch new file mode 100644 index 0000000..cd0029d --- /dev/null +++ b/SOURCES/0007-Issue-6822-Backend-creation-cleanup-and-Database-UI-.patch @@ -0,0 +1,488 @@ +From b6729a99f3a3d4c6ebe82d4bb60ea2a6f8727782 Mon Sep 17 00:00:00 2001 +From: Simon Pichugin +Date: Fri, 27 Jun 2025 18:43:39 -0700 +Subject: [PATCH] Issue 6822 - Backend creation cleanup and Database UI tab + error handling (#6823) + +Description: Add rollback functionality when mapping tree creation fails +during backend creation to prevent orphaned backends. +Improve error handling in Database, Replication and Monitoring UI tabs +to gracefully handle backend get-tree command failures. + +Fixes: https://github.com/389ds/389-ds-base/issues/6822 + +Reviewed by: @mreynolds389 (Thanks!) +--- + src/cockpit/389-console/src/database.jsx | 119 ++++++++------ + src/cockpit/389-console/src/monitor.jsx | 172 +++++++++++--------- + src/cockpit/389-console/src/replication.jsx | 55 ++++--- + src/lib389/lib389/backend.py | 18 +- + 4 files changed, 210 insertions(+), 154 deletions(-) + +diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx +index c0c4be414..276125dfc 100644 +--- a/src/cockpit/389-console/src/database.jsx ++++ b/src/cockpit/389-console/src/database.jsx +@@ -478,6 +478,59 @@ export class Database extends React.Component { + } + + loadSuffixTree(fullReset) { ++ const treeData = [ ++ { ++ name: _("Global Database Configuration"), ++ icon: , ++ id: "dbconfig", ++ }, ++ { ++ name: _("Chaining Configuration"), ++ icon: , ++ id: "chaining-config", ++ }, ++ { ++ name: _("Backups & LDIFs"), ++ icon: , ++ id: "backups", ++ }, ++ { ++ name: _("Password Policies"), ++ id: "pwp", ++ icon: , ++ children: [ ++ { ++ name: _("Global Policy"), ++ icon: , ++ id: "pwpolicy", ++ }, ++ { ++ name: _("Local Policies"), ++ icon: , ++ id: "localpwpolicy", ++ }, ++ ], ++ defaultExpanded: true ++ }, ++ { ++ name: _("Suffixes"), ++ icon: , ++ id: "suffixes-tree", ++ children: [], ++ defaultExpanded: true, ++ action: ( ++ ++ ), ++ } ++ ]; ++ + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", + "backend", "get-tree", +@@ -491,58 +544,20 @@ export class Database extends React.Component { + suffixData = JSON.parse(content); + this.processTree(suffixData); + } +- const treeData = [ +- { +- name: _("Global Database Configuration"), +- icon: , +- id: "dbconfig", +- }, +- { +- name: _("Chaining Configuration"), +- icon: , +- id: "chaining-config", +- }, +- { +- name: _("Backups & LDIFs"), +- icon: , +- id: "backups", +- }, +- { +- name: _("Password Policies"), +- id: "pwp", +- icon: , +- children: [ +- { +- name: _("Global Policy"), +- icon: , +- id: "pwpolicy", +- }, +- { +- name: _("Local Policies"), +- icon: , +- id: "localpwpolicy", +- }, +- ], +- defaultExpanded: true +- }, +- { +- name: _("Suffixes"), +- icon: , +- id: "suffixes-tree", +- children: suffixData, +- defaultExpanded: true, +- action: ( +- +- ), +- } +- ]; ++ ++ let current_node = this.state.node_name; ++ if (fullReset) { ++ current_node = DB_CONFIG; ++ } ++ ++ treeData[4].children = suffixData; // suffixes node ++ this.setState(() => ({ ++ nodes: treeData, ++ node_name: current_node, ++ }), this.loadAttrs); ++ }) ++ .fail(err => { ++ // Handle backend get-tree failure gracefully + let 
current_node = this.state.node_name; + if (fullReset) { + current_node = DB_CONFIG; +diff --git a/src/cockpit/389-console/src/monitor.jsx b/src/cockpit/389-console/src/monitor.jsx +index ad48d1f87..91a8e3e37 100644 +--- a/src/cockpit/389-console/src/monitor.jsx ++++ b/src/cockpit/389-console/src/monitor.jsx +@@ -200,6 +200,84 @@ export class Monitor extends React.Component { + } + + loadSuffixTree(fullReset) { ++ const basicData = [ ++ { ++ name: _("Server Statistics"), ++ icon: , ++ id: "server-monitor", ++ type: "server", ++ }, ++ { ++ name: _("Replication"), ++ icon: , ++ id: "replication-monitor", ++ type: "replication", ++ defaultExpanded: true, ++ children: [ ++ { ++ name: _("Synchronization Report"), ++ icon: , ++ id: "sync-report", ++ item: "sync-report", ++ type: "repl-mon", ++ }, ++ { ++ name: _("Log Analysis"), ++ icon: , ++ id: "log-analysis", ++ item: "log-analysis", ++ type: "repl-mon", ++ } ++ ], ++ }, ++ { ++ name: _("Database"), ++ icon: , ++ id: "database-monitor", ++ type: "database", ++ children: [], // Will be populated with treeData on success ++ defaultExpanded: true, ++ }, ++ { ++ name: _("Logging"), ++ icon: , ++ id: "log-monitor", ++ defaultExpanded: true, ++ children: [ ++ { ++ name: _("Access Log"), ++ icon: , ++ id: "access-log-monitor", ++ type: "log", ++ }, ++ { ++ name: _("Audit Log"), ++ icon: , ++ id: "audit-log-monitor", ++ type: "log", ++ }, ++ { ++ name: _("Audit Failure Log"), ++ icon: , ++ id: "auditfail-log-monitor", ++ type: "log", ++ }, ++ { ++ name: _("Errors Log"), ++ icon: , ++ id: "error-log-monitor", ++ type: "log", ++ }, ++ { ++ name: _("Security Log"), ++ icon: , ++ id: "security-log-monitor", ++ type: "log", ++ }, ++ ] ++ }, ++ ]; ++ + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", + "backend", "get-tree", +@@ -210,83 +288,7 @@ export class Monitor extends React.Component { + .done(content => { + const treeData = JSON.parse(content); + this.processTree(treeData); +- const basicData = [ +- { +- name: _("Server Statistics"), +- icon: , +- id: "server-monitor", +- type: "server", +- }, +- { +- name: _("Replication"), +- icon: , +- id: "replication-monitor", +- type: "replication", +- defaultExpanded: true, +- children: [ +- { +- name: _("Synchronization Report"), +- icon: , +- id: "sync-report", +- item: "sync-report", +- type: "repl-mon", +- }, +- { +- name: _("Log Analysis"), +- icon: , +- id: "log-analysis", +- item: "log-analysis", +- type: "repl-mon", +- } +- ], +- }, +- { +- name: _("Database"), +- icon: , +- id: "database-monitor", +- type: "database", +- children: [], +- defaultExpanded: true, +- }, +- { +- name: _("Logging"), +- icon: , +- id: "log-monitor", +- defaultExpanded: true, +- children: [ +- { +- name: _("Access Log"), +- icon: , +- id: "access-log-monitor", +- type: "log", +- }, +- { +- name: _("Audit Log"), +- icon: , +- id: "audit-log-monitor", +- type: "log", +- }, +- { +- name: _("Audit Failure Log"), +- icon: , +- id: "auditfail-log-monitor", +- type: "log", +- }, +- { +- name: _("Errors Log"), +- icon: , +- id: "error-log-monitor", +- type: "log", +- }, +- { +- name: _("Security Log"), +- icon: , +- id: "security-log-monitor", +- type: "log", +- }, +- ] +- }, +- ]; ++ + let current_node = this.state.node_name; + let type = this.state.node_type; + if (fullReset) { +@@ -296,6 +298,22 @@ export class Monitor extends React.Component { + basicData[2].children = treeData; // database node + this.processReplSuffixes(basicData[1].children); + ++ this.setState(() => ({ ++ 
nodes: basicData, ++ node_name: current_node, ++ node_type: type, ++ }), this.update_tree_nodes); ++ }) ++ .fail(err => { ++ // Handle backend get-tree failure gracefully ++ let current_node = this.state.node_name; ++ let type = this.state.node_type; ++ if (fullReset) { ++ current_node = "server-monitor"; ++ type = "server"; ++ } ++ this.processReplSuffixes(basicData[1].children); ++ + this.setState(() => ({ + nodes: basicData, + node_name: current_node, +diff --git a/src/cockpit/389-console/src/replication.jsx b/src/cockpit/389-console/src/replication.jsx +index fa492fd2a..aa535bfc7 100644 +--- a/src/cockpit/389-console/src/replication.jsx ++++ b/src/cockpit/389-console/src/replication.jsx +@@ -177,6 +177,16 @@ export class Replication extends React.Component { + loaded: false + }); + ++ const basicData = [ ++ { ++ name: _("Suffixes"), ++ icon: , ++ id: "repl-suffixes", ++ children: [], ++ defaultExpanded: true ++ } ++ ]; ++ + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", + "backend", "get-tree", +@@ -199,15 +209,7 @@ export class Replication extends React.Component { + } + } + } +- const basicData = [ +- { +- name: _("Suffixes"), +- icon: , +- id: "repl-suffixes", +- children: [], +- defaultExpanded: true +- } +- ]; ++ + let current_node = this.state.node_name; + let current_type = this.state.node_type; + let replicated = this.state.node_replicated; +@@ -258,6 +260,19 @@ export class Replication extends React.Component { + } + + basicData[0].children = treeData; ++ this.setState({ ++ nodes: basicData, ++ node_name: current_node, ++ node_type: current_type, ++ node_replicated: replicated, ++ }, () => { this.update_tree_nodes() }); ++ }) ++ .fail(err => { ++ // Handle backend get-tree failure gracefully ++ let current_node = this.state.node_name; ++ let current_type = this.state.node_type; ++ let replicated = this.state.node_replicated; ++ + this.setState({ + nodes: basicData, + node_name: current_node, +@@ -905,18 +920,18 @@ export class Replication extends React.Component { + disableTree: false + }); + }); +- }) +- .fail(err => { +- const errMsg = JSON.parse(err); +- this.props.addNotification( +- "error", +- cockpit.format(_("Error loading replication agreements configuration - $0"), errMsg.desc) +- ); +- this.setState({ +- suffixLoading: false, +- disableTree: false ++ }) ++ .fail(err => { ++ const errMsg = JSON.parse(err); ++ this.props.addNotification( ++ "error", ++ cockpit.format(_("Error loading replication agreements configuration - $0"), errMsg.desc) ++ ); ++ this.setState({ ++ suffixLoading: false, ++ disableTree: false ++ }); + }); +- }); + }) + .fail(err => { + // changelog failure +diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py +index 1319fa0cd..5bff61c58 100644 +--- a/src/lib389/lib389/backend.py ++++ b/src/lib389/lib389/backend.py +@@ -694,24 +694,32 @@ class Backend(DSLdapObject): + parent_suffix = properties.pop('parent', False) + + # Okay, now try to make the backend. 
+- super(Backend, self).create(dn, properties, basedn) ++ backend_obj = super(Backend, self).create(dn, properties, basedn) + + # We check if the mapping tree exists in create, so do this *after* + if create_mapping_tree is True: +- properties = { ++ mapping_tree_properties = { + 'cn': self._nprops_stash['nsslapd-suffix'], + 'nsslapd-state': 'backend', + 'nsslapd-backend': self._nprops_stash['cn'], + } + if parent_suffix: + # This is a subsuffix, set the parent suffix +- properties['nsslapd-parent-suffix'] = parent_suffix +- self._mts.create(properties=properties) ++ mapping_tree_properties['nsslapd-parent-suffix'] = parent_suffix ++ ++ try: ++ self._mts.create(properties=mapping_tree_properties) ++ except Exception as e: ++ try: ++ backend_obj.delete() ++ except Exception as cleanup_error: ++ self._instance.log.error(f"Failed to cleanup backend after mapping tree creation failure: {cleanup_error}") ++ raise e + + # We can't create the sample entries unless a mapping tree was installed. + if sample_entries is not False and create_mapping_tree is True: + self.create_sample_entries(sample_entries) +- return self ++ return backend_obj + + def delete(self): + """Deletes the backend, it's mapping tree and all related indices. +-- +2.49.0 + diff --git a/SOURCES/0008-Issue-6554-During-import-of-entries-without-nsUnique.patch b/SOURCES/0008-Issue-6554-During-import-of-entries-without-nsUnique.patch deleted file mode 100644 index f5dd5f0..0000000 --- a/SOURCES/0008-Issue-6554-During-import-of-entries-without-nsUnique.patch +++ /dev/null @@ -1,165 +0,0 @@ -From b2511553590f0d9b41856d8baff5f3cd103dd46f Mon Sep 17 00:00:00 2001 -From: tbordaz -Date: Thu, 6 Feb 2025 18:25:36 +0100 -Subject: [PATCH] Issue 6554 - During import of entries without nsUniqueId, a - supplier generates duplicate nsUniqueId (LMDB only) (#6582) - -Bug description: - During an import the entry is prepared (schema, operational - attributes, password encryption,...) before starting the - update of the database and indexes. - A step of the preparation is to assign a value to 'nsuniqueid' - operational attribute. 'nsuniqueid' must be unique. - In LMDB the preparation is done by multiple threads (workers). - In such case the 'nsuniqueid' are generated in parallel and - as it is time based several values can be duplicated. - -Fix description: - To prevent that the routine dbmdb_import_generate_uniqueid - should make sure to synchronize the workers. 
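As a rough Python sketch of the same idea (the actual fix further down is C and simply wraps the generator call in a pthread mutex): a time-based ID source is only collision-free across parallel import workers if each read of the clock is serialized. The names below are illustrative, and the sketch adds a defensive bump in case the clock has not advanced between calls:

    import threading
    import time

    _lock = threading.Lock()
    _last = [0]

    def generate_time_based_id():
        # Serialize generation so concurrent workers never observe the same
        # clock reading; nudge the value forward if the clock has not moved.
        with _lock:
            now = time.monotonic_ns()
            if now <= _last[0]:
                now = _last[0] + 1
            _last[0] = now
            return "%016x" % now

    ids = []

    def worker():
        for _ in range(1000):
            ids.append(generate_time_based_id())

    threads = [threading.Thread(target=worker) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    assert len(ids) == len(set(ids))  # no duplicates once generation is serialized
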
- -fixes: #6554 - -Reviewed by: Pierre Rogier ---- - .../tests/suites/import/import_test.py | 79 ++++++++++++++++++- - .../back-ldbm/db-mdb/mdb_import_threads.c | 11 +++ - 2 files changed, 89 insertions(+), 1 deletion(-) - -diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py -index b7cba32fd..18caec633 100644 ---- a/dirsrvtests/tests/suites/import/import_test.py -+++ b/dirsrvtests/tests/suites/import/import_test.py -@@ -14,11 +14,13 @@ import os - import pytest - import time - import glob -+import re - import logging - import subprocess - from datetime import datetime - from lib389.topologies import topology_st as topo --from lib389._constants import DEFAULT_SUFFIX, TaskWarning -+from lib389.topologies import topology_m2 as topo_m2 -+from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX, TaskWarning - from lib389.dbgen import dbgen_users - from lib389.tasks import ImportTask - from lib389.index import Indexes -@@ -690,6 +692,81 @@ def test_online_import_under_load(topo): - assert import_task.get_exit_code() == 0 - - -+def test_duplicate_nsuniqueid(topo_m2, request): -+ """Test that after an offline import all -+ nsuniqueid are different -+ -+ :id: a2541677-a288-4633-bacf-4050cc56016d -+ :setup: MMR with 2 suppliers -+ :steps: -+ 1. stop the instance to do offline operations -+ 2. Generate a 5K users LDIF file -+ 3. Check that no uniqueid are present in the generated file -+ 4. import the generated LDIF -+ 5. export the database -+ 6. Check that that exported LDIF contains more than 5K nsuniqueid -+ 7. Check that there is no duplicate nsuniqued in exported LDIF -+ :expectedresults: -+ 1. Should succeeds -+ 2. Should succeeds -+ 3. Should succeeds -+ 4. Should succeeds -+ 5. Should succeeds -+ 6. Should succeeds -+ 7. 
Should succeeds -+ """ -+ m1 = topo_m2.ms["supplier1"] -+ -+ # Stop the instance -+ m1.stop() -+ -+ # Generate a test ldif (5k entries) -+ log.info("Generating LDIF...") -+ ldif_dir = m1.get_ldif_dir() -+ import_ldif = ldif_dir + '/5k_users_import.ldif' -+ dbgen_users(m1, 5000, import_ldif, DEFAULT_SUFFIX) -+ -+ # Check that the generated LDIF does not contain nsuniqueid -+ all_nsuniqueid = [] -+ with open(import_ldif, 'r') as file: -+ for line in file: -+ if line.lower().startswith("nsuniqueid: "): -+ all_nsuniqueid.append(line.split(': ')[1]) -+ log.info("import file contains " + str(len(all_nsuniqueid)) + " nsuniqueid") -+ assert len(all_nsuniqueid) == 0 -+ -+ # Import the "nsuniquied free" LDIF file -+ if not m1.ldif2db('userRoot', None, None, None, import_ldif): -+ assert False -+ -+ # Export the DB that now should contain nsuniqueid -+ export_ldif = ldif_dir + '/5k_user_export.ldif' -+ log.info("export to file " + export_ldif) -+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], -+ excludeSuffixes=None, repl_data=False, -+ outputfile=export_ldif, encrypt=False) -+ -+ # Check that the export LDIF contain nsuniqueid -+ all_nsuniqueid = [] -+ with open(export_ldif, 'r') as file: -+ for line in file: -+ if line.lower().startswith("nsuniqueid: "): -+ all_nsuniqueid.append(line.split(': ')[1]) -+ log.info("export file " + export_ldif + " contains " + str(len(all_nsuniqueid)) + " nsuniqueid") -+ assert len(all_nsuniqueid) >= 5000 -+ -+ # Check that the nsuniqueid are unique -+ assert len(set(all_nsuniqueid)) == len(all_nsuniqueid) -+ -+ def fin(): -+ if os.path.exists(import_ldif): -+ os.remove(import_ldif) -+ if os.path.exists(export_ldif): -+ os.remove(export_ldif) -+ m1.start -+ -+ request.addfinalizer(fin) -+ - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode -diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c -index 707a110c5..0f445bb56 100644 ---- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c -+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c -@@ -610,10 +610,20 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e) - { - const char *uniqueid = slapi_entry_get_uniqueid(e); - int rc = UID_SUCCESS; -+ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; - - if (!uniqueid && (job->uuid_gen_type != SLAPI_UNIQUEID_GENERATE_NONE)) { - char *newuniqueid; - -+ /* With 'mdb' we have several workers generating nsuniqueid -+ * we need to serialize them to prevent generating duplicate value -+ * From performance pov it only impacts import -+ * The default value is SLAPI_UNIQUEID_GENERATE_TIME_BASED so -+ * the only syscall is clock_gettime and then string formating -+ * that should limit contention -+ */ -+ pthread_mutex_lock(&mutex); -+ - /* generate id based on dn */ - if (job->uuid_gen_type == SLAPI_UNIQUEID_GENERATE_NAME_BASED) { - char *dn = slapi_entry_get_dn(e); -@@ -624,6 +634,7 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e) - /* time based */ - rc = slapi_uniqueIDGenerateString(&newuniqueid); - } -+ pthread_mutex_unlock(&mutex); - - if (rc == UID_SUCCESS) { - slapi_entry_set_uniqueid(e, newuniqueid); --- -2.48.0 - diff --git a/SOURCES/0008-Issue-6857-uiduniq-allow-specifying-match-rules-in-t.patch b/SOURCES/0008-Issue-6857-uiduniq-allow-specifying-match-rules-in-t.patch new file mode 100644 index 0000000..5d62b75 --- /dev/null +++ b/SOURCES/0008-Issue-6857-uiduniq-allow-specifying-match-rules-in-t.patch @@ -0,0 +1,45 @@ 
+From 0a7fe7c6e18759459499f468443ded4313ebdeab Mon Sep 17 00:00:00 2001 +From: Alexander Bokovoy +Date: Wed, 9 Jul 2025 12:08:09 +0300 +Subject: [PATCH] Issue 6857 - uiduniq: allow specifying match rules in the + filter + +Allow uniqueness plugin to work with attributes where uniqueness should +be enforced using different matching rule than the one defined for the +attribute itself. + +Since uniqueness plugin configuration can contain multiple attributes, +add matching rule right to the attribute as it is used in the LDAP rule +(e.g. 'attribute:caseIgnoreMatch:' to force 'attribute' to be searched +with case-insensitive matching rule instead of the original matching +rule. + +Fixes: https://github.com/389ds/389-ds-base/issues/6857 + +Signed-off-by: Alexander Bokovoy +--- + ldap/servers/plugins/uiduniq/uid.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c +index 053af4f9d..887e79d78 100644 +--- a/ldap/servers/plugins/uiduniq/uid.c ++++ b/ldap/servers/plugins/uiduniq/uid.c +@@ -1030,7 +1030,14 @@ preop_add(Slapi_PBlock *pb) + } + + for (i = 0; attrNames && attrNames[i]; i++) { ++ char *attr_match = strchr(attrNames[i], ':'); ++ if (attr_match != NULL) { ++ attr_match[0] = '\0'; ++ } + err = slapi_entry_attr_find(e, attrNames[i], &attr); ++ if (attr_match != NULL) { ++ attr_match[0] = ':'; ++ } + if (!err) { + /* + * Passed all the requirements - this is an operation we +-- +2.49.0 + diff --git a/SOURCES/0009-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch b/SOURCES/0009-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch deleted file mode 100644 index 93fef39..0000000 --- a/SOURCES/0009-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 116b7cf21618ad7e717ae7f535709508a824f7d9 Mon Sep 17 00:00:00 2001 -From: Viktor Ashirov -Date: Thu, 13 Feb 2025 16:37:43 +0100 -Subject: [PATCH] Issue 6561 - TLS 1.2 stickiness in FIPS mode - -Description: -TLS 1.3 works with NSS in FIPS mode for quite some time now, -this restriction is no longer needed. - -Fixes: https://github.com/389ds/389-ds-base/issues/6561 - -Reviewed by: @mreynolds389 (Thanks!) ---- - ldap/servers/slapd/ssl.c | 8 -------- - 1 file changed, 8 deletions(-) - -diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c -index 94259efe7..84a7fb004 100644 ---- a/ldap/servers/slapd/ssl.c -+++ b/ldap/servers/slapd/ssl.c -@@ -1929,14 +1929,6 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) - */ - sslStatus = SSL_VersionRangeGet(pr_sock, &slapdNSSVersions); - if (sslStatus == SECSuccess) { -- if (slapdNSSVersions.max > LDAP_OPT_X_TLS_PROTOCOL_TLS1_2 && fipsMode) { -- /* -- * FIPS & NSS currently only support a max version of TLS1.2 -- * (although NSS advertises 1.3 as a max range in FIPS mode), -- * hopefully this code block can be removed soon... 
-- */ -- slapdNSSVersions.max = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2; -- } - /* Reset request range */ - sslStatus = SSL_VersionRangeSet(pr_sock, &slapdNSSVersions); - if (sslStatus == SECSuccess) { --- -2.48.1 - diff --git a/SOURCES/0009-Issue-6756-CLI-UI-Properly-handle-disabled-NDN-cache.patch b/SOURCES/0009-Issue-6756-CLI-UI-Properly-handle-disabled-NDN-cache.patch new file mode 100644 index 0000000..f519007 --- /dev/null +++ b/SOURCES/0009-Issue-6756-CLI-UI-Properly-handle-disabled-NDN-cache.patch @@ -0,0 +1,1201 @@ +From b28b00ee5169cfb00414bc9bcca67f88432ad567 Mon Sep 17 00:00:00 2001 +From: Simon Pichugin +Date: Thu, 10 Jul 2025 11:53:12 -0700 +Subject: [PATCH] Issue 6756 - CLI, UI - Properly handle disabled NDN cache + (#6757) + +Description: Fix the db_monitor function in monitor.py to check if +nsslapd-ndn-cache-enabled is off and conditionally include NDN cache +statistics only when enabled. + +Update dbMonitor.jsx components to detect when NDN cache is disabled and +conditionally render NDN cache tabs, charts, and related content with proper +fallback display when disabled. + +Add test_ndn_cache_disabled to verify both JSON and non-JSON output formats +correctly handle when NDN cache is turned off and on. + +Fixes: https://github.com/389ds/389-ds-base/issues/6756 + +Reviewed by: @mreynolds389 (Thanks!) +--- + dirsrvtests/tests/suites/clu/dbmon_test.py | 90 +++ + src/cockpit/389-console/src/database.jsx | 4 +- + .../src/lib/database/databaseConfig.jsx | 48 +- + .../389-console/src/lib/monitor/dbMonitor.jsx | 735 ++++++++++-------- + src/lib389/lib389/cli_conf/monitor.py | 77 +- + 5 files changed, 580 insertions(+), 374 deletions(-) + +diff --git a/dirsrvtests/tests/suites/clu/dbmon_test.py b/dirsrvtests/tests/suites/clu/dbmon_test.py +index 4a82eb0ef..b04ee67c9 100644 +--- a/dirsrvtests/tests/suites/clu/dbmon_test.py ++++ b/dirsrvtests/tests/suites/clu/dbmon_test.py +@@ -11,6 +11,7 @@ import subprocess + import pytest + import json + import glob ++import re + + from lib389.tasks import * + from lib389.utils import * +@@ -274,6 +275,95 @@ def test_dbmon_mp_pagesize(topology_st): + assert real_free_percentage == dbmon_free_percentage + + ++def test_ndn_cache_disabled(topology_st): ++ """Test dbmon output when ndn-cache-enabled is turned off ++ ++ :id: 760e217c-70e8-4767-b504-dda7ba2e1f64 ++ :setup: Standalone instance ++ :steps: ++ 1. Run dbmon with nsslapd-ndn-cache-enabled=on (default) ++ 2. Verify NDN cache stats are present in the output ++ 3. Set nsslapd-ndn-cache-enabled=off and restart ++ 4. Run dbmon again and verify NDN cache stats are not present ++ 5. Set nsslapd-ndn-cache-enabled=on and restart ++ 6. Run dbmon again and verify NDN cache stats are back ++ :expectedresults: ++ 1. Success ++ 2. Should display NDN cache data ++ 3. Success ++ 4. Should not display NDN cache data ++ 5. Success ++ 6. 
Should display NDN cache data ++ """ ++ inst = topology_st.standalone ++ args = FakeArgs() ++ args.backends = None ++ args.indexes = False ++ args.json = True ++ lc = LogCapture() ++ ++ log.info("Testing with NDN cache enabled (default)") ++ db_monitor(inst, DEFAULT_SUFFIX, lc.log, args) ++ db_mon_as_str = "".join((str(rec) for rec in lc.outputs)) ++ db_mon_as_str = re.sub("^[^{]*{", "{", db_mon_as_str)[:-2] ++ db_mon = json.loads(db_mon_as_str) ++ ++ assert 'ndncache' in db_mon ++ assert 'hit_ratio' in db_mon['ndncache'] ++ lc.flush() ++ ++ log.info("Setting nsslapd-ndn-cache-enabled to OFF") ++ inst.config.set('nsslapd-ndn-cache-enabled', 'off') ++ inst.restart() ++ ++ log.info("Testing with NDN cache disabled") ++ db_monitor(inst, DEFAULT_SUFFIX, lc.log, args) ++ db_mon_as_str = "".join((str(rec) for rec in lc.outputs)) ++ db_mon_as_str = re.sub("^[^{]*{", "{", db_mon_as_str)[:-2] ++ db_mon = json.loads(db_mon_as_str) ++ ++ assert 'ndncache' not in db_mon ++ lc.flush() ++ ++ log.info("Setting nsslapd-ndn-cache-enabled to ON") ++ inst.config.set('nsslapd-ndn-cache-enabled', 'on') ++ inst.restart() ++ ++ log.info("Testing with NDN cache re-enabled") ++ db_monitor(inst, DEFAULT_SUFFIX, lc.log, args) ++ db_mon_as_str = "".join((str(rec) for rec in lc.outputs)) ++ db_mon_as_str = re.sub("^[^{]*{", "{", db_mon_as_str)[:-2] ++ db_mon = json.loads(db_mon_as_str) ++ ++ assert 'ndncache' in db_mon ++ assert 'hit_ratio' in db_mon['ndncache'] ++ lc.flush() ++ ++ args.json = False ++ ++ log.info("Testing with NDN cache enabled - non-JSON output") ++ db_monitor(inst, DEFAULT_SUFFIX, lc.log, args) ++ output = "".join((str(rec) for rec in lc.outputs)) ++ ++ assert "Normalized DN Cache:" in output ++ assert "Cache Hit Ratio:" in output ++ lc.flush() ++ ++ log.info("Setting nsslapd-ndn-cache-enabled to OFF") ++ inst.config.set('nsslapd-ndn-cache-enabled', 'off') ++ inst.restart() ++ ++ log.info("Testing with NDN cache disabled - non-JSON output") ++ db_monitor(inst, DEFAULT_SUFFIX, lc.log, args) ++ output = "".join((str(rec) for rec in lc.outputs)) ++ ++ assert "Normalized DN Cache:" not in output ++ lc.flush() ++ ++ inst.config.set('nsslapd-ndn-cache-enabled', 'on') ++ inst.restart() ++ ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx +index 276125dfc..86b642b92 100644 +--- a/src/cockpit/389-console/src/database.jsx ++++ b/src/cockpit/389-console/src/database.jsx +@@ -198,7 +198,7 @@ export class Database extends React.Component { + }); + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", +- "config", "get", "nsslapd-ndn-cache-max-size" ++ "config", "get", "nsslapd-ndn-cache-max-size", "nsslapd-ndn-cache-enabled" + ]; + log_cmd("loadNDN", "Load NDN cache size", cmd); + cockpit +@@ -206,10 +206,12 @@ export class Database extends React.Component { + .done(content => { + const config = JSON.parse(content); + const attrs = config.attrs; ++ const ndn_cache_enabled = attrs['nsslapd-ndn-cache-enabled'][0] === "on"; + this.setState(prevState => ({ + globalDBConfig: { + ...prevState.globalDBConfig, + ndncachemaxsize: attrs['nsslapd-ndn-cache-max-size'][0], ++ ndn_cache_enabled: ndn_cache_enabled, + }, + configUpdated: 0, + loaded: true, +diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx +index 4c7fce706..adb8227d7 100644 +--- 
a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx ++++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx +@@ -2,12 +2,16 @@ import cockpit from "cockpit"; + import React from "react"; + import { log_cmd } from "../tools.jsx"; + import { ++ Alert, + Button, + Checkbox, ++ Form, + Grid, + GridItem, ++ Hr, + NumberInput, + Spinner, ++ Switch, + Tab, + Tabs, + TabTitleText, +@@ -852,12 +856,29 @@ export class GlobalDatabaseConfig extends React.Component { + + {_("NDN Cache")}}> +
++ ++ {this.props.data.ndn_cache_enabled === false && ( ++ ++ ++ {_("The Normalized DN Cache is currently disabled. To enable it, go to Server Settings → Tuning & Limits and enable 'Normalized DN Cache', then restart the server for the changes to take effect.")} ++ ++ ++ )} ++ + + +- {_("Normalized DN Cache Max Size")} ++ {_("Normalized DN Cache Max Size") } + + + + + +@@ -1470,7 +1491,7 @@ export class GlobalDatabaseConfigMDB extends React.Component { + {_("Database Size")}}> +
+ + +@@ -1641,6 +1662,23 @@ export class GlobalDatabaseConfigMDB extends React.Component { + + {_("NDN Cache")}}> +
++ ++ {this.props.data.ndn_cache_enabled === false && ( ++ ++ ++ {_("The Normalized DN Cache is currently disabled. To enable it, go to Server Settings → Tuning & Limits and enable 'Normalized DN Cache', then restart the server for the changes to take effect.")} ++ ++ ++ )} ++ + 0; ++ let ndn_chart_data = this.state.ndnCacheList; ++ let ndn_util_chart_data = this.state.ndnCacheUtilList; ++ ++ // Only build NDN cache chart data if NDN cache is enabled ++ if (ndn_cache_enabled) { ++ const ndnratio = config.attrs.normalizeddncachehitratio[0]; ++ ndn_chart_data = this.state.ndnCacheList; ++ ndn_chart_data.shift(); ++ ndn_chart_data.push({ name: _("Cache Hit Ratio"), x: count.toString(), y: parseInt(ndnratio) }); ++ ++ // Build up the NDN Cache Util chart data ++ ndn_util_chart_data = this.state.ndnCacheUtilList; ++ const currNDNSize = parseInt(config.attrs.currentnormalizeddncachesize[0]); ++ const maxNDNSize = parseInt(config.attrs.maxnormalizeddncachesize[0]); ++ const ndn_utilization = (currNDNSize / maxNDNSize) * 100; ++ ndn_util_chart_data.shift(); ++ ndn_util_chart_data.push({ name: _("Cache Utilization"), x: ndnCount.toString(), y: parseInt(ndn_utilization) }); ++ } + + this.setState({ + data: config.attrs, +@@ -157,7 +167,8 @@ export class DatabaseMonitor extends React.Component { + ndnCacheList: ndn_chart_data, + ndnCacheUtilList: ndn_util_chart_data, + count, +- ndnCount ++ ndnCount, ++ ndn_cache_enabled + }); + }) + .fail(() => { +@@ -197,13 +208,20 @@ export class DatabaseMonitor extends React.Component { + + if (!this.state.loading) { + dbcachehit = parseInt(this.state.data.dbcachehitratio[0]); +- ndncachehit = parseInt(this.state.data.normalizeddncachehitratio[0]); +- ndncachemax = parseInt(this.state.data.maxnormalizeddncachesize[0]); +- ndncachecurr = parseInt(this.state.data.currentnormalizeddncachesize[0]); +- utilratio = Math.round((ndncachecurr / ndncachemax) * 100); +- if (utilratio === 0) { +- // Just round up to 1 +- utilratio = 1; ++ ++ // Check if NDN cache is enabled ++ const ndn_cache_enabled = this.state.data.normalizeddncachehitratio && ++ this.state.data.normalizeddncachehitratio.length > 0; ++ ++ if (ndn_cache_enabled) { ++ ndncachehit = parseInt(this.state.data.normalizeddncachehitratio[0]); ++ ndncachemax = parseInt(this.state.data.maxnormalizeddncachesize[0]); ++ ndncachecurr = parseInt(this.state.data.currentnormalizeddncachesize[0]); ++ utilratio = Math.round((ndncachecurr / ndncachemax) * 100); ++ if (utilratio === 0) { ++ // Just round up to 1 ++ utilratio = 1; ++ } + } + + // Database cache +@@ -214,119 +232,131 @@ export class DatabaseMonitor extends React.Component { + } else { + chartColor = ChartThemeColor.purple; + } +- // NDN cache ratio +- if (ndncachehit > 89) { +- ndnChartColor = ChartThemeColor.green; +- } else if (ndncachehit > 74) { +- ndnChartColor = ChartThemeColor.orange; +- } else { +- ndnChartColor = ChartThemeColor.purple; +- } +- // NDN cache utilization +- if (utilratio > 95) { +- ndnUtilColor = ChartThemeColor.purple; +- } else if (utilratio > 90) { +- ndnUtilColor = ChartThemeColor.orange; +- } else { +- ndnUtilColor = ChartThemeColor.green; ++ ++ // NDN cache colors only if enabled ++ if (ndn_cache_enabled) { ++ // NDN cache ratio ++ if (ndncachehit > 89) { ++ ndnChartColor = ChartThemeColor.green; ++ } else if (ndncachehit > 74) { ++ ndnChartColor = ChartThemeColor.orange; ++ } else { ++ ndnChartColor = ChartThemeColor.purple; ++ } ++ // NDN cache utilization ++ if (utilratio > 95) { ++ ndnUtilColor = ChartThemeColor.purple; ++ } 
else if (utilratio > 90) { ++ ndnUtilColor = ChartThemeColor.orange; ++ } else { ++ ndnUtilColor = ChartThemeColor.green; ++ } + } + +- content = ( +- +- {_("Database Cache")}}> +-
+- +- +-
+-
+- +- +- {_("Cache Hit Ratio")} +- +- +- +- +- {dbcachehit}% +- +- +-
+-
+- `${datum.name}: ${datum.y}`} constrainToVisibleArea />} +- height={200} +- maxDomain={{ y: 100 }} +- minDomain={{ y: 0 }} +- padding={{ +- bottom: 30, +- left: 40, +- top: 10, +- right: 10, +- }} +- width={500} +- themeColor={chartColor} +- > +- +- +- +- +- +- +-
++ // Create tabs based on what caches are available ++ const tabs = []; ++ ++ // Database Cache tab is always available ++ tabs.push( ++ {_("Database Cache")}}> ++
++ ++ ++
++
++ ++ ++ {_("Cache Hit Ratio")} ++ ++ ++ ++ ++ {dbcachehit}% ++ ++ +
+- +- +-
++
++ `${datum.name}: ${datum.y}`} constrainToVisibleArea />} ++ height={200} ++ maxDomain={{ y: 100 }} ++ minDomain={{ y: 0 }} ++ padding={{ ++ bottom: 30, ++ left: 40, ++ top: 10, ++ right: 10, ++ }} ++ width={500} ++ themeColor={chartColor} ++ > ++ ++ ++ ++ ++ ++ ++
++
++ ++ ++
++ ++ ++ ++ {_("Database Cache Hit Ratio:")} ++ ++ ++ {this.state.data.dbcachehitratio}% ++ ++ ++ {_("Database Cache Tries:")} ++ ++ ++ {numToCommas(this.state.data.dbcachetries)} ++ ++ ++ {_("Database Cache Hits:")} ++ ++ ++ {numToCommas(this.state.data.dbcachehits)} ++ ++ ++ {_("Cache Pages Read:")} ++ ++ ++ {numToCommas(this.state.data.dbcachepagein)} ++ ++ ++ {_("Cache Pages Written:")} ++ ++ ++ {numToCommas(this.state.data.dbcachepageout)} ++ ++ ++ {_("Read-Only Page Evictions:")} ++ ++ ++ {numToCommas(this.state.data.dbcacheroevict)} ++ ++ ++ {_("Read-Write Page Evictions:")} ++ ++ ++ {numToCommas(this.state.data.dbcacherwevict)} ++ ++ ++ ++ ); + +- +- +- {_("Database Cache Hit Ratio:")} +- +- +- {this.state.data.dbcachehitratio}% +- +- +- {_("Database Cache Tries:")} +- +- +- {numToCommas(this.state.data.dbcachetries)} +- +- +- {_("Database Cache Hits:")} +- +- +- {numToCommas(this.state.data.dbcachehits)} +- +- +- {_("Cache Pages Read:")} +- +- +- {numToCommas(this.state.data.dbcachepagein)} +- +- +- {_("Cache Pages Written:")} +- +- +- {numToCommas(this.state.data.dbcachepageout)} +- +- +- {_("Read-Only Page Evictions:")} +- +- +- {numToCommas(this.state.data.dbcacheroevict)} +- +- +- {_("Read-Write Page Evictions:")} +- +- +- {numToCommas(this.state.data.dbcacherwevict)} +- +- +- +- {_("Normalized DN Cache")}}> ++ // Only add NDN Cache tab if NDN cache is enabled ++ if (ndn_cache_enabled) { ++ tabs.push( ++ {_("Normalized DN Cache")}}> +
+ + +@@ -487,6 +517,12 @@ export class DatabaseMonitor extends React.Component { + +
+
++ ); ++ } ++ ++ content = ( ++ ++ {tabs} + + ); + } +@@ -533,7 +569,8 @@ export class DatabaseMonitorMDB extends React.Component { + ndnCount: 5, + dbCacheList: [], + ndnCacheList: [], +- ndnCacheUtilList: [] ++ ndnCacheUtilList: [], ++ ndn_cache_enabled: false + }; + + // Toggle currently active tab +@@ -585,6 +622,7 @@ export class DatabaseMonitorMDB extends React.Component { + { name: "", x: "4", y: 0 }, + { name: "", x: "5", y: 0 }, + ], ++ ndn_cache_enabled: false + }); + } + +@@ -605,19 +643,28 @@ export class DatabaseMonitorMDB extends React.Component { + count = 1; + } + +- // Build up the NDN Cache chart data +- const ndnratio = config.attrs.normalizeddncachehitratio[0]; +- const ndn_chart_data = this.state.ndnCacheList; +- ndn_chart_data.shift(); +- ndn_chart_data.push({ name: _("Cache Hit Ratio"), x: count.toString(), y: parseInt(ndnratio) }); +- +- // Build up the DB Cache Util chart data +- const ndn_util_chart_data = this.state.ndnCacheUtilList; +- const currNDNSize = parseInt(config.attrs.currentnormalizeddncachesize[0]); +- const maxNDNSize = parseInt(config.attrs.maxnormalizeddncachesize[0]); +- const ndn_utilization = (currNDNSize / maxNDNSize) * 100; +- ndn_util_chart_data.shift(); +- ndn_util_chart_data.push({ name: _("Cache Utilization"), x: ndnCount.toString(), y: parseInt(ndn_utilization) }); ++ // Check if NDN cache is enabled ++ const ndn_cache_enabled = config.attrs.normalizeddncachehitratio && ++ config.attrs.normalizeddncachehitratio.length > 0; ++ let ndn_chart_data = this.state.ndnCacheList; ++ let ndn_util_chart_data = this.state.ndnCacheUtilList; ++ ++ // Only build NDN cache chart data if NDN cache is enabled ++ if (ndn_cache_enabled) { ++ // Build up the NDN Cache chart data ++ const ndnratio = config.attrs.normalizeddncachehitratio[0]; ++ ndn_chart_data = this.state.ndnCacheList; ++ ndn_chart_data.shift(); ++ ndn_chart_data.push({ name: _("Cache Hit Ratio"), x: count.toString(), y: parseInt(ndnratio) }); ++ ++ // Build up the DB Cache Util chart data ++ ndn_util_chart_data = this.state.ndnCacheUtilList; ++ const currNDNSize = parseInt(config.attrs.currentnormalizeddncachesize[0]); ++ const maxNDNSize = parseInt(config.attrs.maxnormalizeddncachesize[0]); ++ const ndn_utilization = (currNDNSize / maxNDNSize) * 100; ++ ndn_util_chart_data.shift(); ++ ndn_util_chart_data.push({ name: _("Cache Utilization"), x: ndnCount.toString(), y: parseInt(ndn_utilization) }); ++ } + + this.setState({ + data: config.attrs, +@@ -625,7 +672,8 @@ export class DatabaseMonitorMDB extends React.Component { + ndnCacheList: ndn_chart_data, + ndnCacheUtilList: ndn_util_chart_data, + count, +- ndnCount ++ ndnCount, ++ ndn_cache_enabled + }); + }) + .fail(() => { +@@ -662,197 +710,214 @@ export class DatabaseMonitorMDB extends React.Component { + ); + + if (!this.state.loading) { +- ndncachehit = parseInt(this.state.data.normalizeddncachehitratio[0]); +- ndncachemax = parseInt(this.state.data.maxnormalizeddncachesize[0]); +- ndncachecurr = parseInt(this.state.data.currentnormalizeddncachesize[0]); +- utilratio = Math.round((ndncachecurr / ndncachemax) * 100); +- if (utilratio === 0) { +- // Just round up to 1 +- utilratio = 1; +- } +- +- // NDN cache ratio +- if (ndncachehit > 89) { +- ndnChartColor = ChartThemeColor.green; +- } else if (ndncachehit > 74) { +- ndnChartColor = ChartThemeColor.orange; +- } else { +- ndnChartColor = ChartThemeColor.purple; +- } +- // NDN cache utilization +- if (utilratio > 95) { +- ndnUtilColor = ChartThemeColor.purple; +- } else if (utilratio > 90) { 
+- ndnUtilColor = ChartThemeColor.orange; +- } else { +- ndnUtilColor = ChartThemeColor.green; +- } +- +- content = ( +- +- {_("Normalized DN Cache")}}> +-
+- +- +- +- +-
+-
+- +- +- {_("Cache Hit Ratio")} +- +- +- +- +- {ndncachehit}% +- +- +-
+-
+- `${datum.name}: ${datum.y}`} constrainToVisibleArea />} +- height={200} +- maxDomain={{ y: 100 }} +- minDomain={{ y: 0 }} +- padding={{ +- bottom: 40, +- left: 60, +- top: 10, +- right: 15, +- }} +- width={350} +- themeColor={ndnChartColor} +- > +- +- +- +- +- +- +-
+-
+-
+-
+-
+- +- +- +-
+-
+- +- +- {_("Cache Utilization")} +- +- +- +- +- {utilratio}% +- +- +- +- +- {_("Cached DN's")} +- +- +- {numToCommas(this.state.data.currentnormalizeddncachecount[0])} ++ // Check if NDN cache is enabled ++ const ndn_cache_enabled = this.state.data.normalizeddncachehitratio && ++ this.state.data.normalizeddncachehitratio.length > 0; ++ ++ if (ndn_cache_enabled) { ++ ndncachehit = parseInt(this.state.data.normalizeddncachehitratio[0]); ++ ndncachemax = parseInt(this.state.data.maxnormalizeddncachesize[0]); ++ ndncachecurr = parseInt(this.state.data.currentnormalizeddncachesize[0]); ++ utilratio = Math.round((ndncachecurr / ndncachemax) * 100); ++ if (utilratio === 0) { ++ // Just round up to 1 ++ utilratio = 1; ++ } ++ ++ // NDN cache ratio ++ if (ndncachehit > 89) { ++ ndnChartColor = ChartThemeColor.green; ++ } else if (ndncachehit > 74) { ++ ndnChartColor = ChartThemeColor.orange; ++ } else { ++ ndnChartColor = ChartThemeColor.purple; ++ } ++ // NDN cache utilization ++ if (utilratio > 95) { ++ ndnUtilColor = ChartThemeColor.purple; ++ } else if (utilratio > 90) { ++ ndnUtilColor = ChartThemeColor.orange; ++ } else { ++ ndnUtilColor = ChartThemeColor.green; ++ } ++ ++ content = ( ++ ++ {_("Normalized DN Cache")}}> ++
++ ++ ++ ++ ++
++
++ ++ ++ {_("Cache Hit Ratio")} ++ ++ ++ ++ ++ {ndncachehit}% ++ ++ ++
++
++ `${datum.name}: ${datum.y}`} constrainToVisibleArea />} ++ height={200} ++ maxDomain={{ y: 100 }} ++ minDomain={{ y: 0 }} ++ padding={{ ++ bottom: 40, ++ left: 60, ++ top: 10, ++ right: 15, ++ }} ++ width={350} ++ themeColor={ndnChartColor} ++ > ++ ++ ++ ++ ++ ++ ++
+
+-
+- `${datum.name}: ${datum.y}`} constrainToVisibleArea />} +- height={200} +- maxDomain={{ y: 100 }} +- minDomain={{ y: 0 }} +- padding={{ +- bottom: 40, +- left: 60, +- top: 10, +- right: 15, +- }} +- width={350} +- themeColor={ndnUtilColor} +- > +- +- +- +- +- +- ++ ++ ++ ++ ++ ++ ++
++
++ ++ ++ {_("Cache Utilization")} ++ ++ ++ ++ ++ {utilratio}% ++ ++ ++ ++ ++ {_("Cached DN's")} ++ ++ ++ {numToCommas(this.state.data.currentnormalizeddncachecount[0])} ++
++
++ `${datum.name}: ${datum.y}`} constrainToVisibleArea />} ++ height={200} ++ maxDomain={{ y: 100 }} ++ minDomain={{ y: 0 }} ++ padding={{ ++ bottom: 40, ++ left: 60, ++ top: 10, ++ right: 15, ++ }} ++ width={350} ++ themeColor={ndnUtilColor} ++ > ++ ++ ++ ++ ++ ++ ++
+
+-
+-
+-
+-
+-
+- +- +- +- {_("NDN Cache Hit Ratio:")} +- +- +- {this.state.data.normalizeddncachehitratio}% +- +- +- {_("NDN Cache Max Size:")} +- +- +- {displayBytes(this.state.data.maxnormalizeddncachesize)} +- +- +- {_("NDN Cache Tries:")} +- +- +- {numToCommas(this.state.data.normalizeddncachetries)} +- +- +- {_("NDN Current Cache Size:")} +- +- +- {displayBytes(this.state.data.currentnormalizeddncachesize)} +- +- +- {_("NDN Cache Hits:")} +- +- +- {numToCommas(this.state.data.normalizeddncachehits)} +- +- +- {_("NDN Cache DN Count:")} +- +- +- {numToCommas(this.state.data.currentnormalizeddncachecount)} +- +- +- {_("NDN Cache Evictions:")} +- +- +- {numToCommas(this.state.data.normalizeddncacheevictions)} +- +- +- {_("NDN Cache Thread Size:")} +- +- +- {numToCommas(this.state.data.normalizeddncachethreadsize)} +- +- +- {_("NDN Cache Thread Slots:")} +- +- +- {numToCommas(this.state.data.normalizeddncachethreadslots)} +- +- +-
+-
+-
+- ); ++ ++ ++ ++ ++ ++ ++ ++ {_("NDN Cache Hit Ratio:")} ++ ++ ++ {this.state.data.normalizeddncachehitratio}% ++ ++ ++ {_("NDN Cache Max Size:")} ++ ++ ++ {displayBytes(this.state.data.maxnormalizeddncachesize)} ++ ++ ++ {_("NDN Cache Tries:")} ++ ++ ++ {numToCommas(this.state.data.normalizeddncachetries)} ++ ++ ++ {_("NDN Current Cache Size:")} ++ ++ ++ {displayBytes(this.state.data.currentnormalizeddncachesize)} ++ ++ ++ {_("NDN Cache Hits:")} ++ ++ ++ {numToCommas(this.state.data.normalizeddncachehits)} ++ ++ ++ {_("NDN Cache DN Count:")} ++ ++ ++ {numToCommas(this.state.data.currentnormalizeddncachecount)} ++ ++ ++ {_("NDN Cache Evictions:")} ++ ++ ++ {numToCommas(this.state.data.normalizeddncacheevictions)} ++ ++ ++ {_("NDN Cache Thread Size:")} ++ ++ ++ {numToCommas(this.state.data.normalizeddncachethreadsize)} ++ ++ ++ {_("NDN Cache Thread Slots:")} ++ ++ ++ {numToCommas(this.state.data.normalizeddncachethreadslots)} ++ ++ ++
++ ++ ++ ); ++ } else { ++ // No NDN cache available ++ content = ( ++
++ ++ ++ {_("Normalized DN Cache is disabled")} ++ ++ ++
++ ); ++ } + } + + return ( +diff --git a/src/lib389/lib389/cli_conf/monitor.py b/src/lib389/lib389/cli_conf/monitor.py +index b01796549..c7f9322d1 100644 +--- a/src/lib389/lib389/cli_conf/monitor.py ++++ b/src/lib389/lib389/cli_conf/monitor.py +@@ -129,6 +129,14 @@ def db_monitor(inst, basedn, log, args): + # Gather the global DB stats + report_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ldbm_mon = ldbm_monitor.get_status() ++ ndn_cache_enabled = inst.config.get_attr_val_utf8('nsslapd-ndn-cache-enabled') == 'on' ++ ++ # Build global cache stats ++ result = { ++ 'date': report_time, ++ 'backends': {}, ++ } ++ + if ldbm_monitor.inst_db_impl == DB_IMPL_BDB: + dbcachesize = int(ldbm_mon['nsslapd-db-cache-size-bytes'][0]) + # Warning: there are two different page sizes associated with bdb: +@@ -153,32 +161,6 @@ def db_monitor(inst, basedn, log, args): + dbcachefree = max(int(dbcachesize - (pagesize * dbpages)), 0) + dbcachefreeratio = dbcachefree/dbcachesize + +- ndnratio = ldbm_mon['normalizeddncachehitratio'][0] +- ndncursize = int(ldbm_mon['currentnormalizeddncachesize'][0]) +- ndnmaxsize = int(ldbm_mon['maxnormalizeddncachesize'][0]) +- ndncount = ldbm_mon['currentnormalizeddncachecount'][0] +- ndnevictions = ldbm_mon['normalizeddncacheevictions'][0] +- if ndncursize > ndnmaxsize: +- ndnfree = 0 +- ndnfreeratio = 0 +- else: +- ndnfree = ndnmaxsize - ndncursize +- ndnfreeratio = "{:.1f}".format(ndnfree / ndnmaxsize * 100) +- +- # Build global cache stats +- result = { +- 'date': report_time, +- 'ndncache': { +- 'hit_ratio': ndnratio, +- 'free': convert_bytes(str(ndnfree)), +- 'free_percentage': ndnfreeratio, +- 'count': ndncount, +- 'evictions': ndnevictions +- }, +- 'backends': {}, +- } +- +- if ldbm_monitor.inst_db_impl == DB_IMPL_BDB: + result['dbcache'] = { + 'hit_ratio': dbhitratio, + 'free': convert_bytes(str(dbcachefree)), +@@ -188,6 +170,32 @@ def db_monitor(inst, basedn, log, args): + 'pageout': dbcachepageout + } + ++ # Add NDN cache stats only if enabled ++ if ndn_cache_enabled: ++ try: ++ ndnratio = ldbm_mon['normalizeddncachehitratio'][0] ++ ndncursize = int(ldbm_mon['currentnormalizeddncachesize'][0]) ++ ndnmaxsize = int(ldbm_mon['maxnormalizeddncachesize'][0]) ++ ndncount = ldbm_mon['currentnormalizeddncachecount'][0] ++ ndnevictions = ldbm_mon['normalizeddncacheevictions'][0] ++ if ndncursize > ndnmaxsize: ++ ndnfree = 0 ++ ndnfreeratio = 0 ++ else: ++ ndnfree = ndnmaxsize - ndncursize ++ ndnfreeratio = "{:.1f}".format(ndnfree / ndnmaxsize * 100) ++ ++ result['ndncache'] = { ++ 'hit_ratio': ndnratio, ++ 'free': convert_bytes(str(ndnfree)), ++ 'free_percentage': ndnfreeratio, ++ 'count': ndncount, ++ 'evictions': ndnevictions ++ } ++ # In case, the user enabled NDN cache but still have not restarted the instance ++ except IndexError: ++ ndn_cache_enabled = False ++ + # Build the backend results + for be in backend_objs: + be_name = be.rdn +@@ -277,13 +285,16 @@ def db_monitor(inst, basedn, log, args): + log.info(" - Pages In: {}".format(result['dbcache']['pagein'])) + log.info(" - Pages Out: {}".format(result['dbcache']['pageout'])) + log.info("") +- log.info("Normalized DN Cache:") +- log.info(" - Cache Hit Ratio: {}%".format(result['ndncache']['hit_ratio'])) +- log.info(" - Free Space: {}".format(result['ndncache']['free'])) +- log.info(" - Free Percentage: {}%".format(result['ndncache']['free_percentage'])) +- log.info(" - DN Count: {}".format(result['ndncache']['count'])) +- log.info(" - Evictions: {}".format(result['ndncache']['evictions'])) +- 
log.info("") ++ ++ if ndn_cache_enabled: ++ log.info("Normalized DN Cache:") ++ log.info(" - Cache Hit Ratio: {}%".format(result['ndncache']['hit_ratio'])) ++ log.info(" - Free Space: {}".format(result['ndncache']['free'])) ++ log.info(" - Free Percentage: {}%".format(result['ndncache']['free_percentage'])) ++ log.info(" - DN Count: {}".format(result['ndncache']['count'])) ++ log.info(" - Evictions: {}".format(result['ndncache']['evictions'])) ++ log.info("") ++ + log.info("Backends:") + for be_name, attr_dict in result['backends'].items(): + log.info(f" - {attr_dict['suffix']} ({be_name}):") +-- +2.49.0 + diff --git a/SOURCES/0010-Issue-6090-dbscan-use-bdb-by-default.patch b/SOURCES/0010-Issue-6090-dbscan-use-bdb-by-default.patch deleted file mode 100644 index f39eb11..0000000 --- a/SOURCES/0010-Issue-6090-dbscan-use-bdb-by-default.patch +++ /dev/null @@ -1,44 +0,0 @@ -From 39d91c4b86fc2ad7e35f8bebd510dff984e8ba56 Mon Sep 17 00:00:00 2001 -From: Viktor Ashirov -Date: Wed, 5 Mar 2025 23:46:02 +0100 -Subject: [PATCH] Issue 6090 - dbscan: use bdb by default - -Bug Description: -dbscan started to use mdb by default on versions where it's not the -default. - -Fix Description: -Use bdb by default on 2.x versions. - -Relates: https://github.com/389ds/389-ds-base/issues/6090 - -Reviewed by: @mreynolds389 (Thanks!) ---- - ldap/servers/slapd/tools/dbscan.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/ldap/servers/slapd/tools/dbscan.c b/ldap/servers/slapd/tools/dbscan.c -index 12edf7c5b..9260c1532 100644 ---- a/ldap/servers/slapd/tools/dbscan.c -+++ b/ldap/servers/slapd/tools/dbscan.c -@@ -1280,7 +1280,7 @@ removedb(const char *dbimpl_name, const char *filename) - - if (!filename) { - printf("Error: -f option is missing.\n" -- "Usage: dbscan -D mdb -d -f //\n"); -+ "Usage: dbscan -D bdb -d -f //\n"); - return 1; - } - -@@ -1314,7 +1314,7 @@ main(int argc, char **argv) - char *find_key = NULL; - uint32_t entry_id = 0xffffffff; - char *defdbimpl = getenv("NSSLAPD_DB_LIB"); -- char *dbimpl_name = (char*) "mdb"; -+ char *dbimpl_name = (char*) "bdb"; - int longopt_idx = 0; - int c = 0; - char optstring[2*COUNTOF(options)+1] = {0}; --- -2.48.1 - diff --git a/SOURCES/0010-Issue-6859-str2filter-is-not-fully-applying-matching.patch b/SOURCES/0010-Issue-6859-str2filter-is-not-fully-applying-matching.patch new file mode 100644 index 0000000..09cc802 --- /dev/null +++ b/SOURCES/0010-Issue-6859-str2filter-is-not-fully-applying-matching.patch @@ -0,0 +1,399 @@ +From 5198da59d622dbc39afe2ece9c6f40f4fb249d52 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 9 Jul 2025 14:18:50 -0400 +Subject: [PATCH] Issue 6859 - str2filter is not fully applying matching rules + +Description: + +When we have an extended filter, one with a MR applied, it is ignored during +internal searches: + + "(cn:CaseExactMatch:=Value)" + +For internal searches we use str2filter() and it doesn't fully apply extended +search filter matching rules + +Also needed to update attr uniqueness plugin to apply this change for mod +operations (previously only Adds were correctly handling these attribute +filters) + +Relates: https://github.com/389ds/389-ds-base/issues/6857 +Relates: https://github.com/389ds/389-ds-base/issues/6859 + +Reviewed by: spichugi & tbordaz(Thanks!!) 
+--- + .../tests/suites/plugins/attruniq_test.py | 295 +++++++++++++++++- + ldap/servers/plugins/uiduniq/uid.c | 7 + + ldap/servers/slapd/plugin_mr.c | 2 +- + ldap/servers/slapd/str2filter.c | 8 + + 4 files changed, 309 insertions(+), 3 deletions(-) + +diff --git a/dirsrvtests/tests/suites/plugins/attruniq_test.py b/dirsrvtests/tests/suites/plugins/attruniq_test.py +index b190e0ec1..b338f405f 100644 +--- a/dirsrvtests/tests/suites/plugins/attruniq_test.py ++++ b/dirsrvtests/tests/suites/plugins/attruniq_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2021 Red Hat, Inc. ++# Copyright (C) 2025 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). +@@ -80,4 +80,295 @@ def test_modrdn_attr_uniqueness(topology_st): + log.debug(excinfo.value) + + log.debug('Move user2 to group1') +- user2.rename(f'uid={user2.rdn}', group1.dn) +\ No newline at end of file ++ ++ user2.rename(f'uid={user2.rdn}', group1.dn) ++ ++ # Cleanup for next test ++ user1.delete() ++ user2.delete() ++ attruniq.disable() ++ attruniq.delete() ++ ++ ++def test_multiple_attr_uniqueness(topology_st): ++ """ Test that attribute uniqueness works properly with multiple attributes ++ ++ :id: c49aa5c1-7e65-45fd-b064-55e0b815e9bc ++ :setup: Standalone instance ++ :steps: ++ 1. Setup attribute uniqueness plugin to ensure uniqueness of attributes 'mail' and 'mailAlternateAddress' ++ 2. Add user with unique 'mail=non-uniq@value.net' and 'mailAlternateAddress=alt-mail@value.net' ++ 3. Try adding another user with 'mail=non-uniq@value.net' ++ 4. Try adding another user with 'mailAlternateAddress=alt-mail@value.net' ++ 5. Try adding another user with 'mail=alt-mail@value.net' ++ 6. Try adding another user with 'mailAlternateAddress=non-uniq@value.net' ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Should raise CONSTRAINT_VIOLATION ++ 4. Should raise CONSTRAINT_VIOLATION ++ 5. Should raise CONSTRAINT_VIOLATION ++ 6. 
Should raise CONSTRAINT_VIOLATION ++ """ ++ attruniq = AttributeUniquenessPlugin(topology_st.standalone, dn="cn=attruniq,cn=plugins,cn=config") ++ ++ try: ++ log.debug(f'Setup PLUGIN_ATTR_UNIQUENESS plugin for {MAIL_ATTR_VALUE} attribute for the group2') ++ attruniq.create(properties={'cn': 'attruniq'}) ++ attruniq.add_unique_attribute('mail') ++ attruniq.add_unique_attribute('mailAlternateAddress') ++ attruniq.add_unique_subtree(DEFAULT_SUFFIX) ++ attruniq.enable_all_subtrees() ++ log.debug(f'Enable PLUGIN_ATTR_UNIQUENESS plugin as "ON"') ++ attruniq.enable() ++ except ldap.LDAPError as e: ++ log.fatal('test_multiple_attribute_uniqueness: Failed to configure plugin for "mail": error {}'.format(e.args[0]['desc'])) ++ assert False ++ ++ topology_st.standalone.restart() ++ ++ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) ++ ++ testuser1 = users.create_test_user(100,100) ++ testuser1.add('objectclass', 'extensibleObject') ++ testuser1.add('mail', MAIL_ATTR_VALUE) ++ testuser1.add('mailAlternateAddress', MAIL_ATTR_VALUE_ALT) ++ ++ testuser2 = users.create_test_user(200, 200) ++ testuser2.add('objectclass', 'extensibleObject') ++ ++ with pytest.raises(ldap.CONSTRAINT_VIOLATION): ++ testuser2.add('mail', MAIL_ATTR_VALUE) ++ ++ with pytest.raises(ldap.CONSTRAINT_VIOLATION): ++ testuser2.add('mailAlternateAddress', MAIL_ATTR_VALUE_ALT) ++ ++ with pytest.raises(ldap.CONSTRAINT_VIOLATION): ++ testuser2.add('mail', MAIL_ATTR_VALUE_ALT) ++ ++ with pytest.raises(ldap.CONSTRAINT_VIOLATION): ++ testuser2.add('mailAlternateAddress', MAIL_ATTR_VALUE) ++ ++ # Cleanup ++ testuser1.delete() ++ testuser2.delete() ++ attruniq.disable() ++ attruniq.delete() ++ ++ ++def test_exclude_subtrees(topology_st): ++ """ Test attribute uniqueness with exclude scope ++ ++ :id: 43d29a60-40e1-4ebd-b897-6ef9f20e9f27 ++ :setup: Standalone instance ++ :steps: ++ 1. Setup and enable attribute uniqueness plugin for telephonenumber unique attribute ++ 2. Create subtrees and test users ++ 3. Add a unique attribute to a user within uniqueness scope ++ 4. Add exclude subtree ++ 5. Try to add existing value attribute to an entry within uniqueness scope ++ 6. Try to add existing value attribute to an entry within exclude scope ++ 7. Remove the attribute from affected entries ++ 8. Add a unique attribute to a user within exclude scope ++ 9. Try to add existing value attribute to an entry within uniqueness scope ++ 10. Try to add existing value attribute to another entry within uniqueness scope ++ 11. Remove the attribute from affected entries ++ 12. Add another exclude subtree ++ 13. Add a unique attribute to a user within uniqueness scope ++ 14. Try to add existing value attribute to an entry within uniqueness scope ++ 15. Try to add existing value attribute to an entry within exclude scope ++ 16. Try to add existing value attribute to an entry within another exclude scope ++ 17. Clean up entries ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Should raise CONSTRAINT_VIOLATION ++ 6. Success ++ 7. Success ++ 8. Success ++ 9. Success ++ 10. Should raise CONSTRAINT_VIOLATION ++ 11. Success ++ 12. Success ++ 13. Success ++ 14. Should raise CONSTRAINT_VIOLATION ++ 15. Success ++ 16. Success ++ 17. 
Success ++ """ ++ log.info('Setup attribute uniqueness plugin') ++ attruniq = AttributeUniquenessPlugin(topology_st.standalone, dn="cn=attruniq,cn=plugins,cn=config") ++ attruniq.create(properties={'cn': 'attruniq'}) ++ attruniq.add_unique_attribute('telephonenumber') ++ attruniq.add_unique_subtree(DEFAULT_SUFFIX) ++ attruniq.enable_all_subtrees() ++ attruniq.enable() ++ topology_st.standalone.restart() ++ ++ log.info('Create subtrees container') ++ containers = nsContainers(topology_st.standalone, DEFAULT_SUFFIX) ++ cont1 = containers.create(properties={'cn': EXCLUDED_CONTAINER_CN}) ++ cont2 = containers.create(properties={'cn': EXCLUDED_BIS_CONTAINER_CN}) ++ cont3 = containers.create(properties={'cn': ENFORCED_CONTAINER_CN}) ++ ++ log.info('Create test users') ++ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX, ++ rdn='cn={}'.format(ENFORCED_CONTAINER_CN)) ++ users_excluded = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX, ++ rdn='cn={}'.format(EXCLUDED_CONTAINER_CN)) ++ users_excluded2 = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX, ++ rdn='cn={}'.format(EXCLUDED_BIS_CONTAINER_CN)) ++ ++ user1 = users.create(properties={'cn': USER_1_CN, ++ 'uid': USER_1_CN, ++ 'sn': USER_1_CN, ++ 'uidNumber': '1', ++ 'gidNumber': '11', ++ 'homeDirectory': '/home/{}'.format(USER_1_CN)}) ++ user2 = users.create(properties={'cn': USER_2_CN, ++ 'uid': USER_2_CN, ++ 'sn': USER_2_CN, ++ 'uidNumber': '2', ++ 'gidNumber': '22', ++ 'homeDirectory': '/home/{}'.format(USER_2_CN)}) ++ user3 = users_excluded.create(properties={'cn': USER_3_CN, ++ 'uid': USER_3_CN, ++ 'sn': USER_3_CN, ++ 'uidNumber': '3', ++ 'gidNumber': '33', ++ 'homeDirectory': '/home/{}'.format(USER_3_CN)}) ++ user4 = users_excluded2.create(properties={'cn': USER_4_CN, ++ 'uid': USER_4_CN, ++ 'sn': USER_4_CN, ++ 'uidNumber': '4', ++ 'gidNumber': '44', ++ 'homeDirectory': '/home/{}'.format(USER_4_CN)}) ++ ++ UNIQUE_VALUE = '1234' ++ ++ try: ++ log.info('Create user with unique attribute') ++ user1.add('telephonenumber', UNIQUE_VALUE) ++ assert user1.present('telephonenumber', UNIQUE_VALUE) ++ ++ log.info('Add exclude subtree') ++ attruniq.add_exclude_subtree(EXCLUDED_CONTAINER_DN) ++ topology_st.standalone.restart() ++ ++ log.info('Verify an already used attribute value cannot be added within the same subtree') ++ with pytest.raises(ldap.CONSTRAINT_VIOLATION): ++ user2.add('telephonenumber', UNIQUE_VALUE) ++ ++ log.info('Verify an entry with same attribute value can be added within exclude subtree') ++ user3.add('telephonenumber', UNIQUE_VALUE) ++ assert user3.present('telephonenumber', UNIQUE_VALUE) ++ ++ log.info('Cleanup unique attribute values') ++ user1.remove_all('telephonenumber') ++ user3.remove_all('telephonenumber') ++ ++ log.info('Add a unique value to an entry in excluded scope') ++ user3.add('telephonenumber', UNIQUE_VALUE) ++ assert user3.present('telephonenumber', UNIQUE_VALUE) ++ ++ log.info('Verify the same value can be added to an entry within uniqueness scope') ++ user1.add('telephonenumber', UNIQUE_VALUE) ++ assert user1.present('telephonenumber', UNIQUE_VALUE) ++ ++ log.info('Verify that yet another same value cannot be added to another entry within uniqueness scope') ++ with pytest.raises(ldap.CONSTRAINT_VIOLATION): ++ user2.add('telephonenumber', UNIQUE_VALUE) ++ ++ log.info('Cleanup unique attribute values') ++ user1.remove_all('telephonenumber') ++ user3.remove_all('telephonenumber') ++ ++ log.info('Add another exclude subtree') ++ attruniq.add_exclude_subtree(EXCLUDED_BIS_CONTAINER_DN) ++ 
topology_st.standalone.restart() ++ ++ user1.add('telephonenumber', UNIQUE_VALUE) ++ log.info('Verify an already used attribute value cannot be added within the same subtree') ++ with pytest.raises(ldap.CONSTRAINT_VIOLATION): ++ user2.add('telephonenumber', UNIQUE_VALUE) ++ ++ log.info('Verify an already used attribute can be added to an entry in exclude scope') ++ user3.add('telephonenumber', UNIQUE_VALUE) ++ assert user3.present('telephonenumber', UNIQUE_VALUE) ++ user4.add('telephonenumber', UNIQUE_VALUE) ++ assert user4.present('telephonenumber', UNIQUE_VALUE) ++ ++ finally: ++ log.info('Clean up users, containers and attribute uniqueness plugin') ++ user1.delete() ++ user2.delete() ++ user3.delete() ++ user4.delete() ++ cont1.delete() ++ cont2.delete() ++ cont3.delete() ++ attruniq.disable() ++ attruniq.delete() ++ ++ ++def test_matchingrule_attr(topology_st): ++ """ Test list extension MR attribute. Check for "cn" using CES (versus it ++ being defined as CIS) ++ ++ :id: 5cde4342-6fa3-4225-b23d-0af918981075 ++ :setup: Standalone instance ++ :steps: ++ 1. Setup and enable attribute uniqueness plugin to use CN attribute ++ with a matching rule of CaseExactMatch. ++ 2. Add user with CN value is lowercase ++ 3. Add second user with same lowercase CN which should be rejected ++ 4. Add second user with same CN value but with mixed case ++ 5. Modify second user replacing CN value to lc which should be rejected ++ ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ """ ++ ++ inst = topology_st.standalone ++ ++ attruniq = AttributeUniquenessPlugin(inst, ++ dn="cn=attribute uniqueness,cn=plugins,cn=config") ++ attruniq.add_unique_attribute('cn:CaseExactMatch:') ++ attruniq.enable_all_subtrees() ++ attruniq.enable() ++ inst.restart() ++ ++ users = UserAccounts(inst, DEFAULT_SUFFIX) ++ users.create(properties={'cn': "common_name", ++ 'uid': "uid_name", ++ 'sn': "uid_name", ++ 'uidNumber': '1', ++ 'gidNumber': '11', ++ 'homeDirectory': '/home/uid_name'}) ++ ++ log.info('Add entry with the exact CN value which should be rejected') ++ with pytest.raises(ldap.CONSTRAINT_VIOLATION): ++ users.create(properties={'cn': "common_name", ++ 'uid': "uid_name2", ++ 'sn': "uid_name2", ++ 'uidNumber': '11', ++ 'gidNumber': '111', ++ 'homeDirectory': '/home/uid_name2'}) ++ ++ log.info('Add entry with the mixed case CN value which should be allowed') ++ user = users.create(properties={'cn': "Common_Name", ++ 'uid': "uid_name2", ++ 'sn': "uid_name2", ++ 'uidNumber': '11', ++ 'gidNumber': '111', ++ 'homeDirectory': '/home/uid_name2'}) ++ ++ log.info('Mod entry with exact case CN value which should be rejected') ++ with pytest.raises(ldap.CONSTRAINT_VIOLATION): ++ user.replace('cn', 'common_name') +diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c +index 887e79d78..fdb1404a0 100644 +--- a/ldap/servers/plugins/uiduniq/uid.c ++++ b/ldap/servers/plugins/uiduniq/uid.c +@@ -1178,6 +1178,10 @@ preop_modify(Slapi_PBlock *pb) + for (; mods && *mods; mods++) { + mod = *mods; + for (i = 0; attrNames && attrNames[i]; i++) { ++ char *attr_match = strchr(attrNames[i], ':'); ++ if (attr_match != NULL) { ++ attr_match[0] = '\0'; ++ } + if ((slapi_attr_type_cmp(mod->mod_type, attrNames[i], 1) == 0) && /* mod contains target attr */ + (mod->mod_op & LDAP_MOD_BVALUES) && /* mod is bval encoded (not string val) */ + (mod->mod_bvalues && mod->mod_bvalues[0]) && /* mod actually contains some values */ +@@ -1186,6 +1190,9 @@ preop_modify(Slapi_PBlock *pb) + { + 
addMod(&checkmods, &checkmodsCapacity, &modcount, mod); + } ++ if (attr_match != NULL) { ++ attr_match[0] = ':'; ++ } + } + } + if (modcount == 0) { +diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c +index b262820c5..67051a5ff 100644 +--- a/ldap/servers/slapd/plugin_mr.c ++++ b/ldap/servers/slapd/plugin_mr.c +@@ -626,7 +626,7 @@ attempt_mr_filter_create(mr_filter_t *f, struct slapdplugin *mrp, Slapi_PBlock * + int rc; + IFP mrf_create = NULL; + f->mrf_match = NULL; +- pblock_init(pb); ++ slapi_pblock_init(pb); + if (!(rc = slapi_pblock_set(pb, SLAPI_PLUGIN, mrp)) && + !(rc = slapi_pblock_get(pb, SLAPI_PLUGIN_MR_FILTER_CREATE_FN, &mrf_create)) && + mrf_create != NULL && +diff --git a/ldap/servers/slapd/str2filter.c b/ldap/servers/slapd/str2filter.c +index 9fdc500f7..5620b7439 100644 +--- a/ldap/servers/slapd/str2filter.c ++++ b/ldap/servers/slapd/str2filter.c +@@ -344,6 +344,14 @@ str2simple(char *str, int unescape_filter) + return NULL; /* error */ + } else { + f->f_choice = LDAP_FILTER_EXTENDED; ++ if (f->f_mr_oid) { ++ /* apply the MR indexers */ ++ rc = plugin_mr_filter_create(&f->f_mr); ++ if (rc) { ++ slapi_filter_free(f, 1); ++ return NULL; /* error */ ++ } ++ } + } + } else if (str_find_star(value) == NULL) { + f->f_choice = LDAP_FILTER_EQUALITY; +-- +2.49.0 + diff --git a/SOURCES/0011-Issue-6872-compressed-log-rotation-creates-files-wit.patch b/SOURCES/0011-Issue-6872-compressed-log-rotation-creates-files-wit.patch new file mode 100644 index 0000000..cc717a3 --- /dev/null +++ b/SOURCES/0011-Issue-6872-compressed-log-rotation-creates-files-wit.patch @@ -0,0 +1,163 @@ +From 406563c136d78235751e34a3c7e22ccaf114f754 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 15 Jul 2025 17:56:18 -0400 +Subject: [PATCH] Issue 6872 - compressed log rotation creates files with world + readable permission + +Description: + +When compressing a log file, first create the empty file using open() +so we can set the correct permissions right from the start. gzopen() +always uses permission 644 and that is not safe. So after creating it +with open(), with the correct permissions, then pass the FD to gzdopen() +and write the compressed content. + +relates: https://github.com/389ds/389-ds-base/issues/6872 + +Reviewed by: progier(Thanks!) +--- + .../logging/logging_compression_test.py | 15 ++++++++-- + ldap/servers/slapd/log.c | 28 +++++++++++++------ + ldap/servers/slapd/schema.c | 2 +- + 3 files changed, 33 insertions(+), 12 deletions(-) + +diff --git a/dirsrvtests/tests/suites/logging/logging_compression_test.py b/dirsrvtests/tests/suites/logging/logging_compression_test.py +index e30874cc0..3a987d62c 100644 +--- a/dirsrvtests/tests/suites/logging/logging_compression_test.py ++++ b/dirsrvtests/tests/suites/logging/logging_compression_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2022 Red Hat, Inc. ++# Copyright (C) 2025 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). 
+@@ -22,12 +22,21 @@ log = logging.getLogger(__name__) + + pytestmark = pytest.mark.tier1 + ++ + def log_rotated_count(log_type, log_dir, check_compressed=False): +- # Check if the log was rotated ++ """ ++ Check if the log was rotated and has the correct permissions ++ """ + log_file = f'{log_dir}/{log_type}.2*' + if check_compressed: + log_file += ".gz" +- return len(glob.glob(log_file)) ++ log_files = glob.glob(log_file) ++ for logf in log_files: ++ # Check permissions ++ st = os.stat(logf) ++ assert oct(st.st_mode) == '0o100600' # 0600 ++ ++ return len(log_files) + + + def update_and_sleep(inst, suffix, sleep=True): +diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c +index a018ca2d5..178d29b89 100644 +--- a/ldap/servers/slapd/log.c ++++ b/ldap/servers/slapd/log.c +@@ -172,17 +172,28 @@ get_syslog_loglevel(int loglevel) + } + + static int +-compress_log_file(char *log_name) ++compress_log_file(char *log_name, int32_t mode) + { + char gzip_log[BUFSIZ] = {0}; + char buf[LOG_CHUNK] = {0}; + size_t bytes_read = 0; + gzFile outfile = NULL; + FILE *source = NULL; ++ int fd = 0; + + PR_snprintf(gzip_log, sizeof(gzip_log), "%s.gz", log_name); +- if ((outfile = gzopen(gzip_log,"wb")) == NULL) { +- /* Failed to open new gzip file */ ++ ++ /* ++ * Try to open the file as we may have an incorrect path. We also need to ++ * set the permissions using open() as gzopen() creates the file with ++ * 644 permissions (world readable - bad). So we create an empty file with ++ * the correct permissions, then we pass the FD to gzdopen() to write the ++ * compressed content. ++ */ ++ if ((fd = open(gzip_log, O_WRONLY|O_CREAT|O_TRUNC, mode)) >= 0) { ++ /* FIle successfully created, now pass the FD to gzdopen() */ ++ outfile = gzdopen(fd, "ab"); ++ } else { + return -1; + } + +@@ -191,6 +202,7 @@ compress_log_file(char *log_name) + gzclose(outfile); + return -1; + } ++ + bytes_read = fread(buf, 1, LOG_CHUNK, source); + while (bytes_read > 0) { + int bytes_written = gzwrite(outfile, buf, bytes_read); +@@ -3291,7 +3303,7 @@ log__open_accesslogfile(int logfile_state, int locked) + return LOG_UNABLE_TO_OPENFILE; + } + } else if (loginfo.log_access_compress) { +- if (compress_log_file(newfile) != 0) { ++ if (compress_log_file(newfile, loginfo.log_access_mode) != 0) { + slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile", + "failed to compress rotated access log (%s)\n", + newfile); +@@ -3455,7 +3467,7 @@ log__open_securitylogfile(int logfile_state, int locked) + return LOG_UNABLE_TO_OPENFILE; + } + } else if (loginfo.log_security_compress) { +- if (compress_log_file(newfile) != 0) { ++ if (compress_log_file(newfile, loginfo.log_security_mode) != 0) { + slapi_log_err(SLAPI_LOG_ERR, "log__open_securitylogfile", + "failed to compress rotated security audit log (%s)\n", + newfile); +@@ -6172,7 +6184,7 @@ log__open_errorlogfile(int logfile_state, int locked) + return LOG_UNABLE_TO_OPENFILE; + } + } else if (loginfo.log_error_compress) { +- if (compress_log_file(newfile) != 0) { ++ if (compress_log_file(newfile, loginfo.log_error_mode) != 0) { + PR_snprintf(buffer, sizeof(buffer), "Failed to compress errors log file (%s)\n", newfile); + log__error_emergency(buffer, 1, 1); + } else { +@@ -6355,7 +6367,7 @@ log__open_auditlogfile(int logfile_state, int locked) + return LOG_UNABLE_TO_OPENFILE; + } + } else if (loginfo.log_audit_compress) { +- if (compress_log_file(newfile) != 0) { ++ if (compress_log_file(newfile, loginfo.log_audit_mode) != 0) { + slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile", 
+ "failed to compress rotated audit log (%s)\n", + newfile); +@@ -6514,7 +6526,7 @@ log__open_auditfaillogfile(int logfile_state, int locked) + return LOG_UNABLE_TO_OPENFILE; + } + } else if (loginfo.log_auditfail_compress) { +- if (compress_log_file(newfile) != 0) { ++ if (compress_log_file(newfile, loginfo.log_auditfail_mode) != 0) { + slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile", + "failed to compress rotated auditfail log (%s)\n", + newfile); +diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c +index a8e6b1210..9ef4ee4bf 100644 +--- a/ldap/servers/slapd/schema.c ++++ b/ldap/servers/slapd/schema.c +@@ -903,7 +903,7 @@ oc_check_allowed_sv(Slapi_PBlock *pb, Slapi_Entry *e, const char *type, struct o + + if (pb) { + PR_snprintf(errtext, sizeof(errtext), +- "attribute \"%s\" not allowed\n", ++ "attribute \"%s\" not allowed", + escape_string(type, ebuf)); + slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, errtext); + } +-- +2.49.0 + diff --git a/SOURCES/0012-Issue-6878-Prevent-repeated-disconnect-logs-during-s.patch b/SOURCES/0012-Issue-6878-Prevent-repeated-disconnect-logs-during-s.patch new file mode 100644 index 0000000..8dbfc90 --- /dev/null +++ b/SOURCES/0012-Issue-6878-Prevent-repeated-disconnect-logs-during-s.patch @@ -0,0 +1,116 @@ +From 9b8b23f6d46f16fbc1784b26cfc04dd6b4fa94e1 Mon Sep 17 00:00:00 2001 +From: Simon Pichugin +Date: Fri, 18 Jul 2025 18:50:33 -0700 +Subject: [PATCH] Issue 6878 - Prevent repeated disconnect logs during shutdown + (#6879) + +Description: Avoid logging non-active initialized connections via CONN in disconnect_server_nomutex_ext by adding a check to skip invalid conn=0 with invalid sockets, preventing excessive repeated messages. + +Update ds_logs_test.py by adding test_no_repeated_disconnect_messages to verify the fix. + +Fixes: https://github.com/389ds/389-ds-base/issues/6878 + +Reviewed by: @mreynolds389 (Thanks!) +--- + .../tests/suites/ds_logs/ds_logs_test.py | 51 ++++++++++++++++++- + ldap/servers/slapd/connection.c | 15 +++--- + 2 files changed, 59 insertions(+), 7 deletions(-) + +diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py +index 2c22347bb..b86c72687 100644 +--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py ++++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py +@@ -24,7 +24,7 @@ from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, Aut + from lib389.idm.user import UserAccounts, UserAccount + from lib389.idm.group import Groups + from lib389.idm.organizationalunit import OrganizationalUnits +-from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD ++from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD, ErrorLog + from lib389.utils import ds_is_older, ds_is_newer + from lib389.config import RSA + from lib389.dseldif import DSEldif +@@ -1435,6 +1435,55 @@ def test_errorlog_buffering(topology_st, request): + assert inst.ds_error_log.match(".*slapd_daemon - slapd started.*") + + ++def test_no_repeated_disconnect_messages(topology_st): ++ """Test that there are no repeated "Not setting conn 0 to be disconnected: socket is invalid" messages on restart ++ ++ :id: 72b5e1ce-2db8-458f-b2cd-0a0b6525f51f ++ :setup: Standalone Instance ++ :steps: ++ 1. Set error log level to CONNECTION ++ 2. Clear existing error logs ++ 3. Restart the server with 30 second timeout ++ 4. Check error log for repeated disconnect messages ++ 5. 
Verify there are no more than 10 occurrences of the disconnect message ++ :expectedresults: ++ 1. Error log level should be set successfully ++ 2. Error logs should be cleared ++ 3. Server should restart successfully within 30 seconds ++ 4. Error log should be accessible ++ 5. There should be no more than 10 repeated disconnect messages ++ """ ++ ++ inst = topology_st.standalone ++ ++ log.info('Set error log level to CONNECTION') ++ inst.config.loglevel([ErrorLog.CONNECT]) ++ current_level = inst.config.get_attr_val_int('nsslapd-errorlog-level') ++ log.info(f'Error log level set to: {current_level}') ++ ++ log.info('Clear existing error logs') ++ inst.deleteErrorLogs() ++ ++ log.info('Restart the server with 30 second timeout') ++ inst.restart(timeout=30) ++ ++ log.info('Check error log for repeated disconnect messages') ++ disconnect_message = "Not setting conn 0 to be disconnected: socket is invalid" ++ ++ # Count occurrences of the disconnect message ++ error_log_lines = inst.ds_error_log.readlines() ++ disconnect_count = 0 ++ ++ for line in error_log_lines: ++ if disconnect_message in line: ++ disconnect_count += 1 ++ ++ log.info(f'Found {disconnect_count} occurrences of disconnect message') ++ ++ log.info('Verify there are no more than 10 occurrences') ++ assert disconnect_count <= 10, f"Found {disconnect_count} repeated disconnect messages, expected <= 10" ++ ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c +index bb4fcd77f..2967de15b 100644 +--- a/ldap/servers/slapd/connection.c ++++ b/ldap/servers/slapd/connection.c +@@ -2465,12 +2465,15 @@ disconnect_server_nomutex_ext(Connection *conn, PRUint64 opconnid, int opid, PRE + } + + } else { +- slapi_log_err(SLAPI_LOG_CONNS, "disconnect_server_nomutex_ext", +- "Not setting conn %d to be disconnected: %s\n", +- conn->c_sd, +- (conn->c_sd == SLAPD_INVALID_SOCKET) ? "socket is invalid" : +- ((conn->c_connid != opconnid) ? "conn id does not match op conn id" : +- ((conn->c_flags & CONN_FLAG_CLOSING) ? "conn is closing" : "unknown"))); ++ /* We avoid logging an invalid conn=0 connection as it is not a real connection. */ ++ if (!(conn->c_sd == SLAPD_INVALID_SOCKET && conn->c_connid == 0)) { ++ slapi_log_err(SLAPI_LOG_CONNS, "disconnect_server_nomutex_ext", ++ "Not setting conn %d to be disconnected: %s\n", ++ conn->c_sd, ++ (conn->c_sd == SLAPD_INVALID_SOCKET) ? "socket is invalid" : ++ ((conn->c_connid != opconnid) ? "conn id does not match op conn id" : ++ ((conn->c_flags & CONN_FLAG_CLOSING) ? "conn is closing" : "unknown"))); ++ } + } + } + +-- +2.49.0 + diff --git a/SOURCES/0013-Issue-6772-dsconf-Replicas-with-the-consumer-role-al.patch b/SOURCES/0013-Issue-6772-dsconf-Replicas-with-the-consumer-role-al.patch new file mode 100644 index 0000000..64f23ea --- /dev/null +++ b/SOURCES/0013-Issue-6772-dsconf-Replicas-with-the-consumer-role-al.patch @@ -0,0 +1,67 @@ +From fef4875a9c3d67ef424a1fb1698ae011152735b1 Mon Sep 17 00:00:00 2001 +From: Anuar Beisembayev <111912342+abeisemb@users.noreply.github.com> +Date: Wed, 23 Jul 2025 23:48:11 -0400 +Subject: [PATCH] Issue 6772 - dsconf - Replicas with the "consumer" role allow + for viewing and modification of their changelog. (#6773) + +dsconf currently allows users to set and retrieve changelogs in consumer replicas, which do not have officially supported changelogs. This can lead to undefined behavior and confusion. 
+This commit prints a warning message if the user tries to interact with a changelog on a consumer replica. + +Resolves: https://github.com/389ds/389-ds-base/issues/6772 + +Reviewed by: @droideck +--- + src/lib389/lib389/cli_conf/replication.py | 23 +++++++++++++++++++++++ + 1 file changed, 23 insertions(+) + +diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py +index 6f77f34ca..a18bf83ca 100644 +--- a/src/lib389/lib389/cli_conf/replication.py ++++ b/src/lib389/lib389/cli_conf/replication.py +@@ -686,6 +686,9 @@ def set_per_backend_cl(inst, basedn, log, args): + replace_list = [] + did_something = False + ++ if (is_replica_role_consumer(inst, suffix)): ++ log.info("Warning: Changelogs are not supported for consumer replicas. You may run into undefined behavior.") ++ + if args.encrypt: + cl.replace('nsslapd-encryptionalgorithm', 'AES') + del args.encrypt +@@ -715,6 +718,10 @@ def set_per_backend_cl(inst, basedn, log, args): + # that means there is a changelog config entry per backend (aka suffix) + def get_per_backend_cl(inst, basedn, log, args): + suffix = args.suffix ++ ++ if (is_replica_role_consumer(inst, suffix)): ++ log.info("Warning: Changelogs are not supported for consumer replicas. You may run into undefined behavior.") ++ + cl = Changelog(inst, suffix) + if args and args.json: + log.info(cl.get_all_attrs_json()) +@@ -822,6 +829,22 @@ def del_repl_manager(inst, basedn, log, args): + + log.info("Successfully deleted replication manager: " + manager_dn) + ++def is_replica_role_consumer(inst, suffix): ++ """Helper function for get_per_backend_cl and set_per_backend_cl. ++ Makes sure the instance in question is not a consumer, which is a role that ++ does not support changelogs. ++ """ ++ replicas = Replicas(inst) ++ try: ++ replica = replicas.get(suffix) ++ role = replica.get_role() ++ except ldap.NO_SUCH_OBJECT: ++ raise ValueError(f"Backend \"{suffix}\" is not enabled for replication") ++ ++ if role == ReplicaRole.CONSUMER: ++ return True ++ else: ++ return False + + # + # Agreements +-- +2.49.0 + diff --git a/SOURCES/0014-Issue-6893-Log-user-that-is-updated-during-password-.patch b/SOURCES/0014-Issue-6893-Log-user-that-is-updated-during-password-.patch new file mode 100644 index 0000000..d75bc8d --- /dev/null +++ b/SOURCES/0014-Issue-6893-Log-user-that-is-updated-during-password-.patch @@ -0,0 +1,143 @@ +From 4cb50f83397e6a5e14a9b75ed15f24189ee2792b Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 21 Jul 2025 18:07:21 -0400 +Subject: [PATCH] Issue 6893 - Log user that is updated during password modify + extended operation + +Description: + +When a user's password is updated via an extended operation (password modify +plugin) we only log the bind DN and not what user was updated. While "internal +operation" logging will display the the user it should be logged by the default +logging level. + +Add access logging using "EXT_INFO" where we display the bind dn, target +dn, and message. + +Relates: https://github.com/389ds/389-ds-base/issues/6893 + +Reviewed by: spichugi & tbordaz(Thanks!!) 
+--- + ldap/servers/slapd/passwd_extop.c | 56 +++++++++++++++---------------- + 1 file changed, 28 insertions(+), 28 deletions(-) + +diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c +index 4bb60afd6..0296d64fb 100644 +--- a/ldap/servers/slapd/passwd_extop.c ++++ b/ldap/servers/slapd/passwd_extop.c +@@ -465,12 +465,13 @@ passwd_modify_extop(Slapi_PBlock *pb) + BerElement *response_ber = NULL; + Slapi_Entry *targetEntry = NULL; + Connection *conn = NULL; ++ Operation *pb_op = NULL; + LDAPControl **req_controls = NULL; + LDAPControl **resp_controls = NULL; + passwdPolicy *pwpolicy = NULL; + Slapi_DN *target_sdn = NULL; + Slapi_Entry *referrals = NULL; +- /* Slapi_DN sdn; */ ++ Slapi_Backend *be = NULL; + + slapi_log_err(SLAPI_LOG_TRACE, "passwd_modify_extop", "=>\n"); + +@@ -647,7 +648,7 @@ parse_req_done: + } + dn = slapi_sdn_get_ndn(target_sdn); + if (dn == NULL || *dn == '\0') { +- /* Refuse the operation because they're bound anonymously */ ++ /* Invalid DN - refuse the operation */ + errMesg = "Invalid dn."; + rc = LDAP_INVALID_DN_SYNTAX; + goto free_and_return; +@@ -724,14 +725,19 @@ parse_req_done: + ber_free(response_ber, 1); + } + +- slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET, (void *)dn); ++ slapi_pblock_get(pb, SLAPI_OPERATION, &pb_op); ++ if (pb_op == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "pb_op is NULL\n"); ++ goto free_and_return; ++ } + ++ slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET, (void *)dn); + /* Now we have the DN, look for the entry */ + ret = passwd_modify_getEntry(dn, &targetEntry); + /* If we can't find the entry, then that's an error */ + if (ret) { + /* Couldn't find the entry, fail */ +- errMesg = "No such Entry exists."; ++ errMesg = "No such entry exists."; + rc = LDAP_NO_SUCH_OBJECT; + goto free_and_return; + } +@@ -742,30 +748,18 @@ parse_req_done: + leak any useful information to the client such as current password + wrong, etc. + */ +- Operation *pb_op = NULL; +- slapi_pblock_get(pb, SLAPI_OPERATION, &pb_op); +- if (pb_op == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "pb_op is NULL\n"); +- goto free_and_return; +- } +- + operation_set_target_spec(pb_op, slapi_entry_get_sdn(targetEntry)); + slapi_pblock_set(pb, SLAPI_REQUESTOR_ISROOT, &pb_op->o_isroot); + +- /* In order to perform the access control check , we need to select a backend (even though +- * we don't actually need it otherwise). +- */ +- { +- Slapi_Backend *be = NULL; +- +- be = slapi_mapping_tree_find_backend_for_sdn(slapi_entry_get_sdn(targetEntry)); +- if (NULL == be) { +- errMesg = "Failed to find backend for target entry"; +- rc = LDAP_OPERATIONS_ERROR; +- goto free_and_return; +- } +- slapi_pblock_set(pb, SLAPI_BACKEND, be); ++ /* In order to perform the access control check, we need to select a backend (even though ++ * we don't actually need it otherwise). */ ++ be = slapi_mapping_tree_find_backend_for_sdn(slapi_entry_get_sdn(targetEntry)); ++ if (NULL == be) { ++ errMesg = "Failed to find backend for target entry"; ++ rc = LDAP_NO_SUCH_OBJECT; ++ goto free_and_return; + } ++ slapi_pblock_set(pb, SLAPI_BACKEND, be); + + /* Check if the pwpolicy control is present */ + slapi_pblock_get(pb, SLAPI_PWPOLICY, &need_pwpolicy_ctrl); +@@ -797,10 +791,7 @@ parse_req_done: + /* Check if password policy allows users to change their passwords. We need to do + * this here since the normal modify code doesn't perform this check for + * internal operations. 
*/ +- +- Connection *pb_conn; +- slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn); +- if (!pb_op->o_isroot && !pb_conn->c_needpw && !pwpolicy->pw_change) { ++ if (!pb_op->o_isroot && !conn->c_needpw && !pwpolicy->pw_change) { + if (NULL == bindSDN) { + bindSDN = slapi_sdn_new_normdn_byref(bindDN); + } +@@ -848,6 +839,15 @@ free_and_return: + slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop", + "%s\n", errMesg ? errMesg : "success"); + ++ if (dn) { ++ /* Log the target ndn (if we have a target ndn) */ ++ slapi_log_access(LDAP_DEBUG_STATS, ++ "conn=%" PRIu64 " op=%d EXT_INFO name=\"passwd_modify_plugin\" bind_dn=\"%s\" target_dn=\"%s\" msg=\"%s\" rc=%d\n", ++ conn ? conn->c_connid : -1, pb_op ? pb_op->o_opid : -1, ++ bindDN ? bindDN : "", dn, ++ errMesg ? errMesg : "success", rc); ++ } ++ + if ((rc == LDAP_REFERRAL) && (referrals)) { + send_referrals_from_entry(pb, referrals); + } else { +-- +2.49.0 + diff --git a/SOURCES/0015-Issue-6895-Crash-if-repl-keep-alive-entry-can-not-be.patch b/SOURCES/0015-Issue-6895-Crash-if-repl-keep-alive-entry-can-not-be.patch new file mode 100644 index 0000000..914d27b --- /dev/null +++ b/SOURCES/0015-Issue-6895-Crash-if-repl-keep-alive-entry-can-not-be.patch @@ -0,0 +1,98 @@ +From ffc3a81ed5852b7f1fbaed79b9b776af23d65b7c Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 23 Jul 2025 19:35:32 -0400 +Subject: [PATCH] Issue 6895 - Crash if repl keep alive entry can not be + created + +Description: + +Heap use after free when logging that the replicaton keep-alive entry can not +be created. slapi_add_internal_pb() frees the slapi entry, then +we try and get the dn from the entry and we get a use-after-free crash. + +Relates: https://github.com/389ds/389-ds-base/issues/6895 + +Reviewed by: spichugi(Thanks!) +--- + ldap/servers/plugins/chainingdb/cb_config.c | 3 +-- + ldap/servers/plugins/posix-winsync/posix-winsync.c | 1 - + ldap/servers/plugins/replication/repl5_init.c | 3 --- + ldap/servers/plugins/replication/repl5_replica.c | 8 ++++---- + 4 files changed, 5 insertions(+), 10 deletions(-) + +diff --git a/ldap/servers/plugins/chainingdb/cb_config.c b/ldap/servers/plugins/chainingdb/cb_config.c +index 40a7088d7..24fa1bcb3 100644 +--- a/ldap/servers/plugins/chainingdb/cb_config.c ++++ b/ldap/servers/plugins/chainingdb/cb_config.c +@@ -44,8 +44,7 @@ cb_config_add_dse_entries(cb_backend *cb, char **entries, char *string1, char *s + slapi_pblock_get(util_pb, SLAPI_PLUGIN_INTOP_RESULT, &res); + if (LDAP_SUCCESS != res && LDAP_ALREADY_EXISTS != res) { + slapi_log_err(SLAPI_LOG_ERR, CB_PLUGIN_SUBSYSTEM, +- "cb_config_add_dse_entries - Unable to add config entry (%s) to the DSE: %s\n", +- slapi_entry_get_dn(e), ++ "cb_config_add_dse_entries - Unable to add config entry to the DSE: %s\n", + ldap_err2string(res)); + rc = res; + slapi_pblock_destroy(util_pb); +diff --git a/ldap/servers/plugins/posix-winsync/posix-winsync.c b/ldap/servers/plugins/posix-winsync/posix-winsync.c +index 51a55b643..3a002bb70 100644 +--- a/ldap/servers/plugins/posix-winsync/posix-winsync.c ++++ b/ldap/servers/plugins/posix-winsync/posix-winsync.c +@@ -1626,7 +1626,6 @@ posix_winsync_end_update_cb(void *cbdata __attribute__((unused)), + "posix_winsync_end_update_cb: " + "add task entry\n"); + } +- /* slapi_entry_free(e_task); */ + slapi_pblock_destroy(pb); + pb = NULL; + posix_winsync_config_reset_MOFTaskCreated(); +diff --git a/ldap/servers/plugins/replication/repl5_init.c b/ldap/servers/plugins/replication/repl5_init.c +index 8bc0b5372..5047fb8dc 100644 +--- 
a/ldap/servers/plugins/replication/repl5_init.c ++++ b/ldap/servers/plugins/replication/repl5_init.c +@@ -682,7 +682,6 @@ create_repl_schema_policy(void) + repl_schema_top, + ldap_err2string(return_value)); + rc = -1; +- slapi_entry_free(e); /* The entry was not consumed */ + goto done; + } + slapi_pblock_destroy(pb); +@@ -703,7 +702,6 @@ create_repl_schema_policy(void) + repl_schema_supplier, + ldap_err2string(return_value)); + rc = -1; +- slapi_entry_free(e); /* The entry was not consumed */ + goto done; + } + slapi_pblock_destroy(pb); +@@ -724,7 +722,6 @@ create_repl_schema_policy(void) + repl_schema_consumer, + ldap_err2string(return_value)); + rc = -1; +- slapi_entry_free(e); /* The entry was not consumed */ + goto done; + } + slapi_pblock_destroy(pb); +diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c +index 59062b46b..a97c807e9 100644 +--- a/ldap/servers/plugins/replication/repl5_replica.c ++++ b/ldap/servers/plugins/replication/repl5_replica.c +@@ -465,10 +465,10 @@ replica_subentry_create(const char *repl_root, ReplicaId rid) + if (return_value != LDAP_SUCCESS && + return_value != LDAP_ALREADY_EXISTS && + return_value != LDAP_REFERRAL /* CONSUMER */) { +- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_subentry_create - Unable to " +- "create replication keep alive entry %s: error %d - %s\n", +- slapi_entry_get_dn_const(e), +- return_value, ldap_err2string(return_value)); ++ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_subentry_create - " ++ "Unable to create replication keep alive entry 'cn=%s %d,%s': error %d - %s\n", ++ KEEP_ALIVE_ENTRY, rid, repl_root, ++ return_value, ldap_err2string(return_value)); + rc = -1; + goto done; + } +-- +2.49.0 + diff --git a/SOURCES/0016-Issue-6250-Add-test-for-entryUSN-overflow-on-failed-.patch b/SOURCES/0016-Issue-6250-Add-test-for-entryUSN-overflow-on-failed-.patch new file mode 100644 index 0000000..0e3949e --- /dev/null +++ b/SOURCES/0016-Issue-6250-Add-test-for-entryUSN-overflow-on-failed-.patch @@ -0,0 +1,352 @@ +From 191634746fdcb7e26a154cd00a22324e02a10110 Mon Sep 17 00:00:00 2001 +From: Simon Pichugin +Date: Mon, 28 Jul 2025 10:50:26 -0700 +Subject: [PATCH] Issue 6250 - Add test for entryUSN overflow on failed add + operations (#6821) + +Description: Add comprehensive test to reproduce the entryUSN +overflow issue where failed attempts to add existing entries followed by +modify operations cause entryUSN values to underflow/overflow instead of +incrementing properly. + +Related: https://github.com/389ds/389-ds-base/issues/6250 + +Reviewed by: @tbordaz (Thanks!) +--- + .../suites/plugins/entryusn_overflow_test.py | 323 ++++++++++++++++++ + 1 file changed, 323 insertions(+) + create mode 100644 dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py + +diff --git a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py +new file mode 100644 +index 000000000..a23d734ca +--- /dev/null ++++ b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py +@@ -0,0 +1,323 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2025 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. 
++# --- END COPYRIGHT BLOCK --- ++# ++import os ++import ldap ++import logging ++import pytest ++import time ++import random ++from lib389._constants import DEFAULT_SUFFIX ++from lib389.config import Config ++from lib389.plugins import USNPlugin ++from lib389.idm.user import UserAccounts ++from lib389.topologies import topology_st ++from lib389.rootdse import RootDSE ++ ++pytestmark = pytest.mark.tier2 ++ ++log = logging.getLogger(__name__) ++ ++# Test constants ++DEMO_USER_BASE_DN = "uid=demo_user,ou=people," + DEFAULT_SUFFIX ++TEST_USER_PREFIX = "Demo User" ++MAX_USN_64BIT = 18446744073709551615 # 2^64 - 1 ++ITERATIONS = 10 ++ADD_EXISTING_ENTRY_MAX_ATTEMPTS = 5 ++ ++ ++@pytest.fixture(scope="module") ++def setup_usn_test(topology_st, request): ++ """Setup USN plugin and test data for entryUSN overflow testing""" ++ ++ inst = topology_st.standalone ++ ++ log.info("Enable the USN plugin...") ++ plugin = USNPlugin(inst) ++ plugin.enable() ++ plugin.enable_global_mode() ++ ++ inst.restart() ++ ++ # Create initial test users ++ users = UserAccounts(inst, DEFAULT_SUFFIX) ++ created_users = [] ++ ++ log.info("Creating initial test users...") ++ for i in range(3): ++ user_props = { ++ 'uid': f'{TEST_USER_PREFIX}-{i}', ++ 'cn': f'{TEST_USER_PREFIX}-{i}', ++ 'sn': f'User{i}', ++ 'uidNumber': str(1000 + i), ++ 'gidNumber': str(1000 + i), ++ 'homeDirectory': f'/home/{TEST_USER_PREFIX}-{i}', ++ 'userPassword': 'password123' ++ } ++ try: ++ user = users.create(properties=user_props) ++ created_users.append(user) ++ log.info(f"Created user: {user.dn}") ++ except ldap.ALREADY_EXISTS: ++ log.info(f"User {user_props['uid']} already exists, skipping creation") ++ user = users.get(user_props['uid']) ++ created_users.append(user) ++ ++ def fin(): ++ log.info("Cleaning up test users...") ++ for user in created_users: ++ try: ++ user.delete() ++ except ldap.NO_SUCH_OBJECT: ++ pass ++ ++ request.addfinalizer(fin) ++ ++ return created_users ++ ++ ++def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test): ++ """Test that reproduces entryUSN overflow when adding existing entries ++ ++ :id: a5a8c33d-82f3-4113-be2b-027de51791c8 ++ :setup: Standalone instance with USN plugin enabled and test users ++ :steps: ++ 1. Record initial entryUSN values for existing users ++ 2. Attempt to add existing entries multiple times (should fail) ++ 3. Perform modify operations on the entries ++ 4. Check that entryUSN values increment correctly without overflow ++ 5. Verify lastusn values are consistent ++ :expectedresults: ++ 1. Initial entryUSN values are recorded successfully ++ 2. Add operations fail with ALREADY_EXISTS error ++ 3. Modify operations succeed ++ 4. EntryUSN values increment properly without underflow/overflow ++ 5. 
LastUSN values are consistent and increasing ++ """ ++ ++ inst = topology_st.standalone ++ users = setup_usn_test ++ ++ # Enable detailed logging for debugging ++ config = Config(inst) ++ config.replace('nsslapd-accesslog-level', '260') # Internal op logging ++ config.replace('nsslapd-errorlog-level', '65536') ++ config.replace('nsslapd-plugin-logging', 'on') ++ ++ root_dse = RootDSE(inst) ++ ++ log.info("Starting entryUSN overflow reproduction test") ++ ++ # Record initial state ++ initial_usn_values = {} ++ for user in users: ++ initial_usn = user.get_attr_val_int('entryusn') ++ initial_usn_values[user.dn] = initial_usn ++ log.info(f"Initial entryUSN for {user.get_attr_val_utf8('cn')}: {initial_usn}") ++ ++ initial_lastusn = root_dse.get_attr_val_int("lastusn") ++ log.info(f"Initial lastUSN: {initial_lastusn}") ++ ++ # Perform test iterations ++ for iteration in range(1, ITERATIONS + 1): ++ log.info(f"\n--- Iteration {iteration} ---") ++ ++ # Step 1: Try to add existing entries multiple times ++ selected_user = random.choice(users) ++ cn_value = selected_user.get_attr_val_utf8('cn') ++ attempts = random.randint(1, ADD_EXISTING_ENTRY_MAX_ATTEMPTS) ++ ++ log.info(f"Attempting to add existing entry '{cn_value}' {attempts} times") ++ ++ # Get user attributes for recreation attempt ++ user_attrs = { ++ 'uid': selected_user.get_attr_val_utf8('uid'), ++ 'cn': selected_user.get_attr_val_utf8('cn'), ++ 'sn': selected_user.get_attr_val_utf8('sn'), ++ 'uidNumber': selected_user.get_attr_val_utf8('uidNumber'), ++ 'gidNumber': selected_user.get_attr_val_utf8('gidNumber'), ++ 'homeDirectory': selected_user.get_attr_val_utf8('homeDirectory'), ++ 'userPassword': 'password123' ++ } ++ ++ users_collection = UserAccounts(inst, DEFAULT_SUFFIX) ++ ++ # Try to add the existing user multiple times ++ for attempt in range(attempts): ++ try: ++ users_collection.create(properties=user_attrs) ++ log.error(f"ERROR: Add operation should have failed but succeeded on attempt {attempt + 1}") ++ assert False, "Add operation should have failed with ALREADY_EXISTS" ++ except ldap.ALREADY_EXISTS: ++ log.info(f"Attempt {attempt + 1}: Got expected ALREADY_EXISTS error") ++ except Exception as e: ++ log.error(f"Unexpected error on attempt {attempt + 1}: {e}") ++ raise ++ ++ # Step 2: Perform modify operation ++ target_user = random.choice(users) ++ cn_value = target_user.get_attr_val_utf8('cn') ++ old_usn = target_user.get_attr_val_int('entryusn') ++ ++ # Modify the user entry ++ new_description = f"Modified in iteration {iteration} - {time.time()}" ++ target_user.replace('description', new_description) ++ ++ # Get new USN value ++ new_usn = target_user.get_attr_val_int('entryusn') ++ ++ log.info(f"Modified entry '{cn_value}': old USN = {old_usn}, new USN = {new_usn}") ++ ++ # Step 3: Validate USN values ++ # Check for overflow/underflow conditions ++ assert new_usn > 0, f"EntryUSN should be positive, got {new_usn}" ++ assert new_usn < MAX_USN_64BIT, f"EntryUSN overflow detected: {new_usn} >= {MAX_USN_64BIT}" ++ ++ # Check that USN didn't wrap around (underflow detection) ++ usn_diff = new_usn - old_usn ++ assert usn_diff < 1000, f"USN increment too large, possible overflow: {usn_diff}" ++ ++ # Verify lastUSN is also reasonable ++ current_lastusn = root_dse.get_attr_val_int("lastusn") ++ assert current_lastusn >= new_usn, f"LastUSN ({current_lastusn}) should be >= entryUSN ({new_usn})" ++ assert current_lastusn < MAX_USN_64BIT, f"LastUSN overflow detected: {current_lastusn}" ++ ++ log.info(f"USN validation passed for 
iteration {iteration}") ++ ++ # Add a new entry occasionally to increase USN diversity ++ if iteration % 3 == 0: ++ new_user_props = { ++ 'uid': f'{TEST_USER_PREFIX}-new-{iteration}', ++ 'cn': f'{TEST_USER_PREFIX}-new-{iteration}', ++ 'sn': f'NewUser{iteration}', ++ 'uidNumber': str(2000 + iteration), ++ 'gidNumber': str(2000 + iteration), ++ 'homeDirectory': f'/home/{TEST_USER_PREFIX}-new-{iteration}', ++ 'userPassword': 'newpassword123' ++ } ++ try: ++ new_user = users_collection.create(properties=new_user_props) ++ new_user_usn = new_user.get_attr_val_int('entryusn') ++ log.info(f"Created new entry '{new_user.get_attr_val_utf8('cn')}' with USN: {new_user_usn}") ++ users.append(new_user) # Add to cleanup list ++ except Exception as e: ++ log.warning(f"Failed to create new user in iteration {iteration}: {e}") ++ ++ # Final validation: Check all USN values are reasonable ++ log.info("\nFinal USN validation") ++ final_lastusn = root_dse.get_attr_val_int("lastusn") ++ ++ for user in users: ++ try: ++ final_usn = user.get_attr_val_int('entryusn') ++ cn_value = user.get_attr_val_utf8('cn') ++ log.info(f"Final entryUSN for '{cn_value}': {final_usn}") ++ ++ # Ensure no overflow occurred ++ assert final_usn > 0, f"Final entryUSN should be positive for {cn_value}: {final_usn}" ++ assert final_usn < MAX_USN_64BIT, f"EntryUSN overflow for {cn_value}: {final_usn}" ++ ++ except ldap.NO_SUCH_OBJECT: ++ log.info(f"User {user.dn} was deleted during test") ++ ++ log.info(f"Final lastUSN: {final_lastusn}") ++ assert final_lastusn > initial_lastusn, "LastUSN should have increased during test" ++ assert final_lastusn < MAX_USN_64BIT, f"LastUSN overflow detected: {final_lastusn}" ++ ++ log.info("EntryUSN overflow test completed successfully") ++ ++ ++def test_entryusn_consistency_after_failed_adds(topology_st, setup_usn_test): ++ """Test that entryUSN remains consistent after failed add operations ++ ++ :id: e380ccad-527b-427e-a331-df5c41badbed ++ :setup: Standalone instance with USN plugin enabled and test users ++ :steps: ++ 1. Record entryUSN values before failed add attempts ++ 2. Attempt to add existing entries (should fail) ++ 3. Verify entryUSN values haven't changed due to failed operations ++ 4. Perform successful modify operations ++ 5. Verify entryUSN increments correctly ++ :expectedresults: ++ 1. Initial entryUSN values recorded ++ 2. Add operations fail as expected ++ 3. EntryUSN values unchanged after failed adds ++ 4. Modify operations succeed ++ 5. 
EntryUSN values increment correctly without overflow ++ """ ++ ++ inst = topology_st.standalone ++ users = setup_usn_test ++ ++ log.info("Testing entryUSN consistency after failed adds") ++ ++ # Record USN values before any operations ++ pre_operation_usns = {} ++ for user in users: ++ usn = user.get_attr_val_int('entryusn') ++ pre_operation_usns[user.dn] = usn ++ log.info(f"Pre-operation entryUSN for {user.get_attr_val_utf8('cn')}: {usn}") ++ ++ # Attempt to add existing entries - these should fail ++ users_collection = UserAccounts(inst, DEFAULT_SUFFIX) ++ ++ for user in users: ++ cn_value = user.get_attr_val_utf8('cn') ++ log.info(f"Attempting to add existing user: {cn_value}") ++ ++ user_attrs = { ++ 'uid': user.get_attr_val_utf8('uid'), ++ 'cn': cn_value, ++ 'sn': user.get_attr_val_utf8('sn'), ++ 'uidNumber': user.get_attr_val_utf8('uidNumber'), ++ 'gidNumber': user.get_attr_val_utf8('gidNumber'), ++ 'homeDirectory': user.get_attr_val_utf8('homeDirectory'), ++ 'userPassword': 'password123' ++ } ++ ++ try: ++ users_collection.create(properties=user_attrs) ++ assert False, f"Add operation should have failed for existing user {cn_value}" ++ except ldap.ALREADY_EXISTS: ++ log.info(f"Got expected ALREADY_EXISTS for {cn_value}") ++ ++ # Verify USN values haven't changed after failed adds ++ log.info("Verifying entryUSN values after failed add operations...") ++ for user in users: ++ current_usn = user.get_attr_val_int('entryusn') ++ expected_usn = pre_operation_usns[user.dn] ++ cn_value = user.get_attr_val_utf8('cn') ++ ++ assert current_usn == expected_usn, \ ++ f"EntryUSN changed after failed add for {cn_value}: was {expected_usn}, now {current_usn}" ++ log.info(f"EntryUSN unchanged for {cn_value}: {current_usn}") ++ ++ # Now perform successful modify operations ++ log.info("Performing successful modify operations...") ++ for i, user in enumerate(users): ++ cn_value = user.get_attr_val_utf8('cn') ++ old_usn = user.get_attr_val_int('entryusn') ++ ++ # Modify the user ++ user.replace('description', f'Consistency test modification {i + 1}') ++ ++ new_usn = user.get_attr_val_int('entryusn') ++ log.info(f"Modified {cn_value}: USN {old_usn} -> {new_usn}") ++ ++ # Verify proper increment ++ assert (new_usn - old_usn) == 1, f"EntryUSN should increment by 1 for {cn_value}: {old_usn} -> {new_usn}" ++ assert new_usn < MAX_USN_64BIT, f"EntryUSN overflow for {cn_value}: {new_usn}" ++ ++ log.info("EntryUSN consistency test completed successfully") ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) +\ No newline at end of file +-- +2.49.0 + diff --git a/SOURCES/0017-Issue-6594-Add-test-for-numSubordinates-replication-.patch b/SOURCES/0017-Issue-6594-Add-test-for-numSubordinates-replication-.patch new file mode 100644 index 0000000..9680aa3 --- /dev/null +++ b/SOURCES/0017-Issue-6594-Add-test-for-numSubordinates-replication-.patch @@ -0,0 +1,172 @@ +From 37a56f75afac2805e1ba958eebd496e77b7079e7 Mon Sep 17 00:00:00 2001 +From: Simon Pichugin +Date: Mon, 28 Jul 2025 15:35:50 -0700 +Subject: [PATCH] Issue 6594 - Add test for numSubordinates replication + consistency with tombstones (#6862) + +Description: Add a comprehensive test to verify that numSubordinates and +tombstoneNumSubordinates attributes are correctly replicated between +instances when tombstone entries are present. + +Fixes: https://github.com/389ds/389-ds-base/issues/6594 + +Reviewed by: @progier389 (Thanks!) 
+--- + .../numsubordinates_replication_test.py | 144 ++++++++++++++++++ + 1 file changed, 144 insertions(+) + create mode 100644 dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py + +diff --git a/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py b/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py +new file mode 100644 +index 000000000..9ba10657d +--- /dev/null ++++ b/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py +@@ -0,0 +1,144 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2025 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++ ++import os ++import logging ++import pytest ++from lib389._constants import DEFAULT_SUFFIX ++from lib389.replica import ReplicationManager ++from lib389.idm.organizationalunit import OrganizationalUnits ++from lib389.idm.user import UserAccounts ++from lib389.topologies import topology_i2 as topo_i2 ++ ++ ++pytestmark = pytest.mark.tier1 ++ ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++ ++def test_numsubordinates_tombstone_replication_mismatch(topo_i2): ++ """Test that numSubordinates values match between replicas after tombstone creation ++ ++ :id: c43ecc7a-d706-42e8-9179-1ff7d0e7163a ++ :setup: Two standalone instances ++ :steps: ++ 1. Create a container (organizational unit) on the first instance ++ 2. Create a user object in that container ++ 3. Delete the user object (this creates a tombstone) ++ 4. Set up replication between the two instances ++ 5. Wait for replication to complete ++ 6. Check numSubordinates on both instances ++ 7. Check tombstoneNumSubordinates on both instances ++ 8. Verify that numSubordinates values match on both instances ++ :expectedresults: ++ 1. Container should be created successfully ++ 2. User object should be created successfully ++ 3. User object should be deleted successfully ++ 4. Replication should be set up successfully ++ 5. Replication should complete successfully ++ 6. numSubordinates should be accessible on both instances ++ 7. tombstoneNumSubordinates should be accessible on both instances ++ 8. 
numSubordinates values should match on both instances ++ """ ++ ++ instance1 = topo_i2.ins["standalone1"] ++ instance2 = topo_i2.ins["standalone2"] ++ ++ log.info("Create a container (organizational unit) on the first instance") ++ ous1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX) ++ container = ous1.create(properties={ ++ 'ou': 'test_container', ++ 'description': 'Test container for numSubordinates replication test' ++ }) ++ container_rdn = container.rdn ++ log.info(f"Created container: {container_rdn}") ++ ++ log.info("Create a user object in that container") ++ users1 = UserAccounts(instance1, DEFAULT_SUFFIX, rdn=f"ou={container_rdn}") ++ test_user = users1.create_test_user(uid=1001) ++ log.info(f"Created user: {test_user.dn}") ++ ++ log.info("Checking initial numSubordinates on container") ++ container_obj1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX).get(container_rdn) ++ initial_numsubordinates = container_obj1.get_attr_val_int('numSubordinates') ++ log.info(f"Initial numSubordinates: {initial_numsubordinates}") ++ assert initial_numsubordinates == 1 ++ ++ log.info("Delete the user object (this creates a tombstone)") ++ test_user.delete() ++ ++ log.info("Checking numSubordinates after deletion") ++ after_delete_numsubordinates = container_obj1.get_attr_val_int('numSubordinates') ++ log.info(f"numSubordinates after deletion: {after_delete_numsubordinates}") ++ ++ log.info("Checking tombstoneNumSubordinates after deletion") ++ try: ++ tombstone_numsubordinates = container_obj1.get_attr_val_int('tombstoneNumSubordinates') ++ log.info(f"tombstoneNumSubordinates: {tombstone_numsubordinates}") ++ except Exception as e: ++ log.info(f"tombstoneNumSubordinates not found or error: {e}") ++ tombstone_numsubordinates = 0 ++ ++ log.info("Set up replication between the two instances") ++ repl = ReplicationManager(DEFAULT_SUFFIX) ++ repl.create_first_supplier(instance1) ++ repl.join_supplier(instance1, instance2) ++ ++ log.info("Wait for replication to complete") ++ repl.wait_for_replication(instance1, instance2) ++ ++ log.info("Check numSubordinates on both instances") ++ container_obj1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX).get(container_rdn) ++ numsubordinates_instance1 = container_obj1.get_attr_val_int('numSubordinates') ++ log.info(f"numSubordinates on instance1: {numsubordinates_instance1}") ++ ++ container_obj2 = OrganizationalUnits(instance2, DEFAULT_SUFFIX).get(container_rdn) ++ numsubordinates_instance2 = container_obj2.get_attr_val_int('numSubordinates') ++ log.info(f"numSubordinates on instance2: {numsubordinates_instance2}") ++ ++ log.info("Check tombstoneNumSubordinates on both instances") ++ try: ++ tombstone_numsubordinates_instance1 = container_obj1.get_attr_val_int('tombstoneNumSubordinates') ++ log.info(f"tombstoneNumSubordinates on instance1: {tombstone_numsubordinates_instance1}") ++ except Exception as e: ++ log.info(f"tombstoneNumSubordinates not found on instance1: {e}") ++ tombstone_numsubordinates_instance1 = 0 ++ ++ try: ++ tombstone_numsubordinates_instance2 = container_obj2.get_attr_val_int('tombstoneNumSubordinates') ++ log.info(f"tombstoneNumSubordinates on instance2: {tombstone_numsubordinates_instance2}") ++ except Exception as e: ++ log.info(f"tombstoneNumSubordinates not found on instance2: {e}") ++ tombstone_numsubordinates_instance2 = 0 ++ ++ log.info("Verify that numSubordinates values match on both instances") ++ log.info(f"Comparison: instance1 numSubordinates={numsubordinates_instance1}, " ++ f"instance2 
numSubordinates={numsubordinates_instance2}") ++ log.info(f"Comparison: instance1 tombstoneNumSubordinates={tombstone_numsubordinates_instance1}, " ++ f"instance2 tombstoneNumSubordinates={tombstone_numsubordinates_instance2}") ++ ++ assert numsubordinates_instance1 == numsubordinates_instance2, ( ++ f"numSubordinates mismatch: instance1 has {numsubordinates_instance1}, " ++ f"instance2 has {numsubordinates_instance2}. " ++ ) ++ assert tombstone_numsubordinates_instance1 == tombstone_numsubordinates_instance2, ( ++ f"tombstoneNumSubordinates mismatch: instance1 has {tombstone_numsubordinates_instance1}, " ++ f"instance2 has {tombstone_numsubordinates_instance2}. " ++ ) ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) +\ No newline at end of file +-- +2.49.0 + diff --git a/SOURCES/0018-Issue-6884-Mask-password-hashes-in-audit-logs-6885.patch b/SOURCES/0018-Issue-6884-Mask-password-hashes-in-audit-logs-6885.patch new file mode 100644 index 0000000..f7f9a2b --- /dev/null +++ b/SOURCES/0018-Issue-6884-Mask-password-hashes-in-audit-logs-6885.patch @@ -0,0 +1,814 @@ +From e05653cbff500c47b89e43e4a1c85b7cb30321ff Mon Sep 17 00:00:00 2001 +From: Simon Pichugin +Date: Mon, 28 Jul 2025 15:41:29 -0700 +Subject: [PATCH] Issue 6884 - Mask password hashes in audit logs (#6885) + +Description: Fix the audit log functionality to mask password hash values for +userPassword, nsslapd-rootpw, nsmultiplexorcredentials, nsds5ReplicaCredentials, +and nsds5ReplicaBootstrapCredentials attributes in ADD and MODIFY operations. +Update auditlog.c to detect password attributes and replace their values with +asterisks (**********************) in both LDIF and JSON audit log formats. +Add a comprehensive test suite audit_password_masking_test.py to verify +password masking works correctly across all log formats and operation types. + +Fixes: https://github.com/389ds/389-ds-base/issues/6884 + +Reviewed by: @mreynolds389, @vashirov (Thanks!!) +--- + .../logging/audit_password_masking_test.py | 501 ++++++++++++++++++ + ldap/servers/slapd/auditlog.c | 170 +++++- + ldap/servers/slapd/slapi-private.h | 1 + + src/lib389/lib389/chaining.py | 3 +- + 4 files changed, 652 insertions(+), 23 deletions(-) + create mode 100644 dirsrvtests/tests/suites/logging/audit_password_masking_test.py + +diff --git a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py +new file mode 100644 +index 000000000..3b6a54849 +--- /dev/null ++++ b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py +@@ -0,0 +1,501 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2025 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. 
++# --- END COPYRIGHT BLOCK --- ++# ++import logging ++import pytest ++import os ++import re ++import time ++import ldap ++from lib389._constants import DEFAULT_SUFFIX, DN_DM, PW_DM ++from lib389.topologies import topology_m2 as topo ++from lib389.idm.user import UserAccounts ++from lib389.dirsrv_log import DirsrvAuditJSONLog ++from lib389.plugins import ChainingBackendPlugin ++from lib389.chaining import ChainingLinks ++from lib389.agreement import Agreements ++from lib389.replica import ReplicationManager, Replicas ++from lib389.idm.directorymanager import DirectoryManager ++ ++log = logging.getLogger(__name__) ++ ++MASKED_PASSWORD = "**********************" ++TEST_PASSWORD = "MySecret123" ++TEST_PASSWORD_2 = "NewPassword789" ++TEST_PASSWORD_3 = "NewPassword101" ++ ++ ++def setup_audit_logging(inst, log_format='default', display_attrs=None): ++ """Configure audit logging settings""" ++ inst.config.replace('nsslapd-auditlog-logbuffering', 'off') ++ inst.config.replace('nsslapd-auditlog-logging-enabled', 'on') ++ inst.config.replace('nsslapd-auditlog-log-format', log_format) ++ ++ if display_attrs is not None: ++ inst.config.replace('nsslapd-auditlog-display-attrs', display_attrs) ++ ++ inst.deleteAuditLogs() ++ ++ ++def check_password_masked(inst, log_format, expected_password, actual_password): ++ """Helper function to check password masking in audit logs""" ++ ++ time.sleep(1) # Allow log to flush ++ ++ # List of all password/credential attributes that should be masked ++ password_attributes = [ ++ 'userPassword', ++ 'nsslapd-rootpw', ++ 'nsmultiplexorcredentials', ++ 'nsDS5ReplicaCredentials', ++ 'nsDS5ReplicaBootstrapCredentials' ++ ] ++ ++ # Get password schemes to check for hash leakage ++ user_password_scheme = inst.config.get_attr_val_utf8('passwordStorageScheme') ++ root_password_scheme = inst.config.get_attr_val_utf8('nsslapd-rootpwstoragescheme') ++ ++ if log_format == 'json': ++ # Check JSON format logs ++ audit_log = DirsrvAuditJSONLog(inst) ++ log_lines = audit_log.readlines() ++ ++ found_masked = False ++ found_actual = False ++ found_hashed = False ++ ++ for line in log_lines: ++ # Check if any password attribute is present in the line ++ for attr in password_attributes: ++ if attr in line: ++ if expected_password in line: ++ found_masked = True ++ if actual_password in line: ++ found_actual = True ++ # Check for password scheme indicators (hashed passwords) ++ if user_password_scheme and f'{{{user_password_scheme}}}' in line: ++ found_hashed = True ++ if root_password_scheme and f'{{{root_password_scheme}}}' in line: ++ found_hashed = True ++ break # Found a password attribute, no need to check others for this line ++ ++ else: ++ # Check LDIF format logs ++ found_masked = False ++ found_actual = False ++ found_hashed = False ++ ++ # Check each password attribute for masked password ++ for attr in password_attributes: ++ if inst.ds_audit_log.match(f"{attr}: {re.escape(expected_password)}"): ++ found_masked = True ++ if inst.ds_audit_log.match(f"{attr}: {actual_password}"): ++ found_actual = True ++ ++ # Check for hashed passwords in LDIF format ++ if user_password_scheme: ++ if inst.ds_audit_log.match(f"userPassword: {{{user_password_scheme}}}"): ++ found_hashed = True ++ if root_password_scheme: ++ if inst.ds_audit_log.match(f"nsslapd-rootpw: {{{root_password_scheme}}}"): ++ found_hashed = True ++ ++ # Delete audit logs to avoid interference with other tests ++ # We need to reset the root password to default as deleteAuditLogs() ++ # opens a new connection with the 
default password ++ dm = DirectoryManager(inst) ++ dm.change_password(PW_DM) ++ inst.deleteAuditLogs() ++ ++ return found_masked, found_actual, found_hashed ++ ++ ++@pytest.mark.parametrize("log_format,display_attrs", [ ++ ("default", None), ++ ("default", "*"), ++ ("default", "userPassword"), ++ ("json", None), ++ ("json", "*"), ++ ("json", "userPassword") ++]) ++def test_password_masking_add_operation(topo, log_format, display_attrs): ++ """Test password masking in ADD operations ++ ++ :id: 4358bd75-bcc7-401c-b492-d3209b10412d ++ :parametrized: yes ++ :setup: Standalone Instance ++ :steps: ++ 1. Configure audit logging format ++ 2. Add user with password ++ 3. Check that password is masked in audit log ++ 4. Verify actual password does not appear in log ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Password should be masked with asterisks ++ 4. Actual password should not be found in log ++ """ ++ inst = topo.ms['supplier1'] ++ setup_audit_logging(inst, log_format, display_attrs) ++ ++ users = UserAccounts(inst, DEFAULT_SUFFIX) ++ user = None ++ ++ try: ++ user = users.create(properties={ ++ 'uid': 'test_add_pwd_mask', ++ 'cn': 'Test Add User', ++ 'sn': 'User', ++ 'uidNumber': '1000', ++ 'gidNumber': '1000', ++ 'homeDirectory': '/home/test_add', ++ 'userPassword': TEST_PASSWORD ++ }) ++ ++ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD) ++ ++ assert found_masked, f"Masked password not found in {log_format} ADD operation" ++ assert not found_actual, f"Actual password found in {log_format} ADD log (should be masked)" ++ assert not found_hashed, f"Hashed password found in {log_format} ADD log (should be masked)" ++ ++ finally: ++ if user is not None: ++ try: ++ user.delete() ++ except: ++ pass ++ ++ ++@pytest.mark.parametrize("log_format,display_attrs", [ ++ ("default", None), ++ ("default", "*"), ++ ("default", "userPassword"), ++ ("json", None), ++ ("json", "*"), ++ ("json", "userPassword") ++]) ++def test_password_masking_modify_operation(topo, log_format, display_attrs): ++ """Test password masking in MODIFY operations ++ ++ :id: e6963aa9-7609-419c-aae2-1d517aa434bd ++ :parametrized: yes ++ :setup: Standalone Instance ++ :steps: ++ 1. Configure audit logging format ++ 2. Add user without password ++ 3. Add password via MODIFY operation ++ 4. Check that password is masked in audit log ++ 5. Modify password to new value ++ 6. Check that new password is also masked ++ 7. Verify actual passwords do not appear in log ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Password should be masked with asterisks ++ 5. Success ++ 6. New password should be masked with asterisks ++ 7. 
No actual password values should be found in log ++ """ ++ inst = topo.ms['supplier1'] ++ setup_audit_logging(inst, log_format, display_attrs) ++ ++ users = UserAccounts(inst, DEFAULT_SUFFIX) ++ user = None ++ ++ try: ++ user = users.create(properties={ ++ 'uid': 'test_modify_pwd_mask', ++ 'cn': 'Test Modify User', ++ 'sn': 'User', ++ 'uidNumber': '2000', ++ 'gidNumber': '2000', ++ 'homeDirectory': '/home/test_modify' ++ }) ++ ++ user.replace('userPassword', TEST_PASSWORD) ++ ++ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD) ++ assert found_masked, f"Masked password not found in {log_format} MODIFY operation (first password)" ++ assert not found_actual, f"Actual password found in {log_format} MODIFY log (should be masked)" ++ assert not found_hashed, f"Hashed password found in {log_format} MODIFY log (should be masked)" ++ ++ user.replace('userPassword', TEST_PASSWORD_2) ++ ++ found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2) ++ assert found_masked_2, f"Masked password not found in {log_format} MODIFY operation (second password)" ++ assert not found_actual_2, f"Second actual password found in {log_format} MODIFY log (should be masked)" ++ assert not found_hashed_2, f"Second hashed password found in {log_format} MODIFY log (should be masked)" ++ ++ finally: ++ if user is not None: ++ try: ++ user.delete() ++ except: ++ pass ++ ++ ++@pytest.mark.parametrize("log_format,display_attrs", [ ++ ("default", None), ++ ("default", "*"), ++ ("default", "nsslapd-rootpw"), ++ ("json", None), ++ ("json", "*"), ++ ("json", "nsslapd-rootpw") ++]) ++def test_password_masking_rootpw_modify_operation(topo, log_format, display_attrs): ++ """Test password masking for nsslapd-rootpw MODIFY operations ++ ++ :id: ec8c9fd4-56ba-4663-ab65-58efb3b445e4 ++ :parametrized: yes ++ :setup: Standalone Instance ++ :steps: ++ 1. Configure audit logging format ++ 2. Modify nsslapd-rootpw in configuration ++ 3. Check that root password is masked in audit log ++ 4. Modify root password to new value ++ 5. Check that new root password is also masked ++ 6. Verify actual root passwords do not appear in log ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Root password should be masked with asterisks ++ 4. Success ++ 5. New root password should be masked with asterisks ++ 6. 
No actual root password values should be found in log ++ """ ++ inst = topo.ms['supplier1'] ++ setup_audit_logging(inst, log_format, display_attrs) ++ dm = DirectoryManager(inst) ++ ++ try: ++ dm.change_password(TEST_PASSWORD) ++ dm.rebind(TEST_PASSWORD) ++ ++ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD) ++ assert found_masked, f"Masked root password not found in {log_format} MODIFY operation (first root password)" ++ assert not found_actual, f"Actual root password found in {log_format} MODIFY log (should be masked)" ++ assert not found_hashed, f"Hashed root password found in {log_format} MODIFY log (should be masked)" ++ ++ dm.change_password(TEST_PASSWORD_2) ++ dm.rebind(TEST_PASSWORD_2) ++ ++ found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2) ++ assert found_masked_2, f"Masked root password not found in {log_format} MODIFY operation (second root password)" ++ assert not found_actual_2, f"Second actual root password found in {log_format} MODIFY log (should be masked)" ++ assert not found_hashed_2, f"Second hashed root password found in {log_format} MODIFY log (should be masked)" ++ ++ finally: ++ dm.change_password(PW_DM) ++ dm.rebind(PW_DM) ++ ++ ++@pytest.mark.parametrize("log_format,display_attrs", [ ++ ("default", None), ++ ("default", "*"), ++ ("default", "nsmultiplexorcredentials"), ++ ("json", None), ++ ("json", "*"), ++ ("json", "nsmultiplexorcredentials") ++]) ++def test_password_masking_multiplexor_credentials(topo, log_format, display_attrs): ++ """Test password masking for nsmultiplexorcredentials in chaining/multiplexor configurations ++ ++ :id: 161a9498-b248-4926-90be-a696a36ed36e ++ :parametrized: yes ++ :setup: Standalone Instance ++ :steps: ++ 1. Configure audit logging format ++ 2. Create a chaining backend configuration entry with nsmultiplexorcredentials ++ 3. Check that multiplexor credentials are masked in audit log ++ 4. Modify the credentials ++ 5. Check that updated credentials are also masked ++ 6. Verify actual credentials do not appear in log ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Multiplexor credentials should be masked with asterisks ++ 4. Success ++ 5. Updated credentials should be masked with asterisks ++ 6. 
No actual credential values should be found in log ++ """ ++ inst = topo.ms['supplier1'] ++ setup_audit_logging(inst, log_format, display_attrs) ++ ++ # Enable chaining plugin and create chaining link ++ chain_plugin = ChainingBackendPlugin(inst) ++ chain_plugin.enable() ++ ++ chains = ChainingLinks(inst) ++ chain = None ++ ++ try: ++ # Create chaining link with multiplexor credentials ++ chain = chains.create(properties={ ++ 'cn': 'testchain', ++ 'nsfarmserverurl': 'ldap://localhost:389/', ++ 'nsslapd-suffix': 'dc=example,dc=com', ++ 'nsmultiplexorbinddn': 'cn=manager', ++ 'nsmultiplexorcredentials': TEST_PASSWORD, ++ 'nsCheckLocalACI': 'on', ++ 'nsConnectionLife': '30', ++ }) ++ ++ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD) ++ assert found_masked, f"Masked multiplexor credentials not found in {log_format} ADD operation" ++ assert not found_actual, f"Actual multiplexor credentials found in {log_format} ADD log (should be masked)" ++ assert not found_hashed, f"Hashed multiplexor credentials found in {log_format} ADD log (should be masked)" ++ ++ # Modify the credentials ++ chain.replace('nsmultiplexorcredentials', TEST_PASSWORD_2) ++ ++ found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2) ++ assert found_masked_2, f"Masked multiplexor credentials not found in {log_format} MODIFY operation" ++ assert not found_actual_2, f"Actual multiplexor credentials found in {log_format} MODIFY log (should be masked)" ++ assert not found_hashed_2, f"Hashed multiplexor credentials found in {log_format} MODIFY log (should be masked)" ++ ++ finally: ++ chain_plugin.disable() ++ if chain is not None: ++ inst.delete_branch_s(chain.dn, ldap.SCOPE_ONELEVEL) ++ chain.delete() ++ ++ ++@pytest.mark.parametrize("log_format,display_attrs", [ ++ ("default", None), ++ ("default", "*"), ++ ("default", "nsDS5ReplicaCredentials"), ++ ("json", None), ++ ("json", "*"), ++ ("json", "nsDS5ReplicaCredentials") ++]) ++def test_password_masking_replica_credentials(topo, log_format, display_attrs): ++ """Test password masking for nsDS5ReplicaCredentials in replication agreements ++ ++ :id: 7bf9e612-1b7c-49af-9fc0-de4c7df84b2a ++ :parametrized: yes ++ :setup: Standalone Instance ++ :steps: ++ 1. Configure audit logging format ++ 2. Create a replication agreement entry with nsDS5ReplicaCredentials ++ 3. Check that replica credentials are masked in audit log ++ 4. Modify the credentials ++ 5. Check that updated credentials are also masked ++ 6. Verify actual credentials do not appear in log ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Replica credentials should be masked with asterisks ++ 4. Success ++ 5. Updated credentials should be masked with asterisks ++ 6. 
No actual credential values should be found in log ++ """ ++ inst = topo.ms['supplier2'] ++ setup_audit_logging(inst, log_format, display_attrs) ++ agmt = None ++ ++ try: ++ replicas = Replicas(inst) ++ replica = replicas.get(DEFAULT_SUFFIX) ++ agmts = replica.get_agreements() ++ agmt = agmts.create(properties={ ++ 'cn': 'testagmt', ++ 'nsDS5ReplicaHost': 'localhost', ++ 'nsDS5ReplicaPort': '389', ++ 'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config', ++ 'nsDS5ReplicaCredentials': TEST_PASSWORD, ++ 'nsDS5ReplicaRoot': DEFAULT_SUFFIX ++ }) ++ ++ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD) ++ assert found_masked, f"Masked replica credentials not found in {log_format} ADD operation" ++ assert not found_actual, f"Actual replica credentials found in {log_format} ADD log (should be masked)" ++ assert not found_hashed, f"Hashed replica credentials found in {log_format} ADD log (should be masked)" ++ ++ # Modify the credentials ++ agmt.replace('nsDS5ReplicaCredentials', TEST_PASSWORD_2) ++ ++ found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2) ++ assert found_masked_2, f"Masked replica credentials not found in {log_format} MODIFY operation" ++ assert not found_actual_2, f"Actual replica credentials found in {log_format} MODIFY log (should be masked)" ++ assert not found_hashed_2, f"Hashed replica credentials found in {log_format} MODIFY log (should be masked)" ++ ++ finally: ++ if agmt is not None: ++ agmt.delete() ++ ++ ++@pytest.mark.parametrize("log_format,display_attrs", [ ++ ("default", None), ++ ("default", "*"), ++ ("default", "nsDS5ReplicaBootstrapCredentials"), ++ ("json", None), ++ ("json", "*"), ++ ("json", "nsDS5ReplicaBootstrapCredentials") ++]) ++def test_password_masking_bootstrap_credentials(topo, log_format, display_attrs): ++ """Test password masking for nsDS5ReplicaCredentials and nsDS5ReplicaBootstrapCredentials in replication agreements ++ ++ :id: 248bd418-ffa4-4733-963d-2314c60b7c5b ++ :parametrized: yes ++ :setup: Standalone Instance ++ :steps: ++ 1. Configure audit logging format ++ 2. Create a replication agreement entry with both nsDS5ReplicaCredentials and nsDS5ReplicaBootstrapCredentials ++ 3. Check that both credentials are masked in audit log ++ 4. Modify both credentials ++ 5. Check that both updated credentials are also masked ++ 6. Verify actual credentials do not appear in log ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Both credentials should be masked with asterisks ++ 4. Success ++ 5. Both updated credentials should be masked with asterisks ++ 6. 
No actual credential values should be found in log ++ """ ++ inst = topo.ms['supplier2'] ++ setup_audit_logging(inst, log_format, display_attrs) ++ agmt = None ++ ++ try: ++ replicas = Replicas(inst) ++ replica = replicas.get(DEFAULT_SUFFIX) ++ agmts = replica.get_agreements() ++ agmt = agmts.create(properties={ ++ 'cn': 'testbootstrapagmt', ++ 'nsDS5ReplicaHost': 'localhost', ++ 'nsDS5ReplicaPort': '389', ++ 'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config', ++ 'nsDS5ReplicaCredentials': TEST_PASSWORD, ++ 'nsDS5replicabootstrapbinddn': 'cn=bootstrap manager,cn=config', ++ 'nsDS5ReplicaBootstrapCredentials': TEST_PASSWORD_2, ++ 'nsDS5ReplicaRoot': DEFAULT_SUFFIX ++ }) ++ ++ found_masked_bootstrap, found_actual_bootstrap, found_hashed_bootstrap = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2) ++ assert found_masked_bootstrap, f"Masked bootstrap credentials not found in {log_format} ADD operation" ++ assert not found_actual_bootstrap, f"Actual bootstrap credentials found in {log_format} ADD log (should be masked)" ++ assert not found_hashed_bootstrap, f"Hashed bootstrap credentials found in {log_format} ADD log (should be masked)" ++ ++ agmt.replace('nsDS5ReplicaBootstrapCredentials', TEST_PASSWORD_3) ++ ++ found_masked_bootstrap_2, found_actual_bootstrap_2, found_hashed_bootstrap_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_3) ++ assert found_masked_bootstrap_2, f"Masked bootstrap credentials not found in {log_format} MODIFY operation" ++ assert not found_actual_bootstrap_2, f"Actual bootstrap credentials found in {log_format} MODIFY log (should be masked)" ++ assert not found_hashed_bootstrap_2, f"Hashed bootstrap credentials found in {log_format} MODIFY log (should be masked)" ++ ++ finally: ++ if agmt is not None: ++ agmt.delete() ++ ++ ++ ++if __name__ == '__main__': ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main(["-s", CURRENT_FILE]) +\ No newline at end of file +diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c +index 3945b0533..3a34959f6 100644 +--- a/ldap/servers/slapd/auditlog.c ++++ b/ldap/servers/slapd/auditlog.c +@@ -39,6 +39,89 @@ static void write_audit_file(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype, + + static const char *modrdn_changes[4]; + ++/* Helper function to check if an attribute is a password that needs masking */ ++static int ++is_password_attribute(const char *attr_name) ++{ ++ return (strcasecmp(attr_name, SLAPI_USERPWD_ATTR) == 0 || ++ strcasecmp(attr_name, CONFIG_ROOTPW_ATTRIBUTE) == 0 || ++ strcasecmp(attr_name, SLAPI_MB_CREDENTIALS) == 0 || ++ strcasecmp(attr_name, SLAPI_REP_CREDENTIALS) == 0 || ++ strcasecmp(attr_name, SLAPI_REP_BOOTSTRAP_CREDENTIALS) == 0); ++} ++ ++/* Helper function to create a masked string representation of an entry */ ++static char * ++create_masked_entry_string(Slapi_Entry *original_entry, int *len) ++{ ++ Slapi_Attr *attr = NULL; ++ char *entry_str = NULL; ++ char *current_pos = NULL; ++ char *line_start = NULL; ++ char *next_line = NULL; ++ char *colon_pos = NULL; ++ int has_password_attrs = 0; ++ ++ if (original_entry == NULL) { ++ return NULL; ++ } ++ ++ /* Single pass through attributes to check for password attributes */ ++ for (slapi_entry_first_attr(original_entry, &attr); attr != NULL; ++ slapi_entry_next_attr(original_entry, attr, &attr)) { ++ ++ char *attr_name = NULL; ++ slapi_attr_get_type(attr, &attr_name); ++ ++ if (is_password_attribute(attr_name)) { ++ has_password_attrs = 1; ++ break; ++ } ++ } ++ ++ /* If no 
password attributes, return original string - no masking needed */ ++ entry_str = slapi_entry2str(original_entry, len); ++ if (!has_password_attrs) { ++ return entry_str; ++ } ++ ++ /* Process the string in-place, replacing password values */ ++ current_pos = entry_str; ++ while ((line_start = current_pos) != NULL && *line_start != '\0') { ++ /* Find the end of current line */ ++ next_line = strchr(line_start, '\n'); ++ if (next_line != NULL) { ++ *next_line = '\0'; /* Temporarily terminate line */ ++ current_pos = next_line + 1; ++ } else { ++ current_pos = NULL; /* Last line */ ++ } ++ ++ /* Find the colon that separates attribute name from value */ ++ colon_pos = strchr(line_start, ':'); ++ if (colon_pos != NULL) { ++ char saved_colon = *colon_pos; ++ *colon_pos = '\0'; /* Temporarily null-terminate attribute name */ ++ ++ /* Check if this is a password attribute that needs masking */ ++ if (is_password_attribute(line_start)) { ++ strcpy(colon_pos + 1, " **********************"); ++ } ++ ++ *colon_pos = saved_colon; /* Restore colon */ ++ } ++ ++ /* Restore newline if it was there */ ++ if (next_line != NULL) { ++ *next_line = '\n'; ++ } ++ } ++ ++ /* Update length since we may have shortened the string */ ++ *len = strlen(entry_str); ++ return entry_str; /* Return the modified original string */ ++} ++ + void + write_audit_log_entry(Slapi_PBlock *pb) + { +@@ -279,10 +362,31 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object + { + slapi_entry_attr_find(entry, req_attr, &entry_attr); + if (entry_attr) { +- if (use_json) { +- log_entry_attr_json(entry_attr, req_attr, id_list); ++ if (strcmp(req_attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) { ++ /* Do not write the unhashed clear-text password */ ++ continue; ++ } ++ ++ /* Check if this is a password attribute that needs masking */ ++ if (is_password_attribute(req_attr)) { ++ /* userpassword/rootdn password - mask the value */ ++ if (use_json) { ++ json_object *secret_obj = json_object_new_object(); ++ json_object_object_add(secret_obj, req_attr, ++ json_object_new_string("**********************")); ++ json_object_array_add(id_list, secret_obj); ++ } else { ++ addlenstr(l, "#"); ++ addlenstr(l, req_attr); ++ addlenstr(l, ": **********************\n"); ++ } + } else { +- log_entry_attr(entry_attr, req_attr, l); ++ /* Regular attribute - log normally */ ++ if (use_json) { ++ log_entry_attr_json(entry_attr, req_attr, id_list); ++ } else { ++ log_entry_attr(entry_attr, req_attr, l); ++ } + } + } + } +@@ -297,9 +401,7 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object + continue; + } + +- if (strcasecmp(attr, SLAPI_USERPWD_ATTR) == 0 || +- strcasecmp(attr, CONFIG_ROOTPW_ATTRIBUTE) == 0) +- { ++ if (is_password_attribute(attr)) { + /* userpassword/rootdn password - mask the value */ + if (use_json) { + json_object *secret_obj = json_object_new_object(); +@@ -309,7 +411,7 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object + } else { + addlenstr(l, "#"); + addlenstr(l, attr); +- addlenstr(l, ": ****************************\n"); ++ addlenstr(l, ": **********************\n"); + } + continue; + } +@@ -478,6 +580,9 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype, + } + } + ++ /* Check if this is a password attribute that needs masking */ ++ int is_password_attr = is_password_attribute(mods[j]->mod_type); ++ + mod = json_object_new_object(); + switch (operationtype) { + case LDAP_MOD_ADD: +@@ -502,7 +607,12 @@ 
write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype, + json_object *val_list = NULL; + val_list = json_object_new_array(); + for (size_t i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) { +- json_object_array_add(val_list, json_object_new_string(mods[j]->mod_bvalues[i]->bv_val)); ++ if (is_password_attr) { ++ /* Mask password values */ ++ json_object_array_add(val_list, json_object_new_string("**********************")); ++ } else { ++ json_object_array_add(val_list, json_object_new_string(mods[j]->mod_bvalues[i]->bv_val)); ++ } + } + json_object_object_add(mod, "values", val_list); + } +@@ -514,8 +624,11 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype, + + case SLAPI_OPERATION_ADD: + int len; ++ + e = change; +- tmp = slapi_entry2str(e, &len); ++ ++ /* Create a masked string representation for password attributes */ ++ tmp = create_masked_entry_string(e, &len); + tmpsave = tmp; + while ((tmp = strchr(tmp, '\n')) != NULL) { + tmp++; +@@ -662,6 +775,10 @@ write_audit_file( + break; + } + } ++ ++ /* Check if this is a password attribute that needs masking */ ++ int is_password_attr = is_password_attribute(mods[j]->mod_type); ++ + switch (operationtype) { + case LDAP_MOD_ADD: + addlenstr(l, "add: "); +@@ -686,18 +803,27 @@ write_audit_file( + break; + } + if (operationtype != LDAP_MOD_IGNORE) { +- for (i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) { +- char *buf, *bufp; +- len = strlen(mods[j]->mod_type); +- len = LDIF_SIZE_NEEDED(len, mods[j]->mod_bvalues[i]->bv_len) + 1; +- buf = slapi_ch_malloc(len); +- bufp = buf; +- slapi_ldif_put_type_and_value_with_options(&bufp, mods[j]->mod_type, +- mods[j]->mod_bvalues[i]->bv_val, +- mods[j]->mod_bvalues[i]->bv_len, 0); +- *bufp = '\0'; +- addlenstr(l, buf); +- slapi_ch_free((void **)&buf); ++ if (is_password_attr) { ++ /* Add masked password */ ++ for (i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) { ++ addlenstr(l, mods[j]->mod_type); ++ addlenstr(l, ": **********************\n"); ++ } ++ } else { ++ /* Add actual values for non-password attributes */ ++ for (i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) { ++ char *buf, *bufp; ++ len = strlen(mods[j]->mod_type); ++ len = LDIF_SIZE_NEEDED(len, mods[j]->mod_bvalues[i]->bv_len) + 1; ++ buf = slapi_ch_malloc(len); ++ bufp = buf; ++ slapi_ldif_put_type_and_value_with_options(&bufp, mods[j]->mod_type, ++ mods[j]->mod_bvalues[i]->bv_val, ++ mods[j]->mod_bvalues[i]->bv_len, 0); ++ *bufp = '\0'; ++ addlenstr(l, buf); ++ slapi_ch_free((void **)&buf); ++ } + } + } + addlenstr(l, "-\n"); +@@ -708,7 +834,7 @@ write_audit_file( + e = change; + addlenstr(l, attr_changetype); + addlenstr(l, ": add\n"); +- tmp = slapi_entry2str(e, &len); ++ tmp = create_masked_entry_string(e, &len); + tmpsave = tmp; + while ((tmp = strchr(tmp, '\n')) != NULL) { + tmp++; +diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index 7a3eb3fdf..fb88488b1 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -848,6 +848,7 @@ void task_cleanup(void); + /* for reversible encyrption */ + #define SLAPI_MB_CREDENTIALS "nsmultiplexorcredentials" + #define SLAPI_REP_CREDENTIALS "nsds5ReplicaCredentials" ++#define SLAPI_REP_BOOTSTRAP_CREDENTIALS "nsds5ReplicaBootstrapCredentials" + int pw_rever_encode(Slapi_Value **vals, char *attr_name); + int pw_rever_decode(char *cipher, char **plain, const char 
*attr_name); + +diff --git a/src/lib389/lib389/chaining.py b/src/lib389/lib389/chaining.py +index 533b83ebf..33ae78c8b 100644 +--- a/src/lib389/lib389/chaining.py ++++ b/src/lib389/lib389/chaining.py +@@ -134,7 +134,7 @@ class ChainingLink(DSLdapObject): + """ + + # Create chaining entry +- super(ChainingLink, self).create(rdn, properties, basedn) ++ link = super(ChainingLink, self).create(rdn, properties, basedn) + + # Create mapping tree entry + dn_comps = ldap.explode_dn(properties['nsslapd-suffix'][0]) +@@ -149,6 +149,7 @@ class ChainingLink(DSLdapObject): + self._mts.ensure_state(properties=mt_properties) + except ldap.ALREADY_EXISTS: + pass ++ return link + + + class ChainingLinks(DSLdapObjects): +-- +2.49.0 + diff --git a/SOURCES/0019-Issue-6897-Fix-disk-monitoring-test-failures-and-imp.patch b/SOURCES/0019-Issue-6897-Fix-disk-monitoring-test-failures-and-imp.patch new file mode 100644 index 0000000..e9a2e57 --- /dev/null +++ b/SOURCES/0019-Issue-6897-Fix-disk-monitoring-test-failures-and-imp.patch @@ -0,0 +1,1721 @@ +From 590c11b8fb24dde31910614eff1810e2eb0377a9 Mon Sep 17 00:00:00 2001 +From: Simon Pichugin +Date: Mon, 28 Jul 2025 20:02:09 -0700 +Subject: [PATCH] Issue 6897 - Fix disk monitoring test failures and improve + test maintainability (#6898) + +Description: Refactor disk_monitoring_test.py to address failures and +improve maintainability. Replace manual sleep loops with proper wait +conditions using wait_for_condition() and wait_for_log_entry() helpers. +Add comprehensive logging throughout all tests for better debugging. +Implement configuration capture/restore to prevent test pollution +between runs. +Change fixture scope from module to function level for better test +isolation and ensure proper cleanup in all test cases. + +Fixes: https://github.com/389ds/389-ds-base/issues/6897 + +Reviewed by: @mreynolds389 (Thanks!) +--- + .../disk_monitoring/disk_monitoring_test.py | 1429 +++++++++++------ + 1 file changed, 940 insertions(+), 489 deletions(-) + +diff --git a/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py b/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py +index 78d7dd794..a4c445748 100644 +--- a/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py ++++ b/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py +@@ -1,17 +1,18 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2018 Red Hat, Inc. ++# Copyright (C) 2025 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). + # See LICENSE for details. 
+ # --- END COPYRIGHT BLOCK --- + +- + import os + import subprocess + import re + import time ++import ldap + import pytest ++import logging + from lib389.tasks import * + from lib389._constants import * + from lib389.utils import ensure_bytes +@@ -20,95 +21,221 @@ from lib389.topologies import topology_st as topo + from lib389.paths import * + from lib389.idm.user import UserAccounts + ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ + pytestmark = pytest.mark.tier2 + disk_monitoring_ack = pytest.mark.skipif(not os.environ.get('DISK_MONITORING_ACK', False), reason="Disk monitoring tests may damage system configuration.") + +-THRESHOLD = '30' +-THRESHOLD_BYTES = '30000000' ++THRESHOLD_BYTES = 30000000 + + +-def _withouterrorlog(topo, condition, maxtimesleep): +- timecount = 0 +- while eval(condition): +- time.sleep(1) +- timecount += 1 +- if timecount >= maxtimesleep: break +- assert not eval(condition) ++def presetup(inst): ++ """Presetup function to mount a tmpfs for log directory to simulate disk space limits.""" + ++ log.info("Setting up tmpfs for disk monitoring tests") ++ inst.stop() ++ log_dir = inst.ds_paths.log_dir + +-def _witherrorlog(topo, condition, maxtimesleep): +- timecount = 0 +- with open(topo.standalone.errlog, 'r') as study: study = study.read() +- while condition not in study: +- time.sleep(1) +- timecount += 1 +- with open(topo.standalone.errlog, 'r') as study: study = study.read() +- if timecount >= maxtimesleep: break +- assert condition in study ++ if os.path.exists(log_dir): ++ log.debug(f"Mounting tmpfs on existing directory: {log_dir}") ++ subprocess.call(['mount', '-t', 'tmpfs', '-o', 'size=35M', 'tmpfs', log_dir]) ++ else: ++ log.debug(f"Creating and mounting tmpfs on new directory: {log_dir}") ++ os.mkdir(log_dir) ++ subprocess.call(['mount', '-t', 'tmpfs', '-o', 'size=35M', 'tmpfs', log_dir]) ++ ++ subprocess.call(f'chown {DEFAULT_USER}: -R {log_dir}', shell=True) ++ subprocess.call(f'chown {DEFAULT_USER}: -R {log_dir}/*', shell=True) ++ subprocess.call(f'restorecon -FvvR {log_dir}', shell=True) ++ inst.start() ++ log.info("tmpfs setup completed") ++ ++ ++def setupthesystem(inst): ++ """Setup system configuration for disk monitoring tests.""" ++ ++ log.info("Configuring system for disk monitoring tests") ++ inst.start() ++ inst.config.set('nsslapd-disk-monitoring-grace-period', '1') ++ inst.config.set('nsslapd-accesslog-logbuffering', 'off') ++ inst.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(str(THRESHOLD_BYTES))) ++ inst.restart() ++ log.info("System configuration completed") ++ ++ ++def capture_config(inst): ++ """Capture current configuration values for later restoration.""" ++ ++ log.info("Capturing current configuration values") ++ ++ config_attrs = [ ++ 'nsslapd-disk-monitoring', ++ 'nsslapd-disk-monitoring-threshold', ++ 'nsslapd-disk-monitoring-grace-period', ++ 'nsslapd-disk-monitoring-logging-critical', ++ 'nsslapd-disk-monitoring-readonly-on-threshold', ++ 'nsslapd-accesslog-logbuffering', ++ 'nsslapd-errorlog-level', ++ 'nsslapd-accesslog-logging-enabled', ++ 'nsslapd-accesslog-maxlogsize', ++ 'nsslapd-accesslog-logrotationtimeunit', ++ 'nsslapd-accesslog-level', ++ 'nsslapd-external-libs-debug-enabled', ++ 'nsslapd-errorlog-logging-enabled' ++ ] ++ ++ captured_config = {} ++ for config_attr in config_attrs: ++ try: ++ current_value = 
inst.config.get_attr_val_utf8(config_attr) ++ captured_config[config_attr] = current_value ++ log.debug(f"Captured {config_attr}: {current_value}") ++ except Exception as e: ++ log.debug(f"Could not capture {config_attr}: {e}") ++ captured_config[config_attr] = None + ++ log.info("Configuration capture completed") ++ return captured_config + +-def presetup(topo): +- """ +- This is function is part of fixture function setup , will setup the environment for this test. +- """ +- topo.standalone.stop() +- if os.path.exists(topo.standalone.ds_paths.log_dir): +- subprocess.call(['mount', '-t', 'tmpfs', '-o', 'size=35M', 'tmpfs', topo.standalone.ds_paths.log_dir]) +- else: +- os.mkdir(topo.standalone.ds_paths.log_dir) +- subprocess.call(['mount', '-t', 'tmpfs', '-o', 'size=35M', 'tmpfs', topo.standalone.ds_paths.log_dir]) +- subprocess.call('chown {}: -R {}'.format(DEFAULT_USER, topo.standalone.ds_paths.log_dir), shell=True) +- subprocess.call('chown {}: -R {}/*'.format(DEFAULT_USER, topo.standalone.ds_paths.log_dir), shell=True) +- subprocess.call('restorecon -FvvR {}'.format(topo.standalone.ds_paths.log_dir), shell=True) +- topo.standalone.start() + ++def restore_config(inst, captured_config): ++ """Restore configuration values to previously captured state.""" + +-def setupthesystem(topo): +- """ +- This function is part of fixture function setup , will setup the environment for this test. +- """ +- global TOTAL_SIZE, USED_SIZE, AVAIL_SIZE, HALF_THR_FILL_SIZE, FULL_THR_FILL_SIZE +- topo.standalone.start() +- topo.standalone.config.set('nsslapd-disk-monitoring-grace-period', '1') +- topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') +- topo.standalone.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(THRESHOLD_BYTES)) +- TOTAL_SIZE = int(re.findall(r'\d+', str(os.statvfs(topo.standalone.ds_paths.log_dir)))[2])*4096/1024/1024 +- AVAIL_SIZE = round(int(re.findall(r'\d+', str(os.statvfs(topo.standalone.ds_paths.log_dir)))[3]) * 4096 / 1024 / 1024) +- USED_SIZE = TOTAL_SIZE - AVAIL_SIZE +- HALF_THR_FILL_SIZE = TOTAL_SIZE - float(THRESHOLD) + 5 - USED_SIZE +- FULL_THR_FILL_SIZE = TOTAL_SIZE - 0.5 * float(THRESHOLD) + 5 - USED_SIZE +- HALF_THR_FILL_SIZE = round(HALF_THR_FILL_SIZE) +- FULL_THR_FILL_SIZE = round(FULL_THR_FILL_SIZE) +- topo.standalone.restart() +- +- +-@pytest.fixture(scope="module") ++ log.info("Restoring configuration to captured values") ++ ++ for config_attr, original_value in captured_config.items(): ++ if original_value is not None: ++ try: ++ current_value = inst.config.get_attr_val_utf8(config_attr) ++ if current_value != original_value: ++ log.debug(f"Restoring {config_attr} from '{current_value}' to '{original_value}'") ++ inst.config.set(config_attr, ensure_bytes(original_value)) ++ except Exception as e: ++ log.debug(f"Could not restore {config_attr}: {e}") ++ ++ log.info("Configuration restoration completed") ++ ++ ++@pytest.fixture(scope="function") + def setup(request, topo): +- """ +- This is the fixture function , will run before running every test case. 
+- """ +- presetup(topo) +- setupthesystem(topo) ++ """Module-level fixture to setup the test environment.""" ++ ++ log.info("Starting module setup for disk monitoring tests") ++ inst = topo.standalone ++ ++ # Capture current configuration before making any changes ++ original_config = capture_config(inst) ++ ++ presetup(inst) ++ setupthesystem(inst) + + def fin(): +- topo.standalone.stop() +- subprocess.call(['umount', '-fl', topo.standalone.ds_paths.log_dir]) +- topo.standalone.start() ++ log.info("Running module cleanup for disk monitoring tests") ++ inst.stop() ++ subprocess.call(['umount', '-fl', inst.ds_paths.log_dir]) ++ # Restore configuration to original values ++ inst.start() ++ restore_config(inst, original_config) ++ log.info("Module cleanup completed") + + request.addfinalizer(fin) + + ++def wait_for_condition(inst, condition_str, timeout=30): ++ """Wait until the given condition evaluates to False.""" ++ ++ log.debug(f"Waiting for condition to be False: {condition_str} (timeout: {timeout}s)") ++ start_time = time.time() ++ while time.time() - start_time < timeout: ++ if not eval(condition_str): ++ log.debug(f"Condition satisfied after {time.time() - start_time:.2f}s") ++ return ++ time.sleep(1) ++ raise AssertionError(f"Condition '{condition_str}' still True after {timeout} seconds") ++ ++ ++def wait_for_log_entry(inst, message, timeout=30): ++ """Wait for a specific message to appear in the error log.""" ++ ++ log.debug(f"Waiting for log entry: '{message}' (timeout: {timeout}s)") ++ start_time = time.time() ++ while time.time() - start_time < timeout: ++ with open(inst.errlog, 'r') as log_file: ++ if message in log_file.read(): ++ log.debug(f"Found log entry after {time.time() - start_time:.2f}s") ++ return ++ time.sleep(1) ++ raise AssertionError(f"Message '{message}' not found in error log after {timeout} seconds") ++ ++ ++def get_avail_bytes(path): ++ """Get available bytes on the filesystem at the given path.""" ++ ++ stat = os.statvfs(path) ++ return stat.f_bavail * stat.f_bsize ++ ++ ++def fill_to_target_avail(path, target_avail_bytes): ++ """Fill the disk to reach the target available bytes by creating a large file.""" ++ ++ avail = get_avail_bytes(path) ++ fill_bytes = avail - target_avail_bytes ++ log.debug(f"Current available: {avail}, target: {target_avail_bytes}, will create {fill_bytes} byte file") ++ if fill_bytes <= 0: ++ raise ValueError("Already below target avail") ++ ++ fill_file = os.path.join(path, 'fill.dd') ++ bs = 4096 ++ count = (fill_bytes + bs - 1) // bs # ceil division to ensure enough ++ log.info(f"Creating fill file {fill_file} with {count} blocks of {bs} bytes") ++ subprocess.check_call(['dd', 'if=/dev/zero', f'of={fill_file}', f'bs={bs}', f'count={count}']) ++ return fill_file ++ ++ + @pytest.fixture(scope="function") + def reset_logs(topo): +- """ +- Reset the errors log file before the test +- """ +- open('{}/errors'.format(topo.standalone.ds_paths.log_dir), 'w').close() ++ """Function-level fixture to reset the error log before each test.""" ++ ++ log.debug("Resetting error logs before test") ++ topo.standalone.deleteErrorLogs() ++ ++ ++def generate_access_log_activity(inst, num_users=10, num_binds=100): ++ """Generate access log activity by creating users and performing binds.""" ++ ++ log.info(f"Generating access log activity with {num_users} users and {num_binds} binds each") ++ users = UserAccounts(inst, DEFAULT_SUFFIX) ++ ++ # Create test users ++ for i in range(num_users): ++ user_properties = { ++ 'uid': f'cn=user{i}', ++ 'cn': 
f'cn=user{i}', ++ 'sn': f'cn=user{i}', ++ 'userPassword': "Itsme123", ++ 'uidNumber': f'1{i}', ++ 'gidNumber': f'2{i}', ++ 'homeDirectory': f'/home/{i}' ++ } ++ users.create(properties=user_properties) ++ ++ # Perform bind operations ++ for j in range(num_binds): ++ for user in users.list(): ++ user.bind('Itsme123') ++ ++ log.info("Access log activity generation completed") ++ return users + + + @disk_monitoring_ack + def test_verify_operation_when_disk_monitoring_is_off(topo, setup, reset_logs): +- """Verify operation when Disk monitoring is off ++ """Verify operation when Disk monitoring is off. + + :id: 73a97536-fe9e-11e8-ba9f-8c16451d917b + :setup: Standalone +@@ -117,94 +244,127 @@ def test_verify_operation_when_disk_monitoring_is_off(topo, setup, reset_logs): + 2. Go below the threshold + 3. Check DS is up and not entering shutdown mode + :expectedresults: +- 1. Should Success +- 2. Should Success +- 3. Should Success ++ 1. Success ++ 2. Success ++ 3. Success + """ ++ log.info("Starting test_verify_operation_when_disk_monitoring_is_off") ++ inst = topo.standalone ++ fill_file = None ++ + try: +- # Turn off disk monitoring +- topo.standalone.config.set('nsslapd-disk-monitoring', 'off') +- topo.standalone.restart() +- # go below the threshold +- subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) +- subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo1'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) +- # Wait for disk monitoring plugin thread to wake up +- _withouterrorlog(topo, 'topo.standalone.status() != True', 10) ++ log.info("Disabling disk monitoring") ++ inst.config.set('nsslapd-disk-monitoring', 'off') ++ inst.restart() ++ ++ log.info(f"Filling disk to go below threshold ({THRESHOLD_BYTES} bytes)") ++ fill_file = fill_to_target_avail(inst.ds_paths.log_dir, THRESHOLD_BYTES - 1) ++ ++ log.info("Verifying server stays up despite being below threshold") ++ wait_for_condition(inst, 'inst.status() != True', 11) ++ + # Check DS is up and not entering shutdown mode +- assert topo.standalone.status() == True ++ assert inst.status() == True ++ log.info("Verified: server remains operational when disk monitoring is disabled") ++ + finally: +- os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) +- os.remove('{}/foo1'.format(topo.standalone.ds_paths.log_dir)) ++ if fill_file and os.path.exists(fill_file): ++ log.debug(f"Cleaning up fill file: {fill_file}") ++ os.remove(fill_file) ++ ++ log.info("Test completed successfully") + + + @disk_monitoring_ack + def test_enable_external_libs_debug_log(topo, setup, reset_logs): +- """Check that OpenLDAP logs are successfully enabled and disabled when +- disk threshold is reached ++ """Check that OpenLDAP logs are successfully enabled and disabled when disk threshold is reached. + + :id: 121b2b24-ecba-48e2-9ee2-312d929dc8c6 + :setup: Standalone instance +- :steps: 1. Set nsslapd-external-libs-debug-enabled to "on" +- 2. Go straight below 1/2 of the threshold +- 3. Verify that the external libs debug setting is disabled +- 4. Go back above 1/2 of the threshold +- 5. Verify that the external libs debug setting is enabled back +- :expectedresults: 1. Success +- 2. Success +- 3. Success +- 4. Success +- 5. Success ++ :steps: ++ 1. Set nsslapd-external-libs-debug-enabled to "on" ++ 2. Go straight below 1/2 of the threshold ++ 3. Verify that the external libs debug setting is disabled ++ 4. 
Go back above 1/2 of the threshold ++ 5. Verify that the external libs debug setting is enabled back ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success + """ ++ log.info("Starting test_enable_external_libs_debug_log") ++ inst = topo.standalone ++ fill_file = None ++ + try: +- # Verify that verbose logging was set to default level +- assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') +- assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') +- assert topo.standalone.config.set('nsslapd-external-libs-debug-enabled', 'on') +- assert topo.standalone.config.set('nsslapd-errorlog-level', '8') +- topo.standalone.restart() +- subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) +- # Verify that logging is disabled +- _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-external-libs-debug-enabled') != 'off'", 31) ++ log.info("Configuring disk monitoring and external libs debug") ++ inst.config.set('nsslapd-disk-monitoring', 'on') ++ inst.config.set('nsslapd-disk-monitoring-logging-critical', 'off') ++ inst.config.set('nsslapd-external-libs-debug-enabled', 'on') ++ inst.config.set('nsslapd-errorlog-level', '8') ++ inst.restart() ++ ++ log.info(f"Filling disk to go below half threshold ({THRESHOLD_BYTES // 2} bytes)") ++ fill_file = fill_to_target_avail(inst.ds_paths.log_dir, THRESHOLD_BYTES // 2 - 1) ++ ++ log.info("Verifying external libs debug is automatically disabled") ++ wait_for_condition(inst, "inst.config.get_attr_val_utf8('nsslapd-external-libs-debug-enabled') != 'off'", 31) ++ + finally: +- os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) +- _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-external-libs-debug-enabled') != 'on'", 31) +- assert topo.standalone.config.set('nsslapd-external-libs-debug-enabled', 'off') ++ if fill_file and os.path.exists(fill_file): ++ log.debug(f"Cleaning up fill file: {fill_file}") ++ os.remove(fill_file) ++ ++ log.info("Verifying external libs debug is re-enabled after freeing space") ++ wait_for_condition(inst, "inst.config.get_attr_val_utf8('nsslapd-external-libs-debug-enabled') != 'on'", 31) ++ inst.config.set('nsslapd-external-libs-debug-enabled', 'off') ++ ++ log.info("Test completed successfully") + + + @disk_monitoring_ack + def test_free_up_the_disk_space_and_change_ds_config(topo, setup, reset_logs): +- """Free up the disk space and change DS config ++ """Free up the disk space and change DS config. + + :id: 7be4d560-fe9e-11e8-a307-8c16451d917b + :setup: Standalone + :steps: +- 1. Enabling Disk Monitoring plugin and setting disk monitoring logging to critical ++ 1. Enable Disk Monitoring plugin and set disk monitoring logging to critical + 2. Verify no message about loglevel is present in the error log + 3. Verify no message about disabling logging is present in the error log + 4. Verify no message about removing rotated logs is present in the error log + :expectedresults: +- 1. Should Success +- 2. Should Success +- 3. Should Success +- 4. Should Success ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. 
Success + """ +- # Enabling Disk Monitoring plugin and setting disk monitoring logging to critical +- assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') +- assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') +- assert topo.standalone.config.set('nsslapd-errorlog-level', '8') +- topo.standalone.restart() +- # Verify no message about loglevel is present in the error log +- # Verify no message about disabling logging is present in the error log +- # Verify no message about removing rotated logs is present in the error log +- with open(topo.standalone.errlog, 'r') as study: study = study.read() +- assert 'temporarily setting error loglevel to zero' not in study +- assert 'disabling access and audit logging' not in study +- assert 'deleting rotated logs' not in study ++ log.info("Starting test_free_up_the_disk_space_and_change_ds_config") ++ inst = topo.standalone ++ ++ log.info("Enabling disk monitoring with critical logging") ++ inst.config.set('nsslapd-disk-monitoring', 'on') ++ inst.config.set('nsslapd-disk-monitoring-logging-critical', 'on') ++ inst.config.set('nsslapd-errorlog-level', '8') ++ inst.restart() ++ ++ log.info("Verifying no premature disk monitoring messages in error log") ++ with open(inst.errlog, 'r') as err_log: ++ content = err_log.read() ++ ++ assert 'temporarily setting error loglevel to zero' not in content ++ assert 'disabling access and audit logging' not in content ++ assert 'deleting rotated logs' not in content ++ ++ log.info("Verified: no unexpected disk monitoring messages found") ++ log.info("Test completed successfully") + + + @disk_monitoring_ack + def test_verify_operation_with_nsslapd_disk_monitoring_logging_critical_off(topo, setup, reset_logs): +- """Verify operation with "nsslapd-disk-monitoring-logging-critical: off ++ """Verify operation with "nsslapd-disk-monitoring-logging-critical: off". + + :id: 82363bca-fe9e-11e8-9ae7-8c16451d917b + :setup: Standalone +@@ -213,39 +373,59 @@ def test_verify_operation_with_nsslapd_disk_monitoring_logging_critical_off(topo + 2. Verify that logging is disabled + 3. Verify that rotated logs were not removed + :expectedresults: +- 1. Should Success +- 2. Should Success +- 3. Should Success ++ 1. Success ++ 2. Success ++ 3. 
Success + """ ++ log.info("Starting test_verify_operation_with_nsslapd_disk_monitoring_logging_critical_off") ++ inst = topo.standalone ++ fill_file = None ++ + try: +- # Verify that verbose logging was set to default level +- assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') +- assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') +- assert topo.standalone.config.set('nsslapd-errorlog-level', '8') +- topo.standalone.restart() +- subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) +- _witherrorlog(topo, 'temporarily setting error loglevel to the default level', 11) +- assert LOG_DEFAULT == int(re.findall(r'nsslapd-errorlog-level: \d+', str( +- topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-errorlog-level'])))[ +- 0].split(' ')[1]) +- # Verify that logging is disabled +- _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'off'", 10) +- assert topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') == 'off' +- # Verify that rotated logs were not removed +- with open(topo.standalone.errlog, 'r') as study: study = study.read() +- assert 'disabling access and audit logging' in study +- _witherrorlog(topo, 'deleting rotated logs', 11) +- study = open(topo.standalone.errlog).read() +- assert "Unable to remove file: {}".format(topo.standalone.ds_paths.log_dir) not in study +- assert 'is too far below the threshold' not in study ++ log.info("Configuring disk monitoring with critical logging disabled") ++ inst.config.set('nsslapd-disk-monitoring', 'on') ++ inst.config.set('nsslapd-disk-monitoring-logging-critical', 'off') ++ inst.config.set('nsslapd-errorlog-level', '8') ++ inst.restart() ++ ++ log.info(f"Filling disk to go below threshold ({THRESHOLD_BYTES} bytes)") ++ fill_file = fill_to_target_avail(inst.ds_paths.log_dir, THRESHOLD_BYTES - 1) ++ ++ log.info("Waiting for loglevel to be set to default") ++ wait_for_log_entry(inst, 'temporarily setting error loglevel to the default level', 11) ++ ++ log.info("Verifying error log level was set to default") ++ config_entry = inst.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-errorlog-level']) ++ current_level = int(re.findall(r'nsslapd-errorlog-level: \d+', str(config_entry))[0].split(' ')[1]) ++ assert LOG_DEFAULT == current_level ++ ++ log.info("Verifying access logging is disabled") ++ wait_for_condition(inst, "inst.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'off'", 11) ++ assert inst.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') == 'off' ++ ++ log.info("Verifying expected disk monitoring messages") ++ with open(inst.errlog, 'r') as err_log: ++ content = err_log.read() ++ ++ assert 'disabling access and audit logging' in content ++ wait_for_log_entry(inst, 'deleting rotated logs', 11) ++ assert f"Unable to remove file: {inst.ds_paths.log_dir}" not in content ++ assert 'is too far below the threshold' not in content ++ ++ log.info("All verifications passed") ++ + finally: +- os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) ++ if fill_file and os.path.exists(fill_file): ++ log.debug(f"Cleaning up fill file: {fill_file}") ++ os.remove(fill_file) ++ ++ log.info("Test completed successfully") + + + @disk_monitoring_ack + def test_operation_with_nsslapd_disk_monitoring_logging_critical_on_below_half_of_the_threshold(topo, setup, 
reset_logs): +- """Verify operation with \"nsslapd-disk-monitoring-logging-critical: on\" below 1/2 of the threshold +- Verify recovery ++ """Verify operation with "nsslapd-disk-monitoring-logging-critical: on" below 1/2 of the threshold. ++ Verify recovery. + + :id: 8940c502-fe9e-11e8-bcc0-8c16451d917b + :setup: Standalone +@@ -253,190 +433,277 @@ def test_operation_with_nsslapd_disk_monitoring_logging_critical_on_below_half_o + 1. Verify that DS goes into shutdown mode + 2. Verify that DS exited shutdown mode + :expectedresults: +- 1. Should Success +- 2. Should Success ++ 1. Success ++ 2. Success + """ +- assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') +- assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') +- topo.standalone.restart() +- # Verify that DS goes into shutdown mode +- if float(THRESHOLD) > FULL_THR_FILL_SIZE: +- FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1 +- subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE_new)]) +- else: +- subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) +- _witherrorlog(topo, 'is too far below the threshold', 20) +- os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) +- # Verify that DS exited shutdown mode +- _witherrorlog(topo, 'Available disk space is now acceptable', 25) ++ log.info("Starting test_operation_with_nsslapd_disk_monitoring_logging_critical_on_below_half_of_the_threshold") ++ inst = topo.standalone ++ fill_file = None ++ ++ try: ++ log.info("Configuring disk monitoring with critical logging enabled") ++ inst.config.set('nsslapd-disk-monitoring', 'on') ++ inst.config.set('nsslapd-disk-monitoring-logging-critical', 'on') ++ inst.restart() ++ ++ log.info(f"Filling disk to go below half threshold ({THRESHOLD_BYTES // 2} bytes)") ++ fill_file = fill_to_target_avail(inst.ds_paths.log_dir, THRESHOLD_BYTES // 2 - 1) ++ ++ log.info("Waiting for shutdown mode message") ++ wait_for_log_entry(inst, 'is too far below the threshold', 100) ++ ++ log.info("Freeing up disk space") ++ os.remove(fill_file) ++ fill_file = None ++ ++ log.info("Waiting for recovery message") ++ wait_for_log_entry(inst, 'Available disk space is now acceptable', 25) ++ ++ log.info("Verified: server entered and exited shutdown mode correctly") ++ ++ finally: ++ if fill_file and os.path.exists(fill_file): ++ log.debug(f"Cleaning up fill file: {fill_file}") ++ os.remove(fill_file) ++ ++ log.info("Test completed successfully") + + + @disk_monitoring_ack + def test_setting_nsslapd_disk_monitoring_logging_critical_to_off(topo, setup, reset_logs): +- """Setting nsslapd-disk-monitoring-logging-critical to "off" ++ """Setting nsslapd-disk-monitoring-logging-critical to "off". + + :id: 93265ec4-fe9e-11e8-af93-8c16451d917b + :setup: Standalone + :steps: +- 1. Setting nsslapd-disk-monitoring-logging-critical to "off" ++ 1. Set nsslapd-disk-monitoring-logging-critical to "off" + :expectedresults: +- 1. Should Success ++ 1. 
Success + """ +- assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') +- assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') +- assert topo.standalone.config.set('nsslapd-errorlog-level', '8') +- topo.standalone.restart() +- assert topo.standalone.status() == True ++ log.info("Starting test_setting_nsslapd_disk_monitoring_logging_critical_to_off") ++ inst = topo.standalone ++ ++ log.info("Setting disk monitoring configuration") ++ inst.config.set('nsslapd-disk-monitoring', 'on') ++ inst.config.set('nsslapd-disk-monitoring-logging-critical', 'off') ++ inst.config.set('nsslapd-errorlog-level', '8') ++ inst.restart() ++ ++ log.info("Verifying server is running normally") ++ assert inst.status() == True ++ ++ log.info("Test completed successfully") + + + @disk_monitoring_ack + def test_operation_with_nsslapd_disk_monitoring_logging_critical_off(topo, setup, reset_logs): +- """Verify operation with nsslapd-disk-monitoring-logging-critical: off ++ """Verify operation with nsslapd-disk-monitoring-logging-critical: off. + + :id: 97985a52-fe9e-11e8-9914-8c16451d917b + :setup: Standalone + :steps: +- 1. Verify that logging is disabled +- 2. Verify that rotated logs were removed ++ 1. Generate access log activity to create rotated logs ++ 2. Go below threshold to trigger disk monitoring + 3. Verify that verbose logging was set to default level + 4. Verify that logging is disabled + 5. Verify that rotated logs were removed + :expectedresults: +- 1. Should Success +- 2. Should Success +- 3. Should Success +- 4. Should Success +- 5. Should Success ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success + """ +- # Verify that logging is disabled ++ log.info("Starting test_operation_with_nsslapd_disk_monitoring_logging_critical_off") ++ inst = topo.standalone ++ fill_file = None ++ users = None ++ + try: +- assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') +- assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') +- assert topo.standalone.config.set('nsslapd-errorlog-level', '8') +- assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '1') +- assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'minute') +- assert topo.standalone.config.set('nsslapd-accesslog-level', '772') +- topo.standalone.restart() +- # Verify that rotated logs were removed +- users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) +- for i in range(10): +- user_properties = { +- 'uid': 'cn=anuj{}'.format(i), +- 'cn': 'cn=anuj{}'.format(i), +- 'sn': 'cn=anuj{}'.format(i), +- 'userPassword': "Itsme123", +- 'uidNumber': '1{}'.format(i), +- 'gidNumber': '2{}'.format(i), +- 'homeDirectory': '/home/{}'.format(i) +- } +- users.create(properties=user_properties) +- for j in range(100): +- for i in [i for i in users.list()]: i.bind('Itsme123') +- assert re.findall(r'access.\d+-\d+',str(os.listdir(topo.standalone.ds_paths.log_dir))) +- topo.standalone.bind_s(DN_DM, PW_DM) +- assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '100') +- assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'day') +- assert topo.standalone.config.set('nsslapd-accesslog-level', '256') +- topo.standalone.restart() +- subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo2'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) +- # Verify that verbose logging was set to default level +- _witherrorlog(topo, 'temporarily setting error loglevel to the default 
level', 10) +- assert LOG_DEFAULT == int(re.findall(r'nsslapd-errorlog-level: \d+', str( +- topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-errorlog-level'])))[0].split(' ')[1]) +- # Verify that logging is disabled +- _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'off'", 20) +- with open(topo.standalone.errlog, 'r') as study: study = study.read() +- assert 'disabling access and audit logging' in study +- # Verify that rotated logs were removed +- _witherrorlog(topo, 'deleting rotated logs', 10) +- with open(topo.standalone.errlog, 'r') as study:study = study.read() +- assert 'Unable to remove file:' not in study +- assert 'is too far below the threshold' not in study +- for i in [i for i in users.list()]: i.delete() ++ log.info("Configuring disk monitoring and access log settings") ++ inst.config.set('nsslapd-disk-monitoring', 'on') ++ inst.config.set('nsslapd-disk-monitoring-logging-critical', 'off') ++ inst.config.set('nsslapd-errorlog-level', '8') ++ inst.config.set('nsslapd-accesslog-maxlogsize', '1') ++ inst.config.set('nsslapd-accesslog-logrotationtimeunit', 'minute') ++ inst.config.set('nsslapd-accesslog-level', '772') ++ inst.restart() ++ ++ log.info("Generating access log activity to create rotated logs") ++ users = generate_access_log_activity(inst, num_users=10, num_binds=100) ++ ++ inst.bind_s(DN_DM, PW_DM) ++ ++ log.info("Resetting access log settings") ++ inst.config.set('nsslapd-accesslog-maxlogsize', '100') ++ inst.config.set('nsslapd-accesslog-logrotationtimeunit', 'day') ++ inst.config.set('nsslapd-accesslog-level', '256') ++ inst.restart() ++ ++ log.info(f"Filling disk to go below threshold ({THRESHOLD_BYTES} bytes)") ++ fill_file = fill_to_target_avail(inst.ds_paths.log_dir, THRESHOLD_BYTES - 1) ++ ++ log.info("Waiting for loglevel to be set to default") ++ wait_for_log_entry(inst, 'temporarily setting error loglevel to the default level', 11) ++ ++ log.info("Verifying error log level was set to default") ++ config_level = None ++ for _ in range(10): ++ time.sleep(1) ++ config_entry = inst.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-errorlog-level']) ++ config_level = int(re.findall(r'nsslapd-errorlog-level: \d+', str(config_entry))[0].split(' ')[1]) ++ if LOG_DEFAULT == config_level: ++ break ++ assert LOG_DEFAULT == config_level ++ ++ log.info("Verifying access logging is disabled") ++ wait_for_condition(inst, "inst.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') == 'off'", 20) ++ ++ with open(inst.errlog, 'r') as err_log: ++ content = err_log.read() ++ assert 'disabling access and audit logging' in content ++ ++ log.info("Verifying rotated logs are removed") ++ wait_for_log_entry(inst, 'deleting rotated logs', 20) ++ ++ rotated_logs = re.findall(r'access.\d+-\d+', str(os.listdir(inst.ds_paths.log_dir))) ++ assert not rotated_logs, f"Found unexpected rotated logs: {rotated_logs}" ++ ++ with open(inst.errlog, 'r') as err_log: ++ content = err_log.read() ++ assert 'Unable to remove file:' not in content ++ assert 'is too far below the threshold' not in content ++ ++ log.info("All verifications passed") ++ + finally: +- os.remove('{}/foo2'.format(topo.standalone.ds_paths.log_dir)) ++ # Clean up users ++ if users: ++ log.debug("Cleaning up test users") ++ for user in users.list(): ++ try: ++ user.delete() ++ except ldap.ALREADY_EXISTS: ++ pass ++ ++ if fill_file and os.path.exists(fill_file): ++ log.debug(f"Cleaning up fill file: 
{fill_file}") ++ os.remove(fill_file) ++ ++ log.info("Test completed successfully") + + + @disk_monitoring_ack + def test_operation_with_nsslapd_disk_monitoring_logging_critical_off_below_half_of_the_threshold(topo, setup, reset_logs): +- """Verify operation with nsslapd-disk-monitoring-logging-critical: off below 1/2 of the threshold +- Verify shutdown +- Recovery and setup ++ """Verify operation with nsslapd-disk-monitoring-logging-critical: off below 1/2 of the threshold. ++ Verify shutdown and recovery. + + :id: 9d4c7d48-fe9e-11e8-b5d6-8c16451d917b + :setup: Standalone + :steps: +- 1. Verify that DS goes into shutdown mode +- 2. Verifying that DS has been shut down after the grace period +- 3. Verify logging enabled +- 4. Create rotated logfile +- 5. Enable verbose logging ++ 1. Go below half threshold to trigger shutdown ++ 2. Verify DS shutdown after grace period ++ 3. Free space and restart ++ 4. Verify logging is re-enabled ++ 5. Create rotated logs and enable verbose logging + :expectedresults: +- 1. Should Success +- 2. Should Success +- 3. Should Success +- 4. Should Success +- 5. Should Success ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success + """ +- assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') +- assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') +- topo.standalone.restart() +- # Verify that DS goes into shutdown mode +- if float(THRESHOLD) > FULL_THR_FILL_SIZE: +- FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) +- subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE_new)]) +- else: +- subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) +- # Increased sleep to avoid failure +- _witherrorlog(topo, 'is too far below the threshold', 100) +- _witherrorlog(topo, 'Signaling slapd for shutdown', 90) +- # Verifying that DS has been shut down after the grace period +- time.sleep(2) +- assert topo.standalone.status() == False +- # free_space +- os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) +- open('{}/errors'.format(topo.standalone.ds_paths.log_dir), 'w').close() +- # StartSlapd +- topo.standalone.start() +- # verify logging enabled +- assert topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') == 'on' +- assert topo.standalone.config.get_attr_val_utf8('nsslapd-errorlog-logging-enabled') == 'on' +- with open(topo.standalone.errlog, 'r') as study: study = study.read() +- assert 'disabling access and audit logging' not in study +- assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '1') +- assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'minute') +- assert topo.standalone.config.set('nsslapd-accesslog-level', '772') +- topo.standalone.restart() +- # create rotated logfile +- users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) +- for i in range(10): +- user_properties = { +- 'uid': 'cn=anuj{}'.format(i), +- 'cn': 'cn=anuj{}'.format(i), +- 'sn': 'cn=anuj{}'.format(i), +- 'userPassword': "Itsme123", +- 'uidNumber': '1{}'.format(i), +- 'gidNumber': '2{}'.format(i), +- 'homeDirectory': '/home/{}'.format(i) +- } +- users.create(properties=user_properties) +- for j in range(100): +- for i in [i for i in users.list()]: i.bind('Itsme123') +- assert 
re.findall(r'access.\d+-\d+',str(os.listdir(topo.standalone.ds_paths.log_dir))) +- topo.standalone.bind_s(DN_DM, PW_DM) +- # enable verbose logging +- assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '100') +- assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'day') +- assert topo.standalone.config.set('nsslapd-accesslog-level', '256') +- assert topo.standalone.config.set('nsslapd-errorlog-level', '8') +- topo.standalone.restart() +- for i in [i for i in users.list()]: i.delete() ++ log.info("Starting test_operation_with_nsslapd_disk_monitoring_logging_critical_off_below_half_of_the_threshold") ++ inst = topo.standalone ++ fill_file = None ++ users = None ++ ++ try: ++ log.info("Configuring disk monitoring with critical logging disabled") ++ inst.config.set('nsslapd-disk-monitoring', 'on') ++ inst.config.set('nsslapd-disk-monitoring-logging-critical', 'off') ++ inst.restart() ++ ++ log.info(f"Filling disk to go below half threshold ({THRESHOLD_BYTES // 2} bytes)") ++ fill_file = fill_to_target_avail(inst.ds_paths.log_dir, THRESHOLD_BYTES // 2 - 1) ++ ++ log.info("Waiting for shutdown messages") ++ wait_for_log_entry(inst, 'is too far below the threshold', 100) ++ wait_for_log_entry(inst, 'Signaling slapd for shutdown', 90) ++ ++ log.info("Verifying server shutdown within grace period") ++ for i in range(60): ++ time.sleep(1) ++ if not inst.status(): ++ log.info(f"Server shut down after {i+1} seconds") ++ break ++ assert inst.status() == False ++ ++ log.info("Freeing disk space and cleaning logs") ++ os.remove(fill_file) ++ fill_file = None ++ open(f'{inst.ds_paths.log_dir}/errors', 'w').close() ++ ++ log.info("Starting server after freeing space") ++ inst.start() ++ ++ log.info("Verifying logging is re-enabled") ++ assert inst.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') == 'on' ++ assert inst.config.get_attr_val_utf8('nsslapd-errorlog-logging-enabled') == 'on' ++ ++ with open(inst.errlog, 'r') as err_log: ++ content = err_log.read() ++ assert 'disabling access and audit logging' not in content ++ ++ log.info("Setting up access log rotation for testing") ++ inst.config.set('nsslapd-accesslog-maxlogsize', '1') ++ inst.config.set('nsslapd-accesslog-logrotationtimeunit', 'minute') ++ inst.config.set('nsslapd-accesslog-level', '772') ++ inst.restart() ++ ++ log.info("Creating rotated log files through user activity") ++ users = generate_access_log_activity(inst, num_users=10, num_binds=100) ++ ++ log.info("Waiting for log rotation to occur") ++ for i in range(61): ++ time.sleep(1) ++ rotated_logs = re.findall(r'access.\d+-\d+', str(os.listdir(inst.ds_paths.log_dir))) ++ if rotated_logs: ++ log.info(f"Log rotation detected after {i+1} seconds") ++ break ++ assert rotated_logs, "No rotated logs found after waiting" ++ ++ inst.bind_s(DN_DM, PW_DM) ++ ++ log.info("Enabling verbose logging") ++ inst.config.set('nsslapd-accesslog-maxlogsize', '100') ++ inst.config.set('nsslapd-accesslog-logrotationtimeunit', 'day') ++ inst.config.set('nsslapd-accesslog-level', '256') ++ inst.config.set('nsslapd-errorlog-level', '8') ++ inst.restart() ++ ++ log.info("Recovery and setup verification completed") ++ ++ finally: ++ # Clean up users ++ if users: ++ log.debug("Cleaning up test users") ++ for user in users.list(): ++ try: ++ user.delete() ++ except ldap.ALREADY_EXISTS: ++ pass ++ ++ if fill_file and os.path.exists(fill_file): ++ log.debug(f"Cleaning up fill file: {fill_file}") ++ os.remove(fill_file) ++ ++ log.info("Test completed successfully") + + 
+ @disk_monitoring_ack + def test_go_straight_below_half_of_the_threshold(topo, setup, reset_logs): +- """Go straight below 1/2 of the threshold +- Recovery and setup ++ """Go straight below 1/2 of the threshold and verify recovery. + + :id: a2a0664c-fe9e-11e8-b220-8c16451d917b + :setup: Standalone +@@ -447,252 +714,417 @@ def test_go_straight_below_half_of_the_threshold(topo, setup, reset_logs): + 4. Verify DS is in shutdown mode + 5. Verify DS has recovered from shutdown + :expectedresults: +- 1. Should Success +- 2. Should Success +- 3. Should Success +- 4. Should Success +- 5. Should Success ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success + """ +- assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') +- assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') +- assert topo.standalone.config.set('nsslapd-errorlog-level', '8') +- topo.standalone.restart() +- if float(THRESHOLD) > FULL_THR_FILL_SIZE: +- FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1 +- subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE_new)]) +- else: +- subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) +- _witherrorlog(topo, 'temporarily setting error loglevel to the default level', 11) +- # Verify that verbose logging was set to default level +- assert LOG_DEFAULT == int(re.findall(r'nsslapd-errorlog-level: \d+', +- str(topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, +- '(objectclass=*)', +- ['nsslapd-errorlog-level'])) +- )[0].split(' ')[1]) +- # Verify that logging is disabled +- _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'off'", 11) +- # Verify that rotated logs were removed +- _witherrorlog(topo, 'disabling access and audit logging', 2) +- _witherrorlog(topo, 'deleting rotated logs', 11) +- with open(topo.standalone.errlog, 'r') as study:study = study.read() +- assert 'Unable to remove file:' not in study +- # Verify DS is in shutdown mode +- _withouterrorlog(topo, 'topo.standalone.status() != False', 90) +- _witherrorlog(topo, 'is too far below the threshold', 2) +- # Verify DS has recovered from shutdown +- os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) +- open('{}/errors'.format(topo.standalone.ds_paths.log_dir), 'w').close() +- topo.standalone.start() +- _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'on'", 20) +- with open(topo.standalone.errlog, 'r') as study: study = study.read() +- assert 'disabling access and audit logging' not in study ++ log.info("Starting test_go_straight_below_half_of_the_threshold") ++ inst = topo.standalone ++ fill_file = None ++ ++ try: ++ log.info("Configuring disk monitoring with critical logging disabled") ++ inst.config.set('nsslapd-disk-monitoring', 'on') ++ inst.config.set('nsslapd-disk-monitoring-logging-critical', 'off') ++ inst.config.set('nsslapd-errorlog-level', '8') ++ inst.restart() ++ ++ # Go straight below half threshold ++ log.info(f"Filling disk to go below half threshold ({THRESHOLD_BYTES // 2} bytes)") ++ fill_file = fill_to_target_avail(inst.ds_paths.log_dir, THRESHOLD_BYTES // 2 - 1) ++ ++ # Verify that verbose logging was set to default level ++ log.info("Waiting for loglevel to be set to default") ++ wait_for_log_entry(inst, 
'temporarily setting error loglevel to the default level', 11) ++ ++ log.info("Verifying error log level was set to default") ++ config_entry = inst.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-errorlog-level']) ++ current_level = int(re.findall(r'nsslapd-errorlog-level: \d+', str(config_entry))[0].split(' ')[1]) ++ assert LOG_DEFAULT == current_level ++ ++ log.info("Verifying access logging is disabled") ++ wait_for_condition(inst, "inst.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'off'", 11) ++ ++ log.info("Verifying expected disk monitoring messages") ++ wait_for_log_entry(inst, 'disabling access and audit logging', 2) ++ wait_for_log_entry(inst, 'deleting rotated logs', 11) ++ ++ with open(inst.errlog, 'r') as err_log: ++ content = err_log.read() ++ assert 'Unable to remove file:' not in content ++ ++ log.info("Verifying server enters shutdown mode") ++ wait_for_condition(inst, 'inst.status() != False', 90) ++ wait_for_log_entry(inst, 'is too far below the threshold', 2) ++ ++ log.info("Freeing disk space and restarting server") ++ os.remove(fill_file) ++ fill_file = None ++ open(f'{inst.ds_paths.log_dir}/errors', 'w').close() ++ inst.start() ++ ++ log.info("Verifying server recovery") ++ wait_for_condition(inst, "inst.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'on'", 20) ++ ++ with open(inst.errlog, 'r') as err_log: ++ content = err_log.read() ++ assert 'disabling access and audit logging' not in content ++ ++ log.info("Recovery verification completed") ++ ++ finally: ++ if fill_file and os.path.exists(fill_file): ++ log.debug(f"Cleaning up fill file: {fill_file}") ++ os.remove(fill_file) ++ ++ log.info("Test completed successfully") + + + @disk_monitoring_ack + def test_readonly_on_threshold(topo, setup, reset_logs): +- """Verify that nsslapd-disk-monitoring-readonly-on-threshold switches the server to read-only mode ++ """Verify that nsslapd-disk-monitoring-readonly-on-threshold switches the server to read-only mode. + + :id: 06814c19-ef3c-4800-93c9-c7c6e76fcbb9 + :customerscenario: True + :setup: Standalone + :steps: +- 1. Verify that the backend is in read-only mode +- 2. Go back above the threshold +- 3. Verify that the backend is in read-write mode ++ 1. Configure readonly on threshold ++ 2. Go below threshold and verify backend is read-only ++ 3. Go back above threshold and verify backend is read-write + :expectedresults: +- 1. Should Success +- 2. Should Success +- 3. Should Success ++ 1. Success ++ 2. Success ++ 3. 
Success + """ +- file_path = '{}/foo'.format(topo.standalone.ds_paths.log_dir) +- backends = Backends(topo.standalone) +- backend_name = backends.list()[0].rdn +- # Verify that verbose logging was set to default level +- topo.standalone.deleteErrorLogs() +- assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') +- assert topo.standalone.config.set('nsslapd-disk-monitoring-readonly-on-threshold', 'on') +- topo.standalone.restart() ++ log.info("Starting test_readonly_on_threshold") ++ inst = topo.standalone ++ fill_file = None ++ test_user = None ++ + try: +- subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={HALF_THR_FILL_SIZE}']) +- _witherrorlog(topo, f"Putting the backend '{backend_name}' to read-only mode", 11) ++ backends = Backends(inst) ++ backend_name = backends.list()[0].rdn ++ log.info(f"Testing with backend: {backend_name}") ++ ++ log.info("Configuring disk monitoring with readonly on threshold") ++ inst.deleteErrorLogs() ++ inst.config.set('nsslapd-disk-monitoring', 'on') ++ inst.config.set('nsslapd-disk-monitoring-readonly-on-threshold', 'on') ++ inst.restart() ++ ++ log.info(f"Filling disk to go below threshold ({THRESHOLD_BYTES} bytes)") ++ fill_file = fill_to_target_avail(inst.ds_paths.log_dir, THRESHOLD_BYTES - 1) ++ ++ log.info("Waiting for backend to enter read-only mode") ++ wait_for_log_entry(inst, f"Putting the backend '{backend_name}' to read-only mode", 11) ++ ++ log.info("Verifying backend is in read-only mode") + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + try: +- user = users.create_test_user() +- user.delete() ++ test_user = users.create_test_user() ++ test_user.delete() ++ assert False, "Expected UNWILLING_TO_PERFORM error for read-only mode" + except ldap.UNWILLING_TO_PERFORM as e: + if 'database is read-only' not in str(e): + raise +- os.remove(file_path) +- _witherrorlog(topo, f"Putting the backend '{backend_name}' back to read-write mode", 11) +- user = users.create_test_user() +- assert user.exists() +- user.delete() ++ log.info("Confirmed: backend correctly rejects writes in read-only mode") ++ ++ log.info("Freeing disk space") ++ os.remove(fill_file) ++ fill_file = None ++ ++ log.info("Waiting for backend to return to read-write mode") ++ wait_for_log_entry(inst, f"Putting the backend '{backend_name}' back to read-write mode", 11) ++ ++ log.info("Verifying backend is in read-write mode") ++ test_user = users.create_test_user() ++ assert test_user.exists() ++ test_user.delete() ++ test_user = None ++ ++ log.info("Confirmed: backend correctly accepts writes in read-write mode") ++ + finally: +- if os.path.exists(file_path): +- os.remove(file_path) ++ if test_user: ++ try: ++ test_user.delete() ++ except: ++ pass ++ ++ if fill_file and os.path.exists(fill_file): ++ log.debug(f"Cleaning up fill file: {fill_file}") ++ os.remove(fill_file) ++ ++ log.info("Test completed successfully") + + + @disk_monitoring_ack + def test_readonly_on_threshold_below_half_of_the_threshold(topo, setup, reset_logs): +- """Go below 1/2 of the threshold when readonly on threshold is enabled ++ """Go below 1/2 of the threshold when readonly on threshold is enabled. + + :id: 10262663-b41f-420e-a2d0-9532dd54fa7c + :customerscenario: True + :setup: Standalone + :steps: +- 1. Go straight below 1/2 of the threshold +- 2. Verify that the backend is in read-only mode +- 3. Go back above the threshold +- 4. Verify that the backend is in read-write mode ++ 1. Configure readonly on threshold ++ 2. Go below half threshold ++ 3. 
Verify backend is read-only and shutdown messages appear ++ 4. Free space and verify backend returns to read-write + :expectedresults: +- 1. Should Success +- 2. Should Success +- 3. Should Success +- 4. Should Success ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success + """ +- file_path = '{}/foo'.format(topo.standalone.ds_paths.log_dir) +- backends = Backends(topo.standalone) +- backend_name = backends.list()[0].rdn +- topo.standalone.deleteErrorLogs() +- assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') +- assert topo.standalone.config.set('nsslapd-disk-monitoring-readonly-on-threshold', 'on') +- topo.standalone.restart() ++ log.info("Starting test_readonly_on_threshold_below_half_of_the_threshold") ++ inst = topo.standalone ++ fill_file = None ++ test_user = None ++ + try: +- if float(THRESHOLD) > FULL_THR_FILL_SIZE: +- FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1 +- subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE_new}']) +- else: +- subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE}']) +- _witherrorlog(topo, f"Putting the backend '{backend_name}' to read-only mode", 11) +- users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) ++ backends = Backends(inst) ++ backend_name = backends.list()[0].rdn ++ log.info(f"Testing with backend: {backend_name}") ++ ++ log.info("Configuring disk monitoring with readonly on threshold") ++ inst.deleteErrorLogs() ++ inst.config.set('nsslapd-disk-monitoring', 'on') ++ inst.config.set('nsslapd-disk-monitoring-readonly-on-threshold', 'on') ++ inst.restart() ++ ++ log.info(f"Filling disk to go below half threshold ({THRESHOLD_BYTES // 2} bytes)") ++ fill_file = fill_to_target_avail(inst.ds_paths.log_dir, THRESHOLD_BYTES // 2 - 1) ++ ++ log.info("Waiting for backend to enter read-only mode") ++ wait_for_log_entry(inst, f"Putting the backend '{backend_name}' to read-only mode", 11) ++ ++ log.info("Verifying backend is in read-only mode") ++ users = UserAccounts(inst, DEFAULT_SUFFIX) + try: +- user = users.create_test_user() +- user.delete() ++ test_user = users.create_test_user() ++ test_user.delete() ++ assert False, "Expected UNWILLING_TO_PERFORM error for read-only mode" + except ldap.UNWILLING_TO_PERFORM as e: + if 'database is read-only' not in str(e): + raise +- _witherrorlog(topo, 'is too far below the threshold', 51) +- # Verify DS has recovered from shutdown +- os.remove(file_path) +- _witherrorlog(topo, f"Putting the backend '{backend_name}' back to read-write mode", 51) +- user = users.create_test_user() +- assert user.exists() +- user.delete() ++ log.info("Confirmed: backend correctly rejects writes in read-only mode") ++ ++ log.info("Waiting for shutdown threshold message") ++ wait_for_log_entry(inst, 'is too far below the threshold', 51) ++ ++ log.info("Freeing disk space") ++ os.remove(fill_file) ++ fill_file = None ++ ++ log.info("Waiting for backend to return to read-write mode") ++ wait_for_log_entry(inst, f"Putting the backend '{backend_name}' back to read-write mode", 51) ++ ++ log.info("Verifying backend is in read-write mode") ++ test_user = users.create_test_user() ++ assert test_user.exists() ++ test_user.delete() ++ test_user = None ++ ++ log.info("Confirmed: backend correctly accepts writes in read-write mode") ++ + finally: +- if os.path.exists(file_path): +- os.remove(file_path) ++ if test_user: ++ try: ++ test_user.delete() ++ except: ++ pass ++ ++ if fill_file and 
os.path.exists(fill_file): ++ log.debug(f"Cleaning up fill file: {fill_file}") ++ os.remove(fill_file) ++ ++ log.info("Test completed successfully") + + + @disk_monitoring_ack + def test_below_half_of_the_threshold_not_starting_after_shutdown(topo, setup, reset_logs): +- """Test that the instance won't start if we are below 1/2 of the threshold ++ """Test that the instance won't start if we are below 1/2 of the threshold. + + :id: cceeaefd-9fa4-45c5-9ac6-9887a0671ef8 + :customerscenario: True + :setup: Standalone + :steps: +- 1. Go straight below 1/2 of the threshold +- 2. Try to start the instance +- 3. Go back above the threshold +- 4. Try to start the instance ++ 1. Go below half threshold and wait for shutdown ++ 2. Try to start the instance and verify it fails ++ 3. Free space and verify instance starts successfully + :expectedresults: +- 1. Should Success +- 2. Should Fail +- 3. Should Success +- 4. Should Success ++ 1. Success ++ 2. Startup fails as expected ++ 3. Success + """ +- file_path = '{}/foo'.format(topo.standalone.ds_paths.log_dir) +- topo.standalone.deleteErrorLogs() +- assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') +- topo.standalone.restart() ++ log.info("Starting test_below_half_of_the_threshold_not_starting_after_shutdown") ++ inst = topo.standalone ++ fill_file = None ++ + try: +- if float(THRESHOLD) > FULL_THR_FILL_SIZE: +- FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1 +- subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE_new}']) +- else: +- subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE}']) +- _withouterrorlog(topo, 'topo.standalone.status() == True', 120) ++ log.info("Configuring disk monitoring") ++ inst.deleteErrorLogs() ++ inst.config.set('nsslapd-disk-monitoring', 'on') ++ inst.restart() ++ ++ log.info(f"Filling disk to go below half threshold ({THRESHOLD_BYTES // 2} bytes)") ++ fill_file = fill_to_target_avail(inst.ds_paths.log_dir, THRESHOLD_BYTES // 2 - 1) ++ ++ log.info("Waiting for server to shut down due to disk space") ++ wait_for_condition(inst, 'inst.status() == True', 120) ++ ++ log.info("Attempting to start instance (should fail)") + try: +- topo.standalone.start() ++ inst.start() ++ assert False, "Instance startup should have failed due to low disk space" + except (ValueError, subprocess.CalledProcessError): +- topo.standalone.log.info("Instance start up has failed as expected") +- _witherrorlog(topo, f'is too far below the threshold({THRESHOLD_BYTES} bytes). Exiting now', 2) +- # Verify DS has recovered from shutdown +- os.remove(file_path) +- topo.standalone.start() ++ log.info("Instance startup failed as expected due to low disk space") ++ ++ wait_for_log_entry(inst, f'is too far below the threshold({THRESHOLD_BYTES} bytes). Exiting now', 2) ++ ++ log.info("Freeing disk space") ++ os.remove(fill_file) ++ fill_file = None ++ ++ log.info("Starting instance after freeing space") ++ inst.start() ++ assert inst.status() == True ++ log.info("Instance started successfully after freeing space") ++ + finally: +- if os.path.exists(file_path): +- os.remove(file_path) ++ if fill_file and os.path.exists(fill_file): ++ log.debug(f"Cleaning up fill file: {fill_file}") ++ os.remove(fill_file) ++ ++ log.info("Test completed successfully") + + + @disk_monitoring_ack + def test_go_straight_below_4kb(topo, setup, reset_logs): +- """Go straight below 4KB ++ """Go straight below 4KB and verify behavior. 
+ + :id: a855115a-fe9e-11e8-8e91-8c16451d917b + :setup: Standalone + :steps: + 1. Go straight below 4KB +- 2. Clean space ++ 2. Verify server behavior ++ 3. Clean space and restart + :expectedresults: +- 1. Should Success +- 2. Should Success ++ 1. Success ++ 2. Success ++ 3. Success + """ +- assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') +- topo.standalone.restart() +- subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) +- subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo1'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) +- _withouterrorlog(topo, 'topo.standalone.status() != False', 11) +- os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) +- os.remove('{}/foo1'.format(topo.standalone.ds_paths.log_dir)) +- topo.standalone.start() +- assert topo.standalone.status() == True ++ log.info("Starting test_go_straight_below_4kb") ++ inst = topo.standalone ++ fill_file = None ++ ++ try: ++ log.info("Configuring disk monitoring") ++ inst.config.set('nsslapd-disk-monitoring', 'on') ++ inst.restart() ++ ++ log.info("Filling disk to go below 4KB") ++ fill_file = fill_to_target_avail(inst.ds_paths.log_dir, 4000) ++ ++ log.info("Waiting for server shutdown due to extreme low disk space") ++ wait_for_condition(inst, 'inst.status() != False', 11) ++ ++ log.info("Freeing disk space and restarting") ++ os.remove(fill_file) ++ fill_file = None ++ inst.start() ++ ++ assert inst.status() == True ++ log.info("Server restarted successfully after freeing space") ++ ++ finally: ++ if fill_file and os.path.exists(fill_file): ++ log.debug(f"Cleaning up fill file: {fill_file}") ++ os.remove(fill_file) ++ ++ log.info("Test completed successfully") + + + @disk_monitoring_ack + @pytest.mark.bz982325 + def test_threshold_to_overflow_value(topo, setup, reset_logs): +- """Overflow in nsslapd-disk-monitoring-threshold ++ """Test overflow in nsslapd-disk-monitoring-threshold. + + :id: ad60ab3c-fe9e-11e8-88dc-8c16451d917b + :setup: Standalone + :steps: +- 1. Setting nsslapd-disk-monitoring-threshold to overflow_value ++ 1. Set nsslapd-disk-monitoring-threshold to overflow value ++ 2. Verify the value is set correctly + :expectedresults: +- 1. Should Success ++ 1. Success ++ 2. 
Success + """ ++ log.info("Starting test_threshold_to_overflow_value") ++ inst = topo.standalone ++ + overflow_value = '3000000000' +- # Setting nsslapd-disk-monitoring-threshold to overflow_value +- assert topo.standalone.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(overflow_value)) +- assert overflow_value == re.findall(r'nsslapd-disk-monitoring-threshold: \d+', str( +- topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', +- ['nsslapd-disk-monitoring-threshold'])))[0].split(' ')[1] ++ log.info(f"Setting threshold to overflow value: {overflow_value}") ++ ++ inst.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(overflow_value)) ++ ++ config_entry = inst.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-disk-monitoring-threshold']) ++ current_value = re.findall(r'nsslapd-disk-monitoring-threshold: \d+', str(config_entry))[0].split(' ')[1] ++ assert overflow_value == current_value ++ ++ log.info(f"Verified: threshold value set to {current_value}") ++ log.info("Test completed successfully") + + + @disk_monitoring_ack + @pytest.mark.bz970995 + def test_threshold_is_reached_to_half(topo, setup, reset_logs): +- """RHDS not shutting down when disk monitoring threshold is reached to half. ++ """Verify RHDS not shutting down when disk monitoring threshold is reached to half. + + :id: b2d3665e-fe9e-11e8-b9c0-8c16451d917b + :setup: Standalone +- :steps: Standalone +- 1. Verify that there is not endless loop of error messages ++ :steps: ++ 1. Configure disk monitoring with critical logging ++ 2. Go below threshold ++ 3. Verify there is no endless loop of error messages + :expectedresults: +- 1. Should Success ++ 1. Success ++ 2. Success ++ 3. Success + """ ++ log.info("Starting test_threshold_is_reached_to_half") ++ inst = topo.standalone ++ fill_file = None ++ ++ try: ++ log.info("Configuring disk monitoring with critical logging enabled") ++ inst.config.set('nsslapd-disk-monitoring', 'on') ++ inst.config.set('nsslapd-disk-monitoring-logging-critical', 'on') ++ inst.config.set('nsslapd-errorlog-level', '8') ++ inst.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(str(THRESHOLD_BYTES))) ++ inst.restart() ++ ++ log.info(f"Filling disk to go below threshold ({THRESHOLD_BYTES} bytes)") ++ fill_file = fill_to_target_avail(inst.ds_paths.log_dir, THRESHOLD_BYTES // 2 - 1) ++ ++ log.info("Waiting for loglevel message and verifying it's not repeated") ++ wait_for_log_entry(inst, "temporarily setting error loglevel to the default level", 11) ++ ++ with open(inst.errlog, 'r') as err_log: ++ content = err_log.read() ++ ++ message_count = len(re.findall("temporarily setting error loglevel to the default level", content)) ++ assert message_count == 1, f"Expected 1 occurrence of message, found {message_count}" ++ ++ log.info("Verified: no endless loop of error messages") ++ ++ finally: ++ if fill_file and os.path.exists(fill_file): ++ log.debug(f"Cleaning up fill file: {fill_file}") ++ os.remove(fill_file) + +- assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') +- assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') +- assert topo.standalone.config.set('nsslapd-errorlog-level', '8') +- assert topo.standalone.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(THRESHOLD_BYTES)) +- topo.standalone.restart() +- subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) +- # Verify that there is not 
endless loop of error messages +- _witherrorlog(topo, "temporarily setting error loglevel to the default level", 10) +- with open(topo.standalone.errlog, 'r') as study:study = study.read() +- assert len(re.findall("temporarily setting error loglevel to the default level", study)) == 1 +- os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) ++ log.info("Test completed successfully") + + + @disk_monitoring_ack +@@ -713,58 +1145,77 @@ def test_threshold_is_reached_to_half(topo, setup, reset_logs): + ("nsslapd-disk-monitoring-grace-period", '0'), + ]) + def test_negagtive_parameterize(topo, setup, reset_logs, test_input, expected): +- """Verify that invalid operations are not permitted ++ """Verify that invalid operations are not permitted. + + :id: b88efbf8-fe9e-11e8-8499-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: +- 1. Verify that invalid operations are not permitted. ++ 1. Try to set invalid configuration values + :expectedresults: +- 1. Should not success. ++ 1. Configuration change should fail + """ ++ log.info(f"Starting test_negagtive_parameterize for {test_input}={expected}") ++ inst = topo.standalone ++ ++ log.info(f"Attempting to set invalid value: {test_input}={expected}") + with pytest.raises(Exception): +- topo.standalone.config.set(test_input, ensure_bytes(expected)) ++ inst.config.set(test_input, ensure_bytes(expected)) ++ ++ log.info("Verified: invalid configuration value was rejected") ++ log.info("Test completed successfully") + + + @disk_monitoring_ack + def test_valid_operations_are_permitted(topo, setup, reset_logs): +- """Verify that valid operations are permitted ++ """Verify that valid operations are permitted. + + :id: bd4f83f6-fe9e-11e8-88f4-8c16451d917b + :setup: Standalone + :steps: +- 1. Verify that valid operations are permitted ++ 1. Perform various valid configuration operations + :expectedresults: +- 1. Should Success. ++ 1. 
All operations should succeed + """ +- assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') +- assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') +- assert topo.standalone.config.set('nsslapd-errorlog-level', '8') +- topo.standalone.restart() +- # Trying to delete nsslapd-disk-monitoring-threshold +- assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-threshold', '')]) +- # Trying to add another value to nsslapd-disk-monitoring-threshold (check that it is not multivalued) +- topo.standalone.config.add('nsslapd-disk-monitoring-threshold', '2000001') +- # Trying to delete nsslapd-disk-monitoring +- assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring', ensure_bytes(str( +- topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-disk-monitoring'])[ +- 0]).split(' ')[2].split('\n\n')[0]))]) +- # Trying to add another value to nsslapd-disk-monitoring +- topo.standalone.config.add('nsslapd-disk-monitoring', 'off') +- # Trying to delete nsslapd-disk-monitoring-grace-period +- assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-grace-period', '')]) +- # Trying to add another value to nsslapd-disk-monitoring-grace-period +- topo.standalone.config.add('nsslapd-disk-monitoring-grace-period', '61') +- # Trying to delete nsslapd-disk-monitoring-logging-critical +- assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-logging-critical', +- ensure_bytes(str( +- topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, +- '(objectclass=*)', [ +- 'nsslapd-disk-monitoring-logging-critical'])[ +- 0]).split(' ')[2].split('\n\n')[0]))]) +- # Trying to add another value to nsslapd-disk-monitoring-logging-critical +- assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') ++ log.info("Starting test_valid_operations_are_permitted") ++ inst = topo.standalone ++ ++ log.info("Setting initial disk monitoring configuration") ++ inst.config.set('nsslapd-disk-monitoring', 'on') ++ inst.config.set('nsslapd-disk-monitoring-logging-critical', 'on') ++ inst.config.set('nsslapd-errorlog-level', '8') ++ inst.restart() ++ ++ log.info("Testing deletion of nsslapd-disk-monitoring-threshold") ++ inst.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-threshold', '')]) ++ ++ log.info("Testing addition of nsslapd-disk-monitoring-threshold value") ++ inst.config.add('nsslapd-disk-monitoring-threshold', '2000001') ++ ++ log.info("Testing deletion of nsslapd-disk-monitoring") ++ config_entry = inst.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-disk-monitoring']) ++ current_value = str(config_entry[0]).split(' ')[2].split('\n\n')[0] ++ inst.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring', ensure_bytes(current_value))]) ++ ++ log.info("Testing addition of nsslapd-disk-monitoring value") ++ inst.config.add('nsslapd-disk-monitoring', 'off') ++ ++ log.info("Testing deletion of nsslapd-disk-monitoring-grace-period") ++ inst.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-grace-period', '')]) ++ ++ log.info("Testing addition of nsslapd-disk-monitoring-grace-period value") ++ inst.config.add('nsslapd-disk-monitoring-grace-period', '61') ++ ++ log.info("Testing deletion of nsslapd-disk-monitoring-logging-critical") ++ config_entry = inst.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', 
['nsslapd-disk-monitoring-logging-critical']) ++ current_value = str(config_entry[0]).split(' ')[2].split('\n\n')[0] ++ inst.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-logging-critical', ensure_bytes(current_value))]) ++ ++ log.info("Testing addition of nsslapd-disk-monitoring-logging-critical value") ++ inst.config.set('nsslapd-disk-monitoring-logging-critical', 'on') ++ ++ log.info("All valid operations completed successfully") ++ log.info("Test completed successfully") + + + if __name__ == '__main__': +-- +2.49.0 + diff --git a/SOURCES/0020-Issue-6339-Address-Coverity-scan-issues-in-memberof-.patch b/SOURCES/0020-Issue-6339-Address-Coverity-scan-issues-in-memberof-.patch new file mode 100644 index 0000000..fd7bf88 --- /dev/null +++ b/SOURCES/0020-Issue-6339-Address-Coverity-scan-issues-in-memberof-.patch @@ -0,0 +1,63 @@ +From 574a5295e13cf01c34226d676104057468198616 Mon Sep 17 00:00:00 2001 +From: Simon Pichugin +Date: Fri, 4 Oct 2024 08:55:11 -0700 +Subject: [PATCH] Issue 6339 - Address Coverity scan issues in memberof and + bdb_layer (#6353) + +Description: Add null check for memberof attribute in memberof.c +Fix memory leak by freeing 'cookie' in memberof.c +Add null check for database environment in bdb_layer.c +Fix race condition by adding mutex lock/unlock in bdb_layer.c + +Fixes: https://github.com/389ds/389-ds-base/issues/6339 + +Reviewed by: @progier389, @tbordaz (Thanks!) +--- + ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c | 17 ++++++++++++++--- + 1 file changed, 14 insertions(+), 3 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +index b04cd68e2..4f069197e 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +@@ -6987,6 +6987,7 @@ bdb_public_private_open(backend *be, const char *db_filename, int rw, dbi_env_t + bdb_config *conf = (bdb_config *)li->li_dblayer_config; + bdb_db_env **ppEnv = (bdb_db_env**)&priv->dblayer_env; + char dbhome[MAXPATHLEN]; ++ bdb_db_env *pEnv = NULL; + DB_ENV *bdb_env = NULL; + DB *bdb_db = NULL; + struct stat st = {0}; +@@ -7036,7 +7037,13 @@ bdb_public_private_open(backend *be, const char *db_filename, int rw, dbi_env_t + conf->bdb_tx_max = 50; + rc = bdb_start(li, DBLAYER_NORMAL_MODE); + if (rc == 0) { +- bdb_env = ((struct bdb_db_env*)(priv->dblayer_env))->bdb_DB_ENV; ++ pEnv = (bdb_db_env *)priv->dblayer_env; ++ if (pEnv == NULL) { ++ fprintf(stderr, "bdb_public_private_open: dbenv is not available (0x%p) for database %s\n", ++ (void *)pEnv, db_filename ? 
db_filename : "unknown"); ++ return EINVAL; ++ } ++ bdb_env = pEnv->bdb_DB_ENV; + } + } else { + /* Setup minimal environment */ +@@ -7080,8 +7087,12 @@ bdb_public_private_close(struct ldbminfo *li, dbi_env_t **env, dbi_db_t **db) + if (priv) { + /* Detect if db is fully set up in read write mode */ + bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env; +- if (pEnv && pEnv->bdb_thread_count>0) { +- rw = 1; ++ if (pEnv) { ++ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); ++ if (pEnv->bdb_thread_count > 0) { ++ rw = 1; ++ } ++ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock); + } + } + if (rw == 0) { +-- +2.49.0 + diff --git a/SOURCES/0021-Issue-6468-CLI-Fix-default-error-log-level.patch b/SOURCES/0021-Issue-6468-CLI-Fix-default-error-log-level.patch new file mode 100644 index 0000000..884e524 --- /dev/null +++ b/SOURCES/0021-Issue-6468-CLI-Fix-default-error-log-level.patch @@ -0,0 +1,31 @@ +From 972ddeed2029975d5d89e165db1db554f2e8bc28 Mon Sep 17 00:00:00 2001 +From: Viktor Ashirov +Date: Tue, 29 Jul 2025 08:00:00 +0200 +Subject: [PATCH] Issue 6468 - CLI - Fix default error log level + +Description: +Default error log level is 16384 + +Relates: https://github.com/389ds/389-ds-base/issues/6468 + +Reviewed by: @droideck (Thanks!) +--- + src/lib389/lib389/cli_conf/logging.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/lib389/lib389/cli_conf/logging.py b/src/lib389/lib389/cli_conf/logging.py +index d1e32822c..c48c75faa 100644 +--- a/src/lib389/lib389/cli_conf/logging.py ++++ b/src/lib389/lib389/cli_conf/logging.py +@@ -44,7 +44,7 @@ ERROR_LEVELS = { + + "methods used for a SASL bind" + }, + "default": { +- "level": 6384, ++ "level": 16384, + "desc": "Default logging level" + }, + "filter": { +-- +2.49.0 + diff --git a/SOURCES/0022-Issues-6913-6886-6250-Adjust-xfail-marks-6914.patch b/SOURCES/0022-Issues-6913-6886-6250-Adjust-xfail-marks-6914.patch new file mode 100644 index 0000000..2f5675e --- /dev/null +++ b/SOURCES/0022-Issues-6913-6886-6250-Adjust-xfail-marks-6914.patch @@ -0,0 +1,222 @@ +From f28deac93c552a9c4dc9dd9c18f449fcd5cc7731 Mon Sep 17 00:00:00 2001 +From: Simon Pichugin +Date: Fri, 1 Aug 2025 09:28:39 -0700 +Subject: [PATCH] Issues 6913, 6886, 6250 - Adjust xfail marks (#6914) + +Description: Some of the ACI invalid syntax issues were fixed, +so we need to remove xfail marks. +Disk space issue should have a 'skipif' mark. +Display all attrs (nsslapd-auditlog-display-attrs: *) fails because of a bug. +EntryUSN inconsistency and overflow bugs were exposed with the tests. + +Related: https://github.com/389ds/389-ds-base/issues/6913 +Related: https://github.com/389ds/389-ds-base/issues/6886 +Related: https://github.com/389ds/389-ds-base/issues/6250 + +Reviewed by: @vashirov (Thanks!) 
+--- + dirsrvtests/tests/suites/acl/syntax_test.py | 13 ++++++++-- + .../tests/suites/import/regression_test.py | 18 +++++++------- + .../logging/audit_password_masking_test.py | 24 +++++++++---------- + .../suites/plugins/entryusn_overflow_test.py | 2 ++ + 4 files changed, 34 insertions(+), 23 deletions(-) + +diff --git a/dirsrvtests/tests/suites/acl/syntax_test.py b/dirsrvtests/tests/suites/acl/syntax_test.py +index 4edc7fa4b..ed9919ba3 100644 +--- a/dirsrvtests/tests/suites/acl/syntax_test.py ++++ b/dirsrvtests/tests/suites/acl/syntax_test.py +@@ -190,10 +190,9 @@ FAILED = [('test_targattrfilters_18', + f'(all)userdn="ldap:///anyone";)'), ] + + +-@pytest.mark.xfail(reason='https://bugzilla.redhat.com/show_bug.cgi?id=1691473') + @pytest.mark.parametrize("real_value", [a[1] for a in FAILED], + ids=[a[0] for a in FAILED]) +-def test_aci_invalid_syntax_fail(topo, real_value): ++def test_aci_invalid_syntax_fail(topo, real_value, request): + """Try to set wrong ACI syntax. + + :id: 83c40784-fff5-49c8-9535-7064c9c19e7e +@@ -206,6 +205,16 @@ def test_aci_invalid_syntax_fail(topo, real_value): + 1. It should pass + 2. It should not pass + """ ++ # Mark specific test cases as xfail ++ xfail_cases = [ ++ 'test_targattrfilters_18', ++ 'test_targattrfilters_20', ++ 'test_bind_rule_set_with_more_than_three' ++ ] ++ ++ if request.node.callspec.id in xfail_cases: ++ pytest.xfail("DS6913 - This test case is expected to fail") ++ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + with pytest.raises(ldap.INVALID_SYNTAX): + domain.add("aci", real_value) +diff --git a/dirsrvtests/tests/suites/import/regression_test.py b/dirsrvtests/tests/suites/import/regression_test.py +index 2f850a19a..18611de35 100644 +--- a/dirsrvtests/tests/suites/import/regression_test.py ++++ b/dirsrvtests/tests/suites/import/regression_test.py +@@ -323,7 +323,7 @@ ou: myDups00001 + + @pytest.mark.bz1749595 + @pytest.mark.tier2 +-@pytest.mark.xfail(not _check_disk_space(), reason="not enough disk space for lmdb map") ++@pytest.mark.skipif(not _check_disk_space(), reason="not enough disk space for lmdb map") + @pytest.mark.xfail(ds_is_older("1.3.10.1"), reason="bz1749595 not fixed on versions older than 1.3.10.1") + def test_large_ldif2db_ancestorid_index_creation(topo, _set_mdb_map_size): + """Import with ldif2db a large file - check that the ancestorid index creation phase has a correct performance +@@ -399,39 +399,39 @@ def test_large_ldif2db_ancestorid_index_creation(topo, _set_mdb_map_size): + log.info('Starting the server') + topo.standalone.start() + +- # With lmdb there is no more any special phase for ancestorid ++ # With lmdb there is no more any special phase for ancestorid + # because ancestorsid get updated on the fly while processing the + # entryrdn (by up the parents chain to compute the parentid +- # ++ # + # But there is still a numSubordinates generation phase + if get_default_db_lib() == "mdb": + log.info('parse the errors logs to check lines with "Generating numSubordinates complete." 
are present') + end_numsubordinates = str(topo.standalone.ds_error_log.match(r'.*Generating numSubordinates complete.*'))[1:-1] + assert len(end_numsubordinates) > 0 +- ++ + else: + log.info('parse the errors logs to check lines with "Starting sort of ancestorid" are present') + start_sort_str = str(topo.standalone.ds_error_log.match(r'.*Starting sort of ancestorid non-leaf IDs*'))[1:-1] + assert len(start_sort_str) > 0 +- ++ + log.info('parse the errors logs to check lines with "Finished sort of ancestorid" are present') + end_sort_str = str(topo.standalone.ds_error_log.match(r'.*Finished sort of ancestorid non-leaf IDs*'))[1:-1] + assert len(end_sort_str) > 0 +- ++ + log.info('parse the error logs for the line with "Gathering ancestorid non-leaf IDs"') + start_ancestorid_indexing_op_str = str(topo.standalone.ds_error_log.match(r'.*Gathering ancestorid non-leaf IDs*'))[1:-1] + assert len(start_ancestorid_indexing_op_str) > 0 +- ++ + log.info('parse the error logs for the line with "Created ancestorid index"') + end_ancestorid_indexing_op_str = str(topo.standalone.ds_error_log.match(r'.*Created ancestorid index*'))[1:-1] + assert len(end_ancestorid_indexing_op_str) > 0 +- ++ + log.info('get the ancestorid non-leaf IDs indexing start and end time from the collected strings') + # Collected lines look like : '[15/May/2020:05:30:27.245967313 -0400] - INFO - bdb_get_nonleaf_ids - import userRoot: Gathering ancestorid non-leaf IDs...' + # We are getting the sec.nanosec part of the date, '27.245967313' in the above example + start_time = (start_ancestorid_indexing_op_str.split()[0]).split(':')[3] + end_time = (end_ancestorid_indexing_op_str.split()[0]).split(':')[3] +- ++ + log.info('Calculate the elapsed time for the ancestorid non-leaf IDs index creation') + etime = (Decimal(end_time) - Decimal(start_time)) + # The time for the ancestorid index creation should be less than 10s for an offline import of an ldif file with 100000 entries / 5 entries per node +diff --git a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py +index 3b6a54849..69a36cb5d 100644 +--- a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py ++++ b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py +@@ -117,10 +117,10 @@ def check_password_masked(inst, log_format, expected_password, actual_password): + + @pytest.mark.parametrize("log_format,display_attrs", [ + ("default", None), +- ("default", "*"), ++ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")), + ("default", "userPassword"), + ("json", None), +- ("json", "*"), ++ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")), + ("json", "userPassword") + ]) + def test_password_masking_add_operation(topo, log_format, display_attrs): +@@ -173,10 +173,10 @@ def test_password_masking_add_operation(topo, log_format, display_attrs): + + @pytest.mark.parametrize("log_format,display_attrs", [ + ("default", None), +- ("default", "*"), ++ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")), + ("default", "userPassword"), + ("json", None), +- ("json", "*"), ++ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")), + ("json", "userPassword") + ]) + def test_password_masking_modify_operation(topo, log_format, display_attrs): +@@ -242,10 +242,10 @@ def test_password_masking_modify_operation(topo, log_format, display_attrs): + + @pytest.mark.parametrize("log_format,display_attrs", [ + ("default", None), +- ("default", 
"*"), ++ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")), + ("default", "nsslapd-rootpw"), + ("json", None), +- ("json", "*"), ++ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")), + ("json", "nsslapd-rootpw") + ]) + def test_password_masking_rootpw_modify_operation(topo, log_format, display_attrs): +@@ -297,10 +297,10 @@ def test_password_masking_rootpw_modify_operation(topo, log_format, display_attr + + @pytest.mark.parametrize("log_format,display_attrs", [ + ("default", None), +- ("default", "*"), ++ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")), + ("default", "nsmultiplexorcredentials"), + ("json", None), +- ("json", "*"), ++ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")), + ("json", "nsmultiplexorcredentials") + ]) + def test_password_masking_multiplexor_credentials(topo, log_format, display_attrs): +@@ -368,10 +368,10 @@ def test_password_masking_multiplexor_credentials(topo, log_format, display_attr + + @pytest.mark.parametrize("log_format,display_attrs", [ + ("default", None), +- ("default", "*"), ++ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")), + ("default", "nsDS5ReplicaCredentials"), + ("json", None), +- ("json", "*"), ++ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")), + ("json", "nsDS5ReplicaCredentials") + ]) + def test_password_masking_replica_credentials(topo, log_format, display_attrs): +@@ -432,10 +432,10 @@ def test_password_masking_replica_credentials(topo, log_format, display_attrs): + + @pytest.mark.parametrize("log_format,display_attrs", [ + ("default", None), +- ("default", "*"), ++ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")), + ("default", "nsDS5ReplicaBootstrapCredentials"), + ("json", None), +- ("json", "*"), ++ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")), + ("json", "nsDS5ReplicaBootstrapCredentials") + ]) + def test_password_masking_bootstrap_credentials(topo, log_format, display_attrs): +diff --git a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py +index a23d734ca..8c3a537ab 100644 +--- a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py ++++ b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py +@@ -81,6 +81,7 @@ def setup_usn_test(topology_st, request): + return created_users + + ++@pytest.mark.xfail(reason="DS6250") + def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test): + """Test that reproduces entryUSN overflow when adding existing entries + +@@ -232,6 +233,7 @@ def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test): + log.info("EntryUSN overflow test completed successfully") + + ++@pytest.mark.xfail(reason="DS6250") + def test_entryusn_consistency_after_failed_adds(topology_st, setup_usn_test): + """Test that entryUSN remains consistent after failed add operations + +-- +2.49.0 + diff --git a/SOURCES/0023-Issue-6181-RFE-Allow-system-to-manage-uid-gid-at-sta.patch b/SOURCES/0023-Issue-6181-RFE-Allow-system-to-manage-uid-gid-at-sta.patch new file mode 100644 index 0000000..d208d39 --- /dev/null +++ b/SOURCES/0023-Issue-6181-RFE-Allow-system-to-manage-uid-gid-at-sta.patch @@ -0,0 +1,32 @@ +From 58a9e1083865e75bba3cf9867a3df109031d7810 Mon Sep 17 00:00:00 2001 +From: Viktor Ashirov +Date: Mon, 28 Jul 2025 13:18:26 +0200 +Subject: [PATCH] Issue 6181 - RFE - Allow system to manage uid/gid at startup + +Description: +Expand 
CapabilityBoundingSet to include CAP_FOWNER + +Relates: https://github.com/389ds/389-ds-base/issues/6181 +Relates: https://github.com/389ds/389-ds-base/issues/6906 + +Reviewed by: @progier389 (Thanks!) +--- + wrappers/systemd.template.service.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in +index fa05c9f60..6db1f6f8f 100644 +--- a/wrappers/systemd.template.service.in ++++ b/wrappers/systemd.template.service.in +@@ -25,7 +25,7 @@ MemoryAccounting=yes + + # Allow non-root instances to bind to low ports. + AmbientCapabilities=CAP_NET_BIND_SERVICE +-CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETUID CAP_SETGID CAP_DAC_OVERRIDE CAP_CHOWN ++CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETUID CAP_SETGID CAP_DAC_OVERRIDE CAP_CHOWN CAP_FOWNER + + PrivateTmp=on + # https://en.opensuse.org/openSUSE:Security_Features#Systemd_hardening_effort +-- +2.49.0 + diff --git a/SOURCES/0024-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch b/SOURCES/0024-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch new file mode 100644 index 0000000..d3ec59d --- /dev/null +++ b/SOURCES/0024-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch @@ -0,0 +1,92 @@ +From e03af0aa7e041fc2ca20caf3bcb5810e968043dc Mon Sep 17 00:00:00 2001 +From: Viktor Ashirov +Date: Tue, 13 May 2025 13:53:05 +0200 +Subject: [PATCH] Issue 6778 - Memory leak in + roles_cache_create_object_from_entry + +Bug Description: +`this_role` has internal allocations (`dn`, `rolescopedn`, etc.) +that are not freed. + +Fix Description: +Use `roles_cache_role_object_free` to free `this_role` and all its +internal structures. + +Fixes: https://github.com/389ds/389-ds-base/issues/6778 + +Reviewed by: @mreynolds389 (Thanks!) 
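The fix description above ("free `this_role` and all its internal structures") comes down to replacing a shallow free of the struct with a deep free of its owned members. Below is a minimal, self-contained C sketch of that pattern for illustration only; `role_t`, `role_new` and `role_free` are hypothetical stand-ins, not the real 389-ds-base `role_object` or `slapi_*` APIs.

```
/* Shallow-free vs. deep-free: freeing only the struct would leak dn and
 * rolescopedn, which is the class of leak the fix description refers to. */
#include <stdlib.h>
#include <string.h>

typedef struct {
    char *dn;          /* owned heap allocation */
    char *rolescopedn; /* owned heap allocation, may be NULL */
} role_t;

static role_t *role_new(const char *dn, const char *scope)
{
    role_t *r = calloc(1, sizeof(*r));
    if (!r)
        return NULL;
    r->dn = strdup(dn);
    r->rolescopedn = scope ? strdup(scope) : NULL;
    return r;
}

/* Deep free: release the members first, then the struct itself. */
static void role_free(role_t *r)
{
    if (!r)
        return;
    free(r->dn);
    free(r->rolescopedn);
    free(r);
}

int main(void)
{
    role_t *r = role_new("cn=TestRole,dc=example,dc=com",
                         "ou=people,dc=example,dc=com");
    role_free(r); /* every error path should take the deep free, not free(r) */
    return 0;
}
```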
+--- + ldap/servers/plugins/roles/roles_cache.c | 15 ++++++++------- + 1 file changed, 8 insertions(+), 7 deletions(-) + +diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c +index bbed11802..60d7182e2 100644 +--- a/ldap/servers/plugins/roles/roles_cache.c ++++ b/ldap/servers/plugins/roles/roles_cache.c +@@ -1098,7 +1098,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu + /* We determine the role type by reading the objectclass */ + if (roles_cache_is_role_entry(role_entry) == 0) { + /* Bad type */ +- slapi_ch_free((void **)&this_role); ++ roles_cache_role_object_free((caddr_t)this_role); + return SLAPI_ROLE_DEFINITION_ERROR; + } + +@@ -1108,7 +1108,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu + this_role->type = type; + } else { + /* Bad type */ +- slapi_ch_free((void **)&this_role); ++ roles_cache_role_object_free((caddr_t)this_role); + return SLAPI_ROLE_DEFINITION_ERROR; + } + +@@ -1166,7 +1166,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu + filter_attr_value = (char *)slapi_entry_attr_get_charptr(role_entry, ROLE_FILTER_ATTR_NAME); + if (filter_attr_value == NULL) { + /* Means probably no attribute or no value there */ +- slapi_ch_free((void **)&this_role); ++ roles_cache_role_object_free((caddr_t)this_role); + return SLAPI_ROLE_ERROR_NO_FILTER_SPECIFIED; + } + +@@ -1205,7 +1205,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu + (char *)slapi_sdn_get_ndn(this_role->dn), + ROLE_FILTER_ATTR_NAME, filter_attr_value, + ROLE_FILTER_ATTR_NAME); +- slapi_ch_free((void **)&this_role); ++ roles_cache_role_object_free((caddr_t)this_role); + slapi_ch_free_string(&filter_attr_value); + return SLAPI_ROLE_ERROR_FILTER_BAD; + } +@@ -1217,7 +1217,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu + filter = slapi_str2filter(filter_attr_value); + if (filter == NULL) { + /* An error has occured */ +- slapi_ch_free((void **)&this_role); ++ roles_cache_role_object_free((caddr_t)this_role); + slapi_ch_free_string(&filter_attr_value); + return SLAPI_ROLE_ERROR_FILTER_BAD; + } +@@ -1228,7 +1228,8 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu + (char *)slapi_sdn_get_ndn(this_role->dn), + filter_attr_value, + ROLE_FILTER_ATTR_NAME); +- slapi_ch_free((void **)&this_role); ++ roles_cache_role_object_free((caddr_t)this_role); ++ slapi_filter_free(filter, 1); + slapi_ch_free_string(&filter_attr_value); + return SLAPI_ROLE_ERROR_FILTER_BAD; + } +@@ -1285,7 +1286,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu + if (rc == 0) { + *result = this_role; + } else { +- slapi_ch_free((void **)&this_role); ++ roles_cache_role_object_free((caddr_t)this_role); + } + + slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, +-- +2.49.0 + diff --git a/SOURCES/0025-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch b/SOURCES/0025-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch new file mode 100644 index 0000000..286ed06 --- /dev/null +++ b/SOURCES/0025-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch @@ -0,0 +1,262 @@ +From c8c9d8814bd328d9772b6a248aa142b72430cba1 Mon Sep 17 00:00:00 2001 +From: Viktor Ashirov +Date: Wed, 16 Jul 2025 11:22:30 +0200 +Subject: [PATCH] Issue 6778 - Memory leak in + roles_cache_create_object_from_entry part 2 + +Bug Description: +Everytime a role with scope DN is 
processed, we leak rolescopeDN. + +Fix Description: +* Initialize all pointer variables to NULL +* Add additional NULL checks +* Free rolescopeDN +* Move test_rewriter_with_invalid_filter before the DB contains 90k entries +* Use task.wait() for import task completion instead of parsing logs, +increase the timeout + +Fixes: https://github.com/389ds/389-ds-base/issues/6778 + +Reviewed by: @progier389 (Thanks!) +--- + dirsrvtests/tests/suites/roles/basic_test.py | 164 +++++++++---------- + ldap/servers/plugins/roles/roles_cache.c | 10 +- + 2 files changed, 82 insertions(+), 92 deletions(-) + +diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py +index d92d6f0c3..ec208bae9 100644 +--- a/dirsrvtests/tests/suites/roles/basic_test.py ++++ b/dirsrvtests/tests/suites/roles/basic_test.py +@@ -510,6 +510,76 @@ def test_vattr_on_managed_role(topo, request): + + request.addfinalizer(fin) + ++def test_rewriter_with_invalid_filter(topo, request): ++ """Test that server does not crash when having ++ invalid filter in filtered role ++ ++ :id: 5013b0b2-0af6-11f0-8684-482ae39447e5 ++ :setup: standalone server ++ :steps: ++ 1. Setup filtered role with good filter ++ 2. Setup nsrole rewriter ++ 3. Restart the server ++ 4. Search for entries ++ 5. Setup filtered role with bad filter ++ 6. Search for entries ++ :expectedresults: ++ 1. Operation should succeed ++ 2. Operation should succeed ++ 3. Operation should succeed ++ 4. Operation should succeed ++ 5. Operation should succeed ++ 6. Operation should succeed ++ """ ++ inst = topo.standalone ++ entries = [] ++ ++ def fin(): ++ inst.start() ++ for entry in entries: ++ entry.delete() ++ request.addfinalizer(fin) ++ ++ # Setup filtered role ++ roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}') ++ filter_ko = '(&((objectClass=top)(objectClass=nsPerson))' ++ filter_ok = '(&(objectClass=top)(objectClass=nsPerson))' ++ role_properties = { ++ 'cn': 'TestFilteredRole', ++ 'nsRoleFilter': filter_ok, ++ 'description': 'Test good filter', ++ } ++ role = roles.create(properties=role_properties) ++ entries.append(role) ++ ++ # Setup nsrole rewriter ++ rewriters = Rewriters(inst) ++ rewriter_properties = { ++ "cn": "nsrole", ++ "nsslapd-libpath": 'libroles-plugin', ++ "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter', ++ } ++ rewriter = rewriters.ensure_state(properties=rewriter_properties) ++ entries.append(rewriter) ++ ++ # Restart thge instance ++ inst.restart() ++ ++ # Search for entries ++ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn) ++ ++ # Set bad filter ++ role_properties = { ++ 'cn': 'TestFilteredRole', ++ 'nsRoleFilter': filter_ko, ++ 'description': 'Test bad filter', ++ } ++ role.ensure_state(properties=role_properties) ++ ++ # Search for entries ++ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn) ++ ++ + def test_managed_and_filtered_role_rewrite(topo, request): + """Test that filter components containing 'nsrole=xxx' + are reworked if xxx is either a filtered role or a managed +@@ -581,17 +651,11 @@ def test_managed_and_filtered_role_rewrite(topo, request): + PARENT="ou=people,%s" % DEFAULT_SUFFIX + dbgen_users(topo.standalone, 90000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT) + +- # online import ++ # Online import + import_task = ImportTask(topo.standalone) + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) +- # Check for up to 200sec that the completion +- 
for i in range(1, 20): +- if len(topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9000.*')) > 0: +- break +- time.sleep(10) +- import_complete = topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9000.*') +- assert (len(import_complete) == 1) +- ++ import_task.wait(timeout=400) ++ assert import_task.get_exit_code() == 0 + # Restart server + topo.standalone.restart() + +@@ -715,17 +779,11 @@ def test_not_such_entry_role_rewrite(topo, request): + PARENT="ou=people,%s" % DEFAULT_SUFFIX + dbgen_users(topo.standalone, 91000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT) + +- # online import ++ # Online import + import_task = ImportTask(topo.standalone) + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) +- # Check for up to 200sec that the completion +- for i in range(1, 20): +- if len(topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9100.*')) > 0: +- break +- time.sleep(10) +- import_complete = topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9100.*') +- assert (len(import_complete) == 1) +- ++ import_task.wait(timeout=400) ++ assert import_task.get_exit_code() == 0 + # Restart server + topo.standalone.restart() + +@@ -769,76 +827,6 @@ def test_not_such_entry_role_rewrite(topo, request): + request.addfinalizer(fin) + + +-def test_rewriter_with_invalid_filter(topo, request): +- """Test that server does not crash when having +- invalid filter in filtered role +- +- :id: 5013b0b2-0af6-11f0-8684-482ae39447e5 +- :setup: standalone server +- :steps: +- 1. Setup filtered role with good filter +- 2. Setup nsrole rewriter +- 3. Restart the server +- 4. Search for entries +- 5. Setup filtered role with bad filter +- 6. Search for entries +- :expectedresults: +- 1. Operation should succeed +- 2. Operation should succeed +- 3. Operation should succeed +- 4. Operation should succeed +- 5. Operation should succeed +- 6. 
Operation should succeed +- """ +- inst = topo.standalone +- entries = [] +- +- def fin(): +- inst.start() +- for entry in entries: +- entry.delete() +- request.addfinalizer(fin) +- +- # Setup filtered role +- roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}') +- filter_ko = '(&((objectClass=top)(objectClass=nsPerson))' +- filter_ok = '(&(objectClass=top)(objectClass=nsPerson))' +- role_properties = { +- 'cn': 'TestFilteredRole', +- 'nsRoleFilter': filter_ok, +- 'description': 'Test good filter', +- } +- role = roles.create(properties=role_properties) +- entries.append(role) +- +- # Setup nsrole rewriter +- rewriters = Rewriters(inst) +- rewriter_properties = { +- "cn": "nsrole", +- "nsslapd-libpath": 'libroles-plugin', +- "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter', +- } +- rewriter = rewriters.ensure_state(properties=rewriter_properties) +- entries.append(rewriter) +- +- # Restart thge instance +- inst.restart() +- +- # Search for entries +- entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn) +- +- # Set bad filter +- role_properties = { +- 'cn': 'TestFilteredRole', +- 'nsRoleFilter': filter_ko, +- 'description': 'Test bad filter', +- } +- role.ensure_state(properties=role_properties) +- +- # Search for entries +- entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn) +- +- + if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) +diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c +index 60d7182e2..60f5a919a 100644 +--- a/ldap/servers/plugins/roles/roles_cache.c ++++ b/ldap/servers/plugins/roles/roles_cache.c +@@ -1117,16 +1117,17 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu + + rolescopeDN = slapi_entry_attr_get_charptr(role_entry, ROLE_SCOPE_DN); + if (rolescopeDN) { +- Slapi_DN *rolescopeSDN; +- Slapi_DN *top_rolescopeSDN, *top_this_roleSDN; ++ Slapi_DN *rolescopeSDN = NULL; ++ Slapi_DN *top_rolescopeSDN = NULL; ++ Slapi_DN *top_this_roleSDN = NULL; + + /* Before accepting to use this scope, first check if it belongs to the same suffix */ + rolescopeSDN = slapi_sdn_new_dn_byref(rolescopeDN); +- if ((strlen((char *)slapi_sdn_get_ndn(rolescopeSDN)) > 0) && ++ if (rolescopeSDN && (strlen((char *)slapi_sdn_get_ndn(rolescopeSDN)) > 0) && + (slapi_dn_syntax_check(NULL, (char *)slapi_sdn_get_ndn(rolescopeSDN), 1) == 0)) { + top_rolescopeSDN = roles_cache_get_top_suffix(rolescopeSDN); + top_this_roleSDN = roles_cache_get_top_suffix(this_role->dn); +- if (slapi_sdn_compare(top_rolescopeSDN, top_this_roleSDN) == 0) { ++ if (top_rolescopeSDN && top_this_roleSDN && slapi_sdn_compare(top_rolescopeSDN, top_this_roleSDN) == 0) { + /* rolescopeDN belongs to the same suffix as the role, we can use this scope */ + this_role->rolescopedn = rolescopeSDN; + } else { +@@ -1148,6 +1149,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu + rolescopeDN); + slapi_sdn_free(&rolescopeSDN); + } ++ slapi_ch_free_string(&rolescopeDN); + } + + /* Depending upon role type, pull out the remaining information we need */ +-- +2.49.0 + diff --git a/SOURCES/0026-Issue-6850-AddressSanitizer-memory-leak-in-mdb_init.patch b/SOURCES/0026-Issue-6850-AddressSanitizer-memory-leak-in-mdb_init.patch new file mode 100644 index 0000000..b99f974 --- /dev/null +++ b/SOURCES/0026-Issue-6850-AddressSanitizer-memory-leak-in-mdb_init.patch @@ -0,0 +1,65 @@ +From 
f83a1996e3438e471cec086d53fb94be0c8666aa Mon Sep 17 00:00:00 2001 +From: Viktor Ashirov +Date: Mon, 7 Jul 2025 23:11:17 +0200 +Subject: [PATCH] Issue 6850 - AddressSanitizer: memory leak in mdb_init + +Bug Description: +`dbmdb_componentid` can be allocated multiple times. To avoid a memory +leak, allocate it only once, and free at the cleanup. + +Fixes: https://github.com/389ds/389-ds-base/issues/6850 + +Reviewed by: @mreynolds389, @tbordaz (Tnanks!) +--- + ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c | 4 +++- + ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c | 2 +- + ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c | 5 +++++ + 3 files changed, 9 insertions(+), 2 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c +index 1f7b71442..bebc83b76 100644 +--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c ++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c +@@ -146,7 +146,9 @@ dbmdb_compute_limits(struct ldbminfo *li) + int mdb_init(struct ldbminfo *li, config_info *config_array) + { + dbmdb_ctx_t *conf = (dbmdb_ctx_t *)slapi_ch_calloc(1, sizeof(dbmdb_ctx_t)); +- dbmdb_componentid = generate_componentid(NULL, "db-mdb"); ++ if (dbmdb_componentid == NULL) { ++ dbmdb_componentid = generate_componentid(NULL, "db-mdb"); ++ } + + li->li_dblayer_config = conf; + strncpy(conf->home, li->li_directory, MAXPATHLEN-1); +diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c +index 3ecc47170..c6e9f8b01 100644 +--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c ++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c +@@ -19,7 +19,7 @@ + #include + #include + +-Slapi_ComponentId *dbmdb_componentid; ++Slapi_ComponentId *dbmdb_componentid = NULL; + + #define BULKOP_MAX_RECORDS 100 /* Max records handled by a single bulk operations */ + +diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c +index 2d07db9b5..ae10ac7cf 100644 +--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c ++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c +@@ -49,6 +49,11 @@ dbmdb_cleanup(struct ldbminfo *li) + } + slapi_ch_free((void **)&(li->li_dblayer_config)); + ++ if (dbmdb_componentid != NULL) { ++ release_componentid(dbmdb_componentid); ++ dbmdb_componentid = NULL; ++ } ++ + return 0; + } + +-- +2.49.0 + diff --git a/SOURCES/0027-Issue-6848-AddressSanitizer-leak-in-do_search.patch b/SOURCES/0027-Issue-6848-AddressSanitizer-leak-in-do_search.patch new file mode 100644 index 0000000..6908ba1 --- /dev/null +++ b/SOURCES/0027-Issue-6848-AddressSanitizer-leak-in-do_search.patch @@ -0,0 +1,58 @@ +From e98acc1bfe2194fcdd0e420777eb65a20d55a64b Mon Sep 17 00:00:00 2001 +From: Viktor Ashirov +Date: Mon, 7 Jul 2025 22:01:09 +0200 +Subject: [PATCH] Issue 6848 - AddressSanitizer: leak in do_search + +Bug Description: +When there's a BER decoding error and the function goes to +`free_and_return`, the `attrs` variable is not being freed because it's +only freed if `!psearch || rc != 0 || err != 0`, but `err` is still 0 at +that point. + +If we reach `free_and_return` from the `ber_scanf` error path, `attrs` +was never set in the pblock with `slapi_pblock_set()`, so the +`slapi_pblock_get()` call will not retrieve the potentially partially +allocated `attrs` from the BER decoding. + +Fixes: https://github.com/389ds/389-ds-base/issues/6848 + +Reviewed by: @tbordaz, @droideck (Thanks!) 
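The bug description above is essentially an ownership question: on the ber_scanf error path the attribute array was never handed over to the pblock, so a cleanup that only frees "whatever the pblock holds" misses it. The following minimal C sketch shows the "free from whoever currently owns it" pattern under that assumption; `pblock_t`, `cleanup_attrs` and `free_strarray` are hypothetical stand-ins, not the real `Slapi_PBlock` API.

```
#include <stdlib.h>
#include <string.h>

typedef struct { char **attrs; } pblock_t;

/* Free a NULL-terminated array of heap strings (NULL is a no-op). */
static void free_strarray(char **a)
{
    if (!a)
        return;
    for (size_t i = 0; a[i]; i++)
        free(a[i]);
    free(a);
}

static void cleanup_attrs(pblock_t *pb, char **local_attrs)
{
    if (pb->attrs) {
        /* Ownership was transferred to the parameter block: free it there
         * and clear the pointer so it cannot be freed twice. */
        free_strarray(pb->attrs);
        pb->attrs = NULL;
    } else {
        /* Decoding failed before the transfer: the local copy is the only
         * owner, so it must be freed here to avoid the leak. */
        free_strarray(local_attrs);
    }
}

int main(void)
{
    pblock_t pb = {0};
    char **attrs = calloc(2, sizeof(char *));
    if (!attrs)
        return 1;
    attrs[0] = strdup("cn");
    /* Simulate the error path where attrs never reached the pblock. */
    cleanup_attrs(&pb, attrs);
    return 0;
}
```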
+--- + ldap/servers/slapd/search.c | 14 ++++++++++++-- + 1 file changed, 12 insertions(+), 2 deletions(-) + +diff --git a/ldap/servers/slapd/search.c b/ldap/servers/slapd/search.c +index e9b2c3670..f9d03c090 100644 +--- a/ldap/servers/slapd/search.c ++++ b/ldap/servers/slapd/search.c +@@ -235,6 +235,7 @@ do_search(Slapi_PBlock *pb) + log_search_access(pb, base, scope, fstr, "decoding error"); + send_ldap_result(pb, LDAP_PROTOCOL_ERROR, NULL, NULL, 0, + NULL); ++ err = 1; /* Make sure we free everything */ + goto free_and_return; + } + +@@ -420,8 +421,17 @@ free_and_return: + if (!psearch || rc != 0 || err != 0) { + slapi_ch_free_string(&fstr); + slapi_filter_free(filter, 1); +- slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &attrs); +- charray_free(attrs); /* passing NULL is fine */ ++ ++ /* Get attrs from pblock if it was set there, otherwise use local attrs */ ++ char **pblock_attrs = NULL; ++ slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &pblock_attrs); ++ if (pblock_attrs != NULL) { ++ charray_free(pblock_attrs); /* Free attrs from pblock */ ++ slapi_pblock_set(pb, SLAPI_SEARCH_ATTRS, NULL); ++ } else if (attrs != NULL) { ++ /* Free attrs that were allocated but never put in pblock */ ++ charray_free(attrs); ++ } + charray_free(gerattrs); /* passing NULL is fine */ + /* + * Fix for defect 526719 / 553356 : Persistent search op failed. +-- +2.49.0 + diff --git a/SOURCES/0028-Issue-6865-AddressSanitizer-leak-in-agmt_update_init.patch b/SOURCES/0028-Issue-6865-AddressSanitizer-leak-in-agmt_update_init.patch new file mode 100644 index 0000000..99b5e6f --- /dev/null +++ b/SOURCES/0028-Issue-6865-AddressSanitizer-leak-in-agmt_update_init.patch @@ -0,0 +1,58 @@ +From 120bc2666b682a27ffd6ace5cc238b33fab32c21 Mon Sep 17 00:00:00 2001 +From: Viktor Ashirov +Date: Fri, 11 Jul 2025 12:32:38 +0200 +Subject: [PATCH] Issue 6865 - AddressSanitizer: leak in + agmt_update_init_status + +Bug Description: +We allocate an array of `LDAPMod *` pointers, but never free it: + +``` +================================================================= +==2748356==ERROR: LeakSanitizer: detected memory leaks + +Direct leak of 24 byte(s) in 1 object(s) allocated from: + #0 0x7f05e8cb4a07 in __interceptor_malloc (/lib64/libasan.so.6+0xb4a07) + #1 0x7f05e85c0138 in slapi_ch_malloc (/usr/lib64/dirsrv/libslapd.so.0+0x1c0138) + #2 0x7f05e109e481 in agmt_update_init_status ldap/servers/plugins/replication/repl5_agmt.c:2583 + #3 0x7f05e10a0aa5 in agmtlist_shutdown ldap/servers/plugins/replication/repl5_agmtlist.c:789 + #4 0x7f05e10ab6bc in multisupplier_stop ldap/servers/plugins/replication/repl5_init.c:844 + #5 0x7f05e10ab6bc in multisupplier_stop ldap/servers/plugins/replication/repl5_init.c:837 + #6 0x7f05e862507d in plugin_call_func ldap/servers/slapd/plugin.c:2001 + #7 0x7f05e8625be1 in plugin_call_one ldap/servers/slapd/plugin.c:1950 + #8 0x7f05e8625be1 in plugin_dependency_closeall ldap/servers/slapd/plugin.c:1844 + #9 0x55e1a7ff9815 in slapd_daemon ldap/servers/slapd/daemon.c:1275 + #10 0x55e1a7fd36ef in main (/usr/sbin/ns-slapd+0x3e6ef) + #11 0x7f05e80295cf in __libc_start_call_main (/lib64/libc.so.6+0x295cf) + #12 0x7f05e802967f in __libc_start_main_alias_2 (/lib64/libc.so.6+0x2967f) + #13 0x55e1a7fd74a4 in _start (/usr/sbin/ns-slapd+0x424a4) + +SUMMARY: AddressSanitizer: 24 byte(s) leaked in 1 allocation(s). +``` + +Fix Description: +Ensure `mods` is freed in the cleanup code. 
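As a rough illustration of that cleanup (not the actual repl5_agmt.c code): the LDAPMod-style elements are owned and released elsewhere, so only the pointer array allocated for the call has to be freed. All names in this sketch are hypothetical stand-ins rather than the real `slapi_*` APIs.

```
#include <stdlib.h>

typedef struct { const char *type; } mod_t;

int main(void)
{
    mod_t start = { "startTime" };
    mod_t end   = { "endTime" };

    /* Array of pointers allocated just for the update call. */
    mod_t **mods = calloc(3, sizeof(*mods));
    if (!mods)
        return 1;
    mods[0] = &start;   /* elements are not heap-owned by the array */
    mods[1] = &end;
    mods[2] = NULL;

    /* ... perform the update with mods ... */

    free(mods);         /* free only the container array; leaking it was the bug */
    return 0;
}
```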
+ +Fixes: https://github.com/389ds/389-ds-base/issues/6865 +Relates: https://github.com/389ds/389-ds-base/issues/6470 + +Reviewed by: @mreynolds389 (Thanks!) +--- + ldap/servers/plugins/replication/repl5_agmt.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c +index 6ffb074d4..c6cfcda07 100644 +--- a/ldap/servers/plugins/replication/repl5_agmt.c ++++ b/ldap/servers/plugins/replication/repl5_agmt.c +@@ -2653,6 +2653,7 @@ agmt_update_init_status(Repl_Agmt *ra) + } else { + PR_Unlock(ra->lock); + } ++ slapi_ch_free((void **)&mods); + slapi_mod_done(&smod_start_time); + slapi_mod_done(&smod_end_time); + slapi_mod_done(&smod_status); +-- +2.49.0 + diff --git a/SOURCES/0029-Issue-6768-ns-slapd-crashes-when-a-referral-is-added.patch b/SOURCES/0029-Issue-6768-ns-slapd-crashes-when-a-referral-is-added.patch new file mode 100644 index 0000000..e82a4f9 --- /dev/null +++ b/SOURCES/0029-Issue-6768-ns-slapd-crashes-when-a-referral-is-added.patch @@ -0,0 +1,97 @@ +From 5cc13c70dfe22d95686bec9214c53f1b4114cd90 Mon Sep 17 00:00:00 2001 +From: James Chapman +Date: Fri, 1 Aug 2025 13:27:02 +0100 +Subject: [PATCH] Issue 6768 - ns-slapd crashes when a referral is added + (#6780) + +Bug description: When a paged result search is successfully run on a referred +suffix, we retrieve the search result set from the pblock and try to release +it. In this case the search result set is NULL, which triggers a SEGV during +the release. + +Fix description: If the search result code is LDAP_REFERRAL, skip deletion of +the search result set. Added test case. + +Fixes: https://github.com/389ds/389-ds-base/issues/6768 + +Reviewed by: @tbordaz, @progier389 (Thank you) +--- + .../paged_results/paged_results_test.py | 46 +++++++++++++++++++ + ldap/servers/slapd/opshared.c | 4 +- + 2 files changed, 49 insertions(+), 1 deletion(-) + +diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py +index fca48db0f..1bb94b53a 100644 +--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py ++++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py +@@ -1271,6 +1271,52 @@ def test_search_stress_abandon(create_40k_users, create_user): + paged_search(conn, create_40k_users.suffix, [req_ctrl], search_flt, searchreq_attrlist, abandon_rate=abandon_rate) + + ++def test_search_referral(topology_st): ++ """Test a paged search on a referred suffix doesnt crash the server. ++ ++ :id: c788bdbf-965b-4f12-ac24-d4d695e2cce2 ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1. Configure a default referral. ++ 2. Create a paged result search control. ++ 3. Paged result search on referral suffix (doesnt exist on the instance, triggering a referral). ++ 4. Check the server is still running. ++ 5. Remove referral. ++ ++ :expectedresults: ++ 1. Referral sucessfully set. ++ 2. Control created. ++ 3. Search returns ldap.REFERRAL (10). ++ 4. Server still running. ++ 5. Referral removed. 
++ """ ++ ++ page_size = 5 ++ SEARCH_SUFFIX = "dc=referme,dc=com" ++ REFERRAL = "ldap://localhost.localdomain:389/o%3dnetscaperoot" ++ ++ log.info('Configuring referral') ++ topology_st.standalone.config.set('nsslapd-referral', REFERRAL) ++ referral = topology_st.standalone.config.get_attr_val_utf8('nsslapd-referral') ++ assert (referral == REFERRAL) ++ ++ log.info('Create paged result search control') ++ req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') ++ ++ log.info('Perform a paged result search on referred suffix, no chase') ++ with pytest.raises(ldap.REFERRAL): ++ topology_st.standalone.search_ext_s(SEARCH_SUFFIX, ldap.SCOPE_SUBTREE, serverctrls=[req_ctrl]) ++ ++ log.info('Confirm instance is still running') ++ assert (topology_st.standalone.status()) ++ ++ log.info('Remove referral') ++ topology_st.standalone.config.remove_all('nsslapd-referral') ++ referral = topology_st.standalone.config.get_attr_val_utf8('nsslapd-referral') ++ assert (referral == None) ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c +index 14a7dcdfb..03ed60981 100644 +--- a/ldap/servers/slapd/opshared.c ++++ b/ldap/servers/slapd/opshared.c +@@ -879,7 +879,9 @@ op_shared_search(Slapi_PBlock *pb, int send_result) + /* Free the results if not "no_such_object" */ + void *sr = NULL; + slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr); +- be->be_search_results_release(&sr); ++ if (be->be_search_results_release != NULL) { ++ be->be_search_results_release(&sr); ++ } + } + pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx); + rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1); +-- +2.49.0 + diff --git a/SOURCES/Cargo-2.7.0-1.lock b/SOURCES/Cargo-2.7.0-1.lock new file mode 100644 index 0000000..49eacc7 --- /dev/null +++ b/SOURCES/Cargo-2.7.0-1.lock @@ -0,0 +1,1018 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "backtrace" +version = "0.3.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets 0.52.6", +] + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "cbindgen" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da6bc11b07529f16944307272d5bd9b22530bc7d05751717c9d416586cedab49" +dependencies = [ + "clap", + "heck", + "indexmap", + "log", + "proc-macro2", + "quote", + "serde", + "serde_json", + "syn 1.0.109", + "tempfile", + "toml", +] + +[[package]] +name = "cc" +version = "1.2.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3a42d84bb6b69d3a8b3eaacf0d88f179e1929695e1ad012b6cf64d9caaa5fd2" +dependencies = [ + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" + +[[package]] +name = "clap" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" +dependencies = [ + "atty", + "bitflags 1.3.2", + "clap_lex", + "indexmap", + "strsim", + "termcolor", + "textwrap", +] + +[[package]] +name = "clap_lex" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" 
+dependencies = [ + "os_str_bytes", +] + +[[package]] +name = "concread" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07fd8c4b53f0aafeec114fa1cd863f323880f790656f2d7508af83a9b5110e8d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", + "foldhash", + "lru", + "smallvec", + "sptr", + "tokio", + "tracing", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "entryuuid" +version = "0.1.0" +dependencies = [ + "cc", + "libc", + "paste", + "slapi_r_plugin", + "uuid", +] + +[[package]] +name = "entryuuid_syntax" +version = "0.1.0" +dependencies = [ + "cc", + "libc", + "paste", + "slapi_r_plugin", + "uuid", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fernet" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f" +dependencies = [ + "base64", + "byteorder", + "getrandom 0.2.16", + "openssl", + "zeroize", +] + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", +] + +[[package]] +name = 
"gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "io-uring" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "libc", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jobserver" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +dependencies = [ + "getrandom 0.3.3", + "libc", +] + +[[package]] +name = "libc" +version = "0.2.174" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" + +[[package]] +name = "librnsslapd" +version = "0.1.0" +dependencies = [ + "cbindgen", + "libc", + "slapd", +] + +[[package]] +name = "librslapd" +version = "0.1.0" +dependencies = [ + "cbindgen", + "concread", + "libc", + "slapd", +] + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "lru" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" +dependencies = [ + "hashbrown 0.15.4", +] + +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", +] + +[[package]] +name = 
"mio" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" +dependencies = [ + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", +] + +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "openssl" +version = "0.10.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "openssl-sys" +version = "0.9.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "os_str_bytes" +version = "6.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" + +[[package]] +name = "paste" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" +dependencies = [ + "paste-impl", + "proc-macro-hack", +] + +[[package]] +name = "paste-impl" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" +dependencies = [ + "proc-macro-hack", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "proc-macro-hack" +version = "0.5.20+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "pwdchan" +version = "0.1.0" +dependencies = [ + "base64", + "cc", + "libc", + "openssl", + "paste", + "slapi_r_plugin", + "uuid", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" 
+version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rustc-demangle" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" + +[[package]] +name = "rustix" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.60.2", +] + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "serde_json" +version = "1.0.142" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "slab" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" + +[[package]] +name = "slapd" +version = "0.1.0" +dependencies = [ + "fernet", +] + +[[package]] +name = "slapi_r_plugin" +version = "0.1.0" +dependencies = [ + "libc", + "paste", + "uuid", +] + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "sptr" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +dependencies = [ + 
"fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "textwrap" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" + +[[package]] +name = "tokio" +version = "1.47.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" +dependencies = [ + "backtrace", + "io-uring", + "libc", + "mio", + "pin-project-lite", + "slab", +] + +[[package]] +name = "toml" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +dependencies = [ + "serde", +] + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", +] + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "uuid" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.3", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags 2.9.1", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec index e89e257..617c251 100644 --- a/SPECS/389-ds-base.spec +++ b/SPECS/389-ds-base.spec @@ -46,9 +46,9 @@ ExcludeArch: i686 Summary: 389 Directory Server (base) Name: 389-ds-base -Version: 2.6.1 -Release: 6%{?dist} -License: GPL-3.0-or-later WITH GPL-3.0-389-ds-base-exception AND (0BSD OR Apache-2.0 OR MIT) AND (Apache-2.0 OR Apache-2.0 WITH LLVM-exception OR MIT) AND (Apache-2.0 OR BSD-2-Clause OR MIT) AND (Apache-2.0 OR BSL-1.0) AND (Apache-2.0 OR MIT OR Zlib) AND (Apache-2.0 OR MIT) AND (CC-BY-4.0 AND MIT) AND (MIT OR Apache-2.0) AND Unicode-3.0 AND (MIT OR CC0-1.0) AND (MIT OR Unlicense) AND 0BSD AND Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MIT AND ISC AND MPL-2.0 AND PSF-2.0 +Version: 2.7.0 +Release: 5%{?dist} +License: GPL-3.0-or-later WITH 
GPL-3.0-389-ds-base-exception AND (0BSD OR Apache-2.0 OR MIT) AND (Apache-2.0 OR Apache-2.0 WITH LLVM-exception OR MIT) AND (Apache-2.0 OR BSL-1.0) AND (Apache-2.0 OR LGPL-2.1-or-later OR MIT) AND (Apache-2.0 OR MIT OR Zlib) AND (Apache-2.0 OR MIT) AND (MIT OR Apache-2.0) AND Unicode-3.0 AND (MIT OR Unlicense) AND Apache-2.0 AND MIT AND MPL-2.0 AND Zlib URL: https://www.port389.org Conflicts: selinux-policy-base < 3.9.8 Conflicts: freeipa-server < 4.0.3 @@ -59,287 +59,102 @@ Provides: ldif2ldbm >= 0 ##### Bundled cargo crates list - START ##### Provides: bundled(crate(addr2line)) = 0.24.2 -Provides: bundled(crate(adler2)) = 2.0.0 -Provides: bundled(crate(ahash)) = 0.7.8 +Provides: bundled(crate(adler2)) = 2.0.1 +Provides: bundled(crate(allocator-api2)) = 0.2.21 Provides: bundled(crate(atty)) = 0.2.14 -Provides: bundled(crate(autocfg)) = 1.4.0 -Provides: bundled(crate(backtrace)) = 0.3.74 +Provides: bundled(crate(autocfg)) = 1.5.0 +Provides: bundled(crate(backtrace)) = 0.3.75 Provides: bundled(crate(base64)) = 0.13.1 -Provides: bundled(crate(bitflags)) = 2.8.0 +Provides: bundled(crate(bitflags)) = 2.9.1 Provides: bundled(crate(byteorder)) = 1.5.0 Provides: bundled(crate(cbindgen)) = 0.26.0 -Provides: bundled(crate(cc)) = 1.2.10 -Provides: bundled(crate(cfg-if)) = 1.0.0 +Provides: bundled(crate(cc)) = 1.2.31 +Provides: bundled(crate(cfg-if)) = 1.0.1 Provides: bundled(crate(clap)) = 3.2.25 Provides: bundled(crate(clap_lex)) = 0.2.4 -Provides: bundled(crate(concread)) = 0.2.21 -Provides: bundled(crate(crossbeam)) = 0.8.4 -Provides: bundled(crate(crossbeam-channel)) = 0.5.14 -Provides: bundled(crate(crossbeam-deque)) = 0.8.6 +Provides: bundled(crate(concread)) = 0.5.7 Provides: bundled(crate(crossbeam-epoch)) = 0.9.18 Provides: bundled(crate(crossbeam-queue)) = 0.3.12 Provides: bundled(crate(crossbeam-utils)) = 0.8.21 -Provides: bundled(crate(errno)) = 0.3.10 +Provides: bundled(crate(equivalent)) = 1.0.2 +Provides: bundled(crate(errno)) = 0.3.13 Provides: bundled(crate(fastrand)) = 2.3.0 Provides: bundled(crate(fernet)) = 0.1.4 +Provides: bundled(crate(foldhash)) = 0.1.5 Provides: bundled(crate(foreign-types)) = 0.3.2 Provides: bundled(crate(foreign-types-shared)) = 0.1.1 -Provides: bundled(crate(getrandom)) = 0.2.15 +Provides: bundled(crate(getrandom)) = 0.3.3 Provides: bundled(crate(gimli)) = 0.31.1 -Provides: bundled(crate(hashbrown)) = 0.12.3 +Provides: bundled(crate(hashbrown)) = 0.15.4 Provides: bundled(crate(heck)) = 0.4.1 Provides: bundled(crate(hermit-abi)) = 0.1.19 Provides: bundled(crate(indexmap)) = 1.9.3 -Provides: bundled(crate(instant)) = 0.1.13 -Provides: bundled(crate(itoa)) = 1.0.14 -Provides: bundled(crate(jobserver)) = 0.1.32 -Provides: bundled(crate(libc)) = 0.2.169 -Provides: bundled(crate(linux-raw-sys)) = 0.4.15 -Provides: bundled(crate(lock_api)) = 0.4.12 -Provides: bundled(crate(log)) = 0.4.25 -Provides: bundled(crate(lru)) = 0.7.8 -Provides: bundled(crate(memchr)) = 2.7.4 -Provides: bundled(crate(miniz_oxide)) = 0.8.3 +Provides: bundled(crate(io-uring)) = 0.7.9 +Provides: bundled(crate(itoa)) = 1.0.15 +Provides: bundled(crate(jobserver)) = 0.1.33 +Provides: bundled(crate(libc)) = 0.2.174 +Provides: bundled(crate(linux-raw-sys)) = 0.9.4 +Provides: bundled(crate(log)) = 0.4.27 +Provides: bundled(crate(lru)) = 0.13.0 +Provides: bundled(crate(memchr)) = 2.7.5 +Provides: bundled(crate(miniz_oxide)) = 0.8.9 +Provides: bundled(crate(mio)) = 1.0.4 Provides: bundled(crate(object)) = 0.36.7 -Provides: bundled(crate(once_cell)) = 1.20.2 -Provides: bundled(crate(openssl)) = 
0.10.68 +Provides: bundled(crate(once_cell)) = 1.21.3 +Provides: bundled(crate(openssl)) = 0.10.73 Provides: bundled(crate(openssl-macros)) = 0.1.1 -Provides: bundled(crate(openssl-sys)) = 0.9.104 +Provides: bundled(crate(openssl-sys)) = 0.9.109 Provides: bundled(crate(os_str_bytes)) = 6.6.1 -Provides: bundled(crate(parking_lot)) = 0.11.2 -Provides: bundled(crate(parking_lot_core)) = 0.8.6 Provides: bundled(crate(paste)) = 0.1.18 Provides: bundled(crate(paste-impl)) = 0.1.18 Provides: bundled(crate(pin-project-lite)) = 0.2.16 -Provides: bundled(crate(pkg-config)) = 0.3.31 -Provides: bundled(crate(ppv-lite86)) = 0.2.20 +Provides: bundled(crate(pkg-config)) = 0.3.32 Provides: bundled(crate(proc-macro-hack)) = 0.5.20+deprecated -Provides: bundled(crate(proc-macro2)) = 1.0.93 -Provides: bundled(crate(quote)) = 1.0.38 -Provides: bundled(crate(rand)) = 0.8.5 -Provides: bundled(crate(rand_chacha)) = 0.3.1 -Provides: bundled(crate(rand_core)) = 0.6.4 -Provides: bundled(crate(redox_syscall)) = 0.2.16 -Provides: bundled(crate(rustc-demangle)) = 0.1.24 -Provides: bundled(crate(rustix)) = 0.38.44 -Provides: bundled(crate(ryu)) = 1.0.18 -Provides: bundled(crate(scopeguard)) = 1.2.0 -Provides: bundled(crate(serde)) = 1.0.217 -Provides: bundled(crate(serde_derive)) = 1.0.217 -Provides: bundled(crate(serde_json)) = 1.0.137 +Provides: bundled(crate(proc-macro2)) = 1.0.95 +Provides: bundled(crate(quote)) = 1.0.40 +Provides: bundled(crate(r-efi)) = 5.3.0 +Provides: bundled(crate(rustc-demangle)) = 0.1.26 +Provides: bundled(crate(rustix)) = 1.0.8 +Provides: bundled(crate(ryu)) = 1.0.20 +Provides: bundled(crate(serde)) = 1.0.219 +Provides: bundled(crate(serde_derive)) = 1.0.219 +Provides: bundled(crate(serde_json)) = 1.0.142 Provides: bundled(crate(shlex)) = 1.3.0 -Provides: bundled(crate(smallvec)) = 1.13.2 +Provides: bundled(crate(slab)) = 0.4.10 +Provides: bundled(crate(smallvec)) = 1.15.1 +Provides: bundled(crate(sptr)) = 0.3.2 Provides: bundled(crate(strsim)) = 0.10.0 -Provides: bundled(crate(syn)) = 2.0.96 -Provides: bundled(crate(tempfile)) = 3.15.0 +Provides: bundled(crate(syn)) = 2.0.104 +Provides: bundled(crate(tempfile)) = 3.20.0 Provides: bundled(crate(termcolor)) = 1.4.1 -Provides: bundled(crate(textwrap)) = 0.16.1 -Provides: bundled(crate(tokio)) = 1.43.0 -Provides: bundled(crate(tokio-macros)) = 2.5.0 +Provides: bundled(crate(textwrap)) = 0.16.2 +Provides: bundled(crate(tokio)) = 1.47.1 Provides: bundled(crate(toml)) = 0.5.11 -Provides: bundled(crate(unicode-ident)) = 1.0.15 +Provides: bundled(crate(tracing)) = 0.1.41 +Provides: bundled(crate(tracing-attributes)) = 0.1.30 +Provides: bundled(crate(tracing-core)) = 0.1.34 +Provides: bundled(crate(unicode-ident)) = 1.0.18 Provides: bundled(crate(uuid)) = 0.8.2 Provides: bundled(crate(vcpkg)) = 0.2.15 -Provides: bundled(crate(version_check)) = 0.9.5 -Provides: bundled(crate(wasi)) = 0.11.0+wasi_snapshot_preview1 +Provides: bundled(crate(wasi)) = 0.14.2+wasi_0.2.4 Provides: bundled(crate(winapi)) = 0.3.9 Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0 Provides: bundled(crate(winapi-util)) = 0.1.9 Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0 -Provides: bundled(crate(windows-sys)) = 0.59.0 -Provides: bundled(crate(windows-targets)) = 0.52.6 -Provides: bundled(crate(windows_aarch64_gnullvm)) = 0.52.6 -Provides: bundled(crate(windows_aarch64_msvc)) = 0.52.6 -Provides: bundled(crate(windows_i686_gnu)) = 0.52.6 -Provides: bundled(crate(windows_i686_gnullvm)) = 0.52.6 -Provides: bundled(crate(windows_i686_msvc)) = 0.52.6 
-Provides: bundled(crate(windows_x86_64_gnu)) = 0.52.6 -Provides: bundled(crate(windows_x86_64_gnullvm)) = 0.52.6 -Provides: bundled(crate(windows_x86_64_msvc)) = 0.52.6 -Provides: bundled(crate(zerocopy)) = 0.7.35 -Provides: bundled(crate(zerocopy-derive)) = 0.7.35 +Provides: bundled(crate(windows-link)) = 0.1.3 +Provides: bundled(crate(windows-sys)) = 0.60.2 +Provides: bundled(crate(windows-targets)) = 0.53.3 +Provides: bundled(crate(windows_aarch64_gnullvm)) = 0.53.0 +Provides: bundled(crate(windows_aarch64_msvc)) = 0.53.0 +Provides: bundled(crate(windows_i686_gnu)) = 0.53.0 +Provides: bundled(crate(windows_i686_gnullvm)) = 0.53.0 +Provides: bundled(crate(windows_i686_msvc)) = 0.53.0 +Provides: bundled(crate(windows_x86_64_gnu)) = 0.53.0 +Provides: bundled(crate(windows_x86_64_gnullvm)) = 0.53.0 +Provides: bundled(crate(windows_x86_64_msvc)) = 0.53.0 +Provides: bundled(crate(wit-bindgen-rt)) = 0.39.0 Provides: bundled(crate(zeroize)) = 1.8.1 Provides: bundled(crate(zeroize_derive)) = 1.4.2 -Provides: bundled(npm(@aashutoshrathi/word-wrap)) = 1.2.6 -Provides: bundled(npm(@eslint-community/eslint-utils)) = 4.4.0 -Provides: bundled(npm(@eslint-community/regexpp)) = 4.5.1 -Provides: bundled(npm(@eslint/eslintrc)) = 2.0.3 -Provides: bundled(npm(@eslint/js)) = 8.42.0 -Provides: bundled(npm(@fortawesome/fontawesome-common-types)) = 0.2.36 -Provides: bundled(npm(@fortawesome/fontawesome-svg-core)) = 1.2.36 -Provides: bundled(npm(@fortawesome/free-solid-svg-icons)) = 5.15.4 -Provides: bundled(npm(@fortawesome/react-fontawesome)) = 0.1.19 -Provides: bundled(npm(@humanwhocodes/config-array)) = 0.11.10 -Provides: bundled(npm(@humanwhocodes/module-importer)) = 1.0.1 -Provides: bundled(npm(@humanwhocodes/object-schema)) = 1.2.1 -Provides: bundled(npm(@nodelib/fs.scandir)) = 2.1.5 -Provides: bundled(npm(@nodelib/fs.stat)) = 2.0.5 -Provides: bundled(npm(@nodelib/fs.walk)) = 1.2.8 -Provides: bundled(npm(@patternfly/patternfly)) = 4.224.2 -Provides: bundled(npm(@patternfly/react-charts)) = 6.94.19 -Provides: bundled(npm(@patternfly/react-core)) = 4.276.8 -Provides: bundled(npm(@patternfly/react-icons)) = 4.93.6 -Provides: bundled(npm(@patternfly/react-styles)) = 4.92.6 -Provides: bundled(npm(@patternfly/react-table)) = 4.113.0 -Provides: bundled(npm(@patternfly/react-tokens)) = 4.94.6 -Provides: bundled(npm(@types/d3-array)) = 3.0.5 -Provides: bundled(npm(@types/d3-color)) = 3.1.0 -Provides: bundled(npm(@types/d3-ease)) = 3.0.0 -Provides: bundled(npm(@types/d3-interpolate)) = 3.0.1 -Provides: bundled(npm(@types/d3-path)) = 3.0.0 -Provides: bundled(npm(@types/d3-scale)) = 4.0.3 -Provides: bundled(npm(@types/d3-shape)) = 3.1.1 -Provides: bundled(npm(@types/d3-time)) = 3.0.0 -Provides: bundled(npm(@types/d3-timer)) = 3.0.0 -Provides: bundled(npm(acorn)) = 8.8.2 -Provides: bundled(npm(acorn-jsx)) = 5.3.2 -Provides: bundled(npm(ajv)) = 6.12.6 -Provides: bundled(npm(ansi-regex)) = 5.0.1 -Provides: bundled(npm(ansi-styles)) = 4.3.0 -Provides: bundled(npm(argparse)) = 2.0.1 -Provides: bundled(npm(attr-accept)) = 1.1.3 -Provides: bundled(npm(balanced-match)) = 1.0.2 -Provides: bundled(npm(brace-expansion)) = 1.1.11 -Provides: bundled(npm(callsites)) = 3.1.0 -Provides: bundled(npm(chalk)) = 4.1.2 -Provides: bundled(npm(color-convert)) = 2.0.1 -Provides: bundled(npm(color-name)) = 1.1.4 -Provides: bundled(npm(concat-map)) = 0.0.1 -Provides: bundled(npm(core-js)) = 2.6.12 -Provides: bundled(npm(cross-spawn)) = 7.0.6 -Provides: bundled(npm(d3-array)) = 3.2.4 -Provides: bundled(npm(d3-color)) = 3.1.0 -Provides: 
bundled(npm(d3-ease)) = 3.0.1 -Provides: bundled(npm(d3-format)) = 3.1.0 -Provides: bundled(npm(d3-interpolate)) = 3.0.1 -Provides: bundled(npm(d3-path)) = 3.1.0 -Provides: bundled(npm(d3-scale)) = 4.0.2 -Provides: bundled(npm(d3-shape)) = 3.2.0 -Provides: bundled(npm(d3-time)) = 3.1.0 -Provides: bundled(npm(d3-time-format)) = 4.1.0 -Provides: bundled(npm(d3-timer)) = 3.0.1 -Provides: bundled(npm(debug)) = 4.3.4 -Provides: bundled(npm(deep-is)) = 0.1.4 -Provides: bundled(npm(delaunator)) = 4.0.1 -Provides: bundled(npm(delaunay-find)) = 0.0.6 -Provides: bundled(npm(doctrine)) = 3.0.0 -Provides: bundled(npm(encoding)) = 0.1.13 -Provides: bundled(npm(escape-string-regexp)) = 4.0.0 -Provides: bundled(npm(eslint)) = 8.42.0 -Provides: bundled(npm(eslint-plugin-react-hooks)) = 4.6.0 -Provides: bundled(npm(eslint-scope)) = 7.2.0 -Provides: bundled(npm(eslint-visitor-keys)) = 3.4.1 -Provides: bundled(npm(espree)) = 9.5.2 -Provides: bundled(npm(esquery)) = 1.5.0 -Provides: bundled(npm(esrecurse)) = 4.3.0 -Provides: bundled(npm(estraverse)) = 5.3.0 -Provides: bundled(npm(esutils)) = 2.0.3 -Provides: bundled(npm(fast-deep-equal)) = 3.1.3 -Provides: bundled(npm(fast-json-stable-stringify)) = 2.1.0 -Provides: bundled(npm(fast-levenshtein)) = 2.0.6 -Provides: bundled(npm(fastq)) = 1.15.0 -Provides: bundled(npm(file-entry-cache)) = 6.0.1 -Provides: bundled(npm(file-selector)) = 0.1.19 -Provides: bundled(npm(find-up)) = 5.0.0 -Provides: bundled(npm(flat-cache)) = 3.0.4 -Provides: bundled(npm(flatted)) = 3.2.7 -Provides: bundled(npm(focus-trap)) = 6.9.2 -Provides: bundled(npm(fs.realpath)) = 1.0.0 -Provides: bundled(npm(gettext-parser)) = 2.0.0 -Provides: bundled(npm(glob)) = 7.2.3 -Provides: bundled(npm(glob-parent)) = 6.0.2 -Provides: bundled(npm(globals)) = 13.20.0 -Provides: bundled(npm(graphemer)) = 1.4.0 -Provides: bundled(npm(has-flag)) = 4.0.0 -Provides: bundled(npm(hoist-non-react-statics)) = 3.3.2 -Provides: bundled(npm(iconv-lite)) = 0.6.3 -Provides: bundled(npm(ignore)) = 5.2.4 -Provides: bundled(npm(import-fresh)) = 3.3.0 -Provides: bundled(npm(imurmurhash)) = 0.1.4 -Provides: bundled(npm(inflight)) = 1.0.6 -Provides: bundled(npm(inherits)) = 2.0.4 -Provides: bundled(npm(internmap)) = 2.0.3 -Provides: bundled(npm(is-extglob)) = 2.1.1 -Provides: bundled(npm(is-glob)) = 4.0.3 -Provides: bundled(npm(is-path-inside)) = 3.0.3 -Provides: bundled(npm(isexe)) = 2.0.0 -Provides: bundled(npm(js-tokens)) = 4.0.0 -Provides: bundled(npm(js-yaml)) = 4.1.0 -Provides: bundled(npm(json-schema-traverse)) = 0.4.1 -Provides: bundled(npm(json-stable-stringify-without-jsonify)) = 1.0.1 -Provides: bundled(npm(json-stringify-safe)) = 5.0.1 -Provides: bundled(npm(levn)) = 0.4.1 -Provides: bundled(npm(locate-path)) = 6.0.0 -Provides: bundled(npm(lodash)) = 4.17.21 -Provides: bundled(npm(lodash.merge)) = 4.6.2 -Provides: bundled(npm(loose-envify)) = 1.4.0 -Provides: bundled(npm(minimatch)) = 3.1.2 -Provides: bundled(npm(ms)) = 2.1.2 -Provides: bundled(npm(natural-compare)) = 1.4.0 -Provides: bundled(npm(object-assign)) = 4.1.1 -Provides: bundled(npm(once)) = 1.4.0 -Provides: bundled(npm(optionator)) = 0.9.3 -Provides: bundled(npm(p-limit)) = 3.1.0 -Provides: bundled(npm(p-locate)) = 5.0.0 -Provides: bundled(npm(parent-module)) = 1.0.1 -Provides: bundled(npm(path-exists)) = 4.0.0 -Provides: bundled(npm(path-is-absolute)) = 1.0.1 -Provides: bundled(npm(path-key)) = 3.1.1 -Provides: bundled(npm(popper.js)) = 1.16.1 -Provides: bundled(npm(prelude-ls)) = 1.2.1 -Provides: bundled(npm(prop-types)) = 15.8.1 -Provides: 
bundled(npm(prop-types-extra)) = 1.1.1 -Provides: bundled(npm(punycode)) = 2.3.0 -Provides: bundled(npm(queue-microtask)) = 1.2.3 -Provides: bundled(npm(react)) = 17.0.2 -Provides: bundled(npm(react-dom)) = 17.0.2 -Provides: bundled(npm(react-dropzone)) = 9.0.0 -Provides: bundled(npm(react-fast-compare)) = 3.2.2 -Provides: bundled(npm(react-is)) = 16.13.1 -Provides: bundled(npm(resolve-from)) = 4.0.0 -Provides: bundled(npm(reusify)) = 1.0.4 -Provides: bundled(npm(rimraf)) = 3.0.2 -Provides: bundled(npm(run-parallel)) = 1.2.0 -Provides: bundled(npm(safe-buffer)) = 5.2.1 -Provides: bundled(npm(safer-buffer)) = 2.1.2 -Provides: bundled(npm(scheduler)) = 0.20.2 -Provides: bundled(npm(shebang-command)) = 2.0.0 -Provides: bundled(npm(shebang-regex)) = 3.0.0 -Provides: bundled(npm(strip-ansi)) = 6.0.1 -Provides: bundled(npm(strip-json-comments)) = 3.1.1 -Provides: bundled(npm(supports-color)) = 7.2.0 -Provides: bundled(npm(tabbable)) = 5.3.3 -Provides: bundled(npm(text-table)) = 0.2.0 -Provides: bundled(npm(tippy.js)) = 5.1.2 -Provides: bundled(npm(tslib)) = 2.5.3 -Provides: bundled(npm(type-check)) = 0.4.0 -Provides: bundled(npm(type-fest)) = 0.20.2 -Provides: bundled(npm(uri-js)) = 4.4.1 -Provides: bundled(npm(victory-area)) = 36.6.10 -Provides: bundled(npm(victory-axis)) = 36.6.10 -Provides: bundled(npm(victory-bar)) = 36.6.10 -Provides: bundled(npm(victory-brush-container)) = 36.6.10 -Provides: bundled(npm(victory-chart)) = 36.6.10 -Provides: bundled(npm(victory-core)) = 36.6.10 -Provides: bundled(npm(victory-create-container)) = 36.6.10 -Provides: bundled(npm(victory-cursor-container)) = 36.6.10 -Provides: bundled(npm(victory-group)) = 36.6.10 -Provides: bundled(npm(victory-legend)) = 36.6.10 -Provides: bundled(npm(victory-line)) = 36.6.10 -Provides: bundled(npm(victory-pie)) = 36.6.10 -Provides: bundled(npm(victory-polar-axis)) = 36.6.10 -Provides: bundled(npm(victory-scatter)) = 36.6.10 -Provides: bundled(npm(victory-selection-container)) = 36.6.10 -Provides: bundled(npm(victory-shared-events)) = 36.6.10 -Provides: bundled(npm(victory-stack)) = 36.6.10 -Provides: bundled(npm(victory-tooltip)) = 36.6.10 -Provides: bundled(npm(victory-vendor)) = 36.6.10 -Provides: bundled(npm(victory-voronoi-container)) = 36.6.10 -Provides: bundled(npm(victory-zoom-container)) = 36.6.10 -Provides: bundled(npm(warning)) = 4.0.3 -Provides: bundled(npm(which)) = 2.0.2 -Provides: bundled(npm(wrappy)) = 1.0.2 -Provides: bundled(npm(yocto-queue)) = 0.1.0 ##### Bundled cargo crates list - END ##### BuildRequires: nspr-devel >= 4.32 @@ -407,6 +222,7 @@ BuildRequires: python%{python3_pkgversion}-argparse-manpage BuildRequires: python%{python3_pkgversion}-libselinux BuildRequires: python%{python3_pkgversion}-policycoreutils BuildRequires: python%{python3_pkgversion}-cryptography +BuildRequires: python%{python3_pkgversion}-psutil # For cockpit %if %{use_cockpit} @@ -470,16 +286,38 @@ Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download %endif Source4: 389-ds-base.sysusers -Patch: 0001-Issue-6468-Fix-building-for-older-versions-of-Python.patch -Patch: 0002-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch -Patch: 0003-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch -Patch: 0004-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch -Patch: 0005-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch -Patch: 0006-Issue-6258-Mitigate-race-condition-in-paged_results_.patch -Patch: 0007-Issue-6229-After-an-initial-failure-subsequent-onlin.patch -Patch: 
0008-Issue-6554-During-import-of-entries-without-nsUnique.patch -Patch: 0009-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch -Patch: 0010-Issue-6090-dbscan-use-bdb-by-default.patch +Source5: vendor-%{version}-1.tar.gz +Source6: Cargo-%{version}-1.lock + +Patch: 0001-Issue-6377-syntax-error-in-setup.py-6378.patch +Patch: 0002-Issue-6838-lib389-replica.py-is-using-nonexistent-da.patch +Patch: 0003-Issue-6680-instance-read-only-mode-is-broken-6681.patch +Patch: 0004-Issue-6825-RootDN-Access-Control-Plugin-with-wildcar.patch +Patch: 0005-Issue-6119-Synchronise-accept_thread-with-slapd_daem.patch +Patch: 0006-Issue-6782-Improve-paged-result-locking.patch +Patch: 0007-Issue-6822-Backend-creation-cleanup-and-Database-UI-.patch +Patch: 0008-Issue-6857-uiduniq-allow-specifying-match-rules-in-t.patch +Patch: 0009-Issue-6756-CLI-UI-Properly-handle-disabled-NDN-cache.patch +Patch: 0010-Issue-6859-str2filter-is-not-fully-applying-matching.patch +Patch: 0011-Issue-6872-compressed-log-rotation-creates-files-wit.patch +Patch: 0012-Issue-6878-Prevent-repeated-disconnect-logs-during-s.patch +Patch: 0013-Issue-6772-dsconf-Replicas-with-the-consumer-role-al.patch +Patch: 0014-Issue-6893-Log-user-that-is-updated-during-password-.patch +Patch: 0015-Issue-6895-Crash-if-repl-keep-alive-entry-can-not-be.patch +Patch: 0016-Issue-6250-Add-test-for-entryUSN-overflow-on-failed-.patch +Patch: 0017-Issue-6594-Add-test-for-numSubordinates-replication-.patch +Patch: 0018-Issue-6884-Mask-password-hashes-in-audit-logs-6885.patch +Patch: 0019-Issue-6897-Fix-disk-monitoring-test-failures-and-imp.patch +Patch: 0020-Issue-6339-Address-Coverity-scan-issues-in-memberof-.patch +Patch: 0021-Issue-6468-CLI-Fix-default-error-log-level.patch +Patch: 0022-Issues-6913-6886-6250-Adjust-xfail-marks-6914.patch +Patch: 0023-Issue-6181-RFE-Allow-system-to-manage-uid-gid-at-sta.patch +Patch: 0024-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch +Patch: 0025-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch +Patch: 0026-Issue-6850-AddressSanitizer-memory-leak-in-mdb_init.patch +Patch: 0027-Issue-6848-AddressSanitizer-leak-in-do_search.patch +Patch: 0028-Issue-6865-AddressSanitizer-leak-in-agmt_update_init.patch +Patch: 0029-Issue-6768-ns-slapd-crashes-when-a-referral-is-added.patch %description 389 Directory Server is an LDAPv3 compliant server. 
The base package includes @@ -560,6 +398,7 @@ Requires: python%{python3_pkgversion}-argcomplete Requires: python%{python3_pkgversion}-libselinux Requires: python%{python3_pkgversion}-setuptools Requires: python%{python3_pkgversion}-cryptography +Requires: python%{python3_pkgversion}-psutil %{?python_provide:%python_provide python%{python3_pkgversion}-lib389} %description -n python%{python3_pkgversion}-lib389 @@ -582,6 +421,10 @@ A cockpit UI Plugin for configuring and administering the 389 Directory Server %prep %autosetup -p1 -n %{name}-%{version} +rm -rf vendor +tar xzf %{SOURCE5} +cp %{SOURCE6} src/Cargo.lock + %if %{bundle_jemalloc} %setup -q -n %{name}-%{version} -T -D -b 3 %endif @@ -643,7 +486,7 @@ pushd ../%{jemalloc_name}-%{jemalloc_ver} --libdir=%{_libdir}/%{pkgname}/lib \ --bindir=%{_libdir}/%{pkgname}/bin \ --enable-prof -make %{?_smp_mflags} +%make_build popd %endif @@ -678,8 +521,7 @@ sed -i "1s/\"1\"/\"8\"/" %{_builddir}/%{name}-%{version}/src/lib389/man/dscreat # Generate symbolic info for debuggers export XCFLAGS=$RPM_OPT_FLAGS -#make %{?_smp_mflags} -make +%make_build %install @@ -922,6 +764,37 @@ exit 0 %endif %changelog +* Tue Aug 05 2025 Viktor Ashirov - 2.7.0-5 +- Resolves: RHEL-89762 - dsidm Error: float() argument must be a string or a number, not 'NoneType' [rhel-9] +- Resolves: RHEL-92041 - Memory leak in roles_cache_create_object_from_entry +- Resolves: RHEL-95444 - ns-slapd[xxxx]: segfault at 10d7d0d0 ip 00007ff734050cdb sp 00007ff6de9f1430 error 6 in libslapd.so.0.1.0[7ff733ec0000+1b3000] [rhel-9] +- Resolves: RHEL-104821 - ipa-restore fails to restore SELinux contexts, causes ns-slapd AVC denials on /dev/shm after restore. +- Resolves: RHEL-107005 - Failure to get Server monitoring data when NDN cache is disabled. [rhel-9] +- Resolves: RHEL-107581 - segfault - error 4 in libpthread-2.28.so [rhel-9] +- Resolves: RHEL-107585 - ns-slapd crashed when we add nsslapd-referral [rhel-9] +- Resolves: RHEL-107586 - CWE-284 dirsrv log rotation creates files with world readable permission [rhel-9] +- Resolves: RHEL-107587 - CWE-532 Created user password hash available to see in audit log [rhel-9] +- Resolves: RHEL-107588 - CWE-778 Log doesn't show what user gets password changed by administrator [rhel-9] + +* Mon Jul 21 2025 Viktor Ashirov - 2.7.0-4 +- Resolves: RHEL-61347 - Directory Server is unavailable after a restart with nsslapd-readonly=on and consumes 100% CPU + +* Tue Jul 01 2025 Viktor Ashirov - 2.7.0-3 +- Resolves: RHEL-77983 - Defects found by OpenScanHub +- Resolves: RHEL-79673 - Improve the "result" field of ipa-healthcheck if replicas are busy +- Resolves: RHEL-80496 - Can't rename users member of automember rule [rhel-9] +- Resolves: RHEL-81141 - Healthcheck tool should warn admin about creating a substring index on membership attribute [rhel-9] +- Resolves: RHEL-89736 - dsconf backend replication monitor fails if replica id starts with 0 [rhel-9] +- Resolves: RHEL-89745 - ns-slapd crash in dbmdb_import_prepare_worker_entry() [rhel-9] +- Resolves: RHEL-89753 - Nested group does not receive memberOf attribute [rhel-9] +- Resolves: RHEL-89769 - Crash in __strlen_sse2 when using the nsRole filter rewriter. 
[rhel-9] +- Resolves: RHEL-89782 - RHDS12.2 NSMMReplicationPlugin - release_replica Unable to parse the response [rhel-9] +- Resolves: RHEL-95768 - Improve error message when bulk import connection is closed [rhel-9] +- Resolves: RHEL-101189 - lib389/replica.py is using unexisting datetime.UTC in python3.9 + +* Mon Jun 30 2025 Viktor Ashirov - 2.7.0-1 +- Resolves: RHEL-80163 - Rebase 389-ds-base to 2.7.x + * Fri Mar 14 2025 Viktor Ashirov - 2.6.1-6 - Resolves: RHEL-82271 - ipa-restore is failing with "Failed to start Directory Service"