import CS 389-ds-base-2.7.0-5.el9
parent b5464a6352
commit b1fa2c887d
@@ -1,2 +1,3 @@
-25969f6e65d79aa29671eff7185e4307ff3c08a0 SOURCES/389-ds-base-2.6.1.tar.bz2
+e9ce5b0affef3f7a319958610c5382152f1b559f SOURCES/389-ds-base-2.7.0.tar.bz2
 1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2
+b183c1ebee9c1d81d4b394df6de6521a8b333cbc SOURCES/vendor-2.7.0-1.tar.gz
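The first field on each line above is a SHA-1 digest (40 hex characters) of the corresponding tarball under SOURCES/. A minimal integrity check in Python, assuming the tarballs are present locally (the helper below is illustrative, not part of the repository):

    import hashlib

    def sha1_of(path):
        # Hash the tarball in chunks so large archives do not fill memory
        h = hashlib.sha1()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 16), b''):
                h.update(chunk)
        return h.hexdigest()

    assert sha1_of('SOURCES/389-ds-base-2.7.0.tar.bz2') == \
        'e9ce5b0affef3f7a319958610c5382152f1b559f'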
.gitignore (vendored, 3 lines changed)
@@ -1,2 +1,3 @@
-SOURCES/389-ds-base-2.6.1.tar.bz2
+SOURCES/389-ds-base-2.7.0.tar.bz2
 SOURCES/jemalloc-5.3.0.tar.bz2
+SOURCES/vendor-2.7.0-1.tar.gz
SOURCES/0001-Issue-6377-syntax-error-in-setup.py-6378.patch (new file, 40 lines)
@@ -0,0 +1,40 @@
From 5903fac2334f984d18aea663735fb260d6b100ed Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 22 Oct 2024 17:26:46 +0200
Subject: [PATCH] Issue 6377 - syntax error in setup.py (#6378)

A syntax error due to badly nested quotes in dblib.py causes trouble in setup.py and in dsconf dblib bdb2mdb/mdb2bdb.
Fix it by using double quotes for the f-string and single quotes for the embedded strings.

Issue: #6377

Reviewed by: @tbordaz, @droideck (Thanks!)
---
src/lib389/lib389/cli_ctl/dblib.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/lib389/lib389/cli_ctl/dblib.py b/src/lib389/lib389/cli_ctl/dblib.py
index ff81f0e19..3f6e7b456 100644
--- a/src/lib389/lib389/cli_ctl/dblib.py
+++ b/src/lib389/lib389/cli_ctl/dblib.py
@@ -183,7 +183,7 @@ def export_changelog(be, dblib):
return False
try:
cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname']
- _log.info(f'Exporting changelog {cl5dbname} to {be['cl5name']}')
+ _log.info(f"Exporting changelog {cl5dbname} to {be['cl5name']}")
run_dbscan(['-D', dblib, '-f', cl5dbname, '-X', be['cl5name']])
return True
except subprocess.CalledProcessError as e:
@@ -194,7 +194,7 @@ def import_changelog(be, dblib):
# import backend changelog
try:
cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname']
- _log.info(f'Importing changelog {cl5dbname} from {be['cl5name']}')
+ _log.info(f"Importing changelog {cl5dbname} from {be['cl5name']}")
run_dbscan(['-D', dblib, '-f', cl5dbname, '--import', be['cl5name'], '--do-it'])
return True
except subprocess.CalledProcessError as e:
--
2.49.0
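The bug fixed above is Python-version dependent: before Python 3.12 (PEP 701), an f-string expression cannot reuse the quote character that delimits the f-string itself. A standalone sketch with a made-up dictionary:

    # Broken on Python < 3.12 -- the inner quotes terminate the f-string:
    #     msg = f'Exporting changelog to {be['cl5name']}'   # SyntaxError
    be = {'cl5name': '/tmp/changelog.ldif'}

    # The patch's form: double quotes outside, single quotes inside.
    msg = f"Exporting changelog to {be['cl5name']}"
    print(msg)  # Exporting changelog to /tmp/changelog.ldif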
@@ -1,60 +0,0 @@
From 0921400a39b61687db2bc55ebd5021eef507e960 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Tue, 28 Jan 2025 21:05:49 +0100
Subject: [PATCH] Issue 6468 - Fix building for older versions of Python

Bug Description:
Structural Pattern Matching was added in Python 3.10; older versions
do not support it.

Fix Description:
Replace `match` and `case` statements with `if-elif`.

Relates: https://github.com/389ds/389-ds-base/issues/6468

Reviewed by: @droideck (Thanks!)
---
src/lib389/lib389/cli_conf/logging.py | 27 ++++++++++++++-------------
1 file changed, 14 insertions(+), 13 deletions(-)

diff --git a/src/lib389/lib389/cli_conf/logging.py b/src/lib389/lib389/cli_conf/logging.py
index 2e86f2de8..d1e32822c 100644
--- a/src/lib389/lib389/cli_conf/logging.py
+++ b/src/lib389/lib389/cli_conf/logging.py
@@ -234,19 +234,20 @@ def get_log_config(inst, basedn, log, args):
attr_map = {}
levels = {}

- match args.logtype:
- case "access":
- attr_map = ACCESS_ATTR_MAP
- levels = ACCESS_LEVELS
- case "error":
- attr_map = ERROR_ATTR_MAP
- levels = ERROR_LEVELS
- case "security":
- attr_map = SECURITY_ATTR_MAP
- case "audit":
- attr_map = AUDIT_ATTR_MAP
- case "auditfail":
- attr_map = AUDITFAIL_ATTR_MAP
+ if args.logtype == "access":
+ attr_map = ACCESS_ATTR_MAP
+ levels = ACCESS_LEVELS
+ elif args.logtype == "error":
+ attr_map = ERROR_ATTR_MAP
+ levels = ERROR_LEVELS
+ elif args.logtype == "security":
+ attr_map = SECURITY_ATTR_MAP
+ elif args.logtype == "audit":
+ attr_map = AUDIT_ATTR_MAP
+ elif args.logtype == "auditfail":
+ attr_map = AUDITFAIL_ATTR_MAP
+ else:
+ raise ValueError(f"Unknown logtype: {args.logtype}")

sorted_results = []
for attr, value in attrs.items():
--
2.48.0
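The two forms in the hunk above are nearly equivalent; structural pattern matching (PEP 634) simply requires Python >= 3.10, hence the if/elif fallback. Note the rewrite also adds an else branch that raises, which the match statement did not have (an unmatched subject just falls through). A self-contained sketch of the pattern, with placeholder maps:

    ACCESS_ATTR_MAP = {'access': True}   # placeholder values
    ERROR_ATTR_MAP = {'error': True}

    def pick_attr_map(logtype):
        # if/elif chain runs on Python 3.9, unlike match/case
        if logtype == "access":
            return ACCESS_ATTR_MAP
        elif logtype == "error":
            return ERROR_ATTR_MAP
        else:
            raise ValueError(f"Unknown logtype: {logtype}")

    print(pick_attr_map("access"))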
@@ -1,146 +0,0 @@
From 12f9bf81e834549db02b1243ecf769b511c9f69f Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 31 Jan 2025 08:54:27 -0500
Subject: [PATCH] Issue 6489 - After log rotation refresh the FD pointer

Description:

When flushing a log buffer, we get a FD for the log prior to checking if the
log should be rotated. If the log is rotated, that FD reference is now
invalid and needs to be refreshed before proceeding.

Relates: https://github.com/389ds/389-ds-base/issues/6489

Reviewed by: tbordaz (Thanks!)
---
.../suites/logging/log_flush_rotation_test.py | 81 +++++++++++++++++++
ldap/servers/slapd/log.c | 18 +++++
2 files changed, 99 insertions(+)
create mode 100644 dirsrvtests/tests/suites/logging/log_flush_rotation_test.py

diff --git a/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py b/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py
new file mode 100644
index 000000000..b33a622e1
--- /dev/null
+++ b/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py
@@ -0,0 +1,81 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2025 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import os
+import logging
+import time
+import pytest
+from lib389._constants import DEFAULT_SUFFIX, PW_DM
+from lib389.tasks import ImportTask
+from lib389.idm.user import UserAccounts
+from lib389.topologies import topology_st as topo
+
+
+log = logging.getLogger(__name__)
+
+
+def test_log_flush_and_rotation_crash(topo):
+ """Make sure server does not crash when flushing a buffer and rotating
+ the log at the same time
+
+ :id: d4b0af2f-48b2-45f5-ae8b-f06f692c3133
+ :setup: Standalone Instance
+ :steps:
+ 1. Enable all logs
+ 2. Enable log buffering for all logs
+ 3. Set rotation time unit to 1 minute
+ 4. Make sure server is still running after 1 minute
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ inst = topo.standalone
+
+ # Enable logging and buffering
+ inst.config.set("nsslapd-auditlog-logging-enabled", "on")
+ inst.config.set("nsslapd-accesslog-logbuffering", "on")
+ inst.config.set("nsslapd-auditlog-logbuffering", "on")
+ inst.config.set("nsslapd-errorlog-logbuffering", "on")
+ inst.config.set("nsslapd-securitylog-logbuffering", "on")
+
+ # Set rotation policy to trigger rotation asap
+ inst.config.set("nsslapd-accesslog-logrotationtimeunit", "minute")
+ inst.config.set("nsslapd-auditlog-logrotationtimeunit", "minute")
+ inst.config.set("nsslapd-errorlog-logrotationtimeunit", "minute")
+ inst.config.set("nsslapd-securitylog-logrotationtimeunit", "minute")
+
+ #
+ # Performs ops to populate all the logs
+ #
+ # Access & audit log
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
+ user = users.create_test_user()
+ user.set("userPassword", PW_DM)
+ # Security log
+ user.bind(PW_DM)
+ # Error log
+ import_task = ImportTask(inst)
+ import_task.import_suffix_from_ldif(ldiffile="/not/here",
+ suffix=DEFAULT_SUFFIX)
+
+ # Wait a minute and make sure the server did not crash
+ log.info("Sleep until logs are flushed and rotated")
+ time.sleep(61)
+
+ assert inst.status()
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main(["-s", CURRENT_FILE])
+
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 8352f4abd..c1260a203 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -6746,6 +6746,23 @@ log_refresh_state(int32_t log_type)
return 0;
}
}
+static LOGFD
+log_refresh_fd(int32_t log_type)
+{
+ switch (log_type) {
+ case SLAPD_ACCESS_LOG:
+ return loginfo.log_access_fdes;
+ case SLAPD_SECURITY_LOG:
+ return loginfo.log_security_fdes;
+ case SLAPD_AUDIT_LOG:
+ return loginfo.log_audit_fdes;
+ case SLAPD_AUDITFAIL_LOG:
+ return loginfo.log_auditfail_fdes;
+ case SLAPD_ERROR_LOG:
+ return loginfo.log_error_fdes;
+ }
+ return NULL;
+}

/* this function assumes the lock is already acquired */
/* if sync_now is non-zero, data is flushed to physical storage */
@@ -6857,6 +6874,7 @@ log_flush_buffer(LogBufferInfo *lbi, int log_type, int sync_now, int locked)
rotationtime_secs);
}
log_state = log_refresh_state(log_type);
+ fd = log_refresh_fd(log_type);
}

if (log_state & LOGGING_NEED_TITLE) {
--
2.48.0
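The C change above closes a classic rotation race: a cached handle keeps pointing at the old, renamed log file. The effect is easy to reproduce in plain Python on POSIX (throwaway paths, purely illustrative):

    import os

    fh = open('/tmp/demo.log', 'a')
    os.rename('/tmp/demo.log', '/tmp/demo.log.1')  # simulate rotation
    fh.write('lost line\n')    # follows the inode: lands in demo.log.1
    fh.close()

    fh = open('/tmp/demo.log', 'a')  # refresh the handle, as log_refresh_fd() does
    fh.write('visible line\n')
    fh.close()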
@@ -0,0 +1,37 @@
From a91c2641646824e44ef3b31a7eea238e3f55e5c3 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Tue, 1 Jul 2025 12:44:04 +0200
Subject: [PATCH] Issue 6838 - lib389/replica.py is using nonexistent
 datetime.UTC in Python 3.9

Bug Description:
389-ds-base-2.x is supposed to be used with Python 3.9.
But lib389/replica.py is using `datetime.UTC`, which is an alias
of `datetime.timezone.utc` that was added only in Python 3.11.

Fix Description:
Use `datetime.timezone.utc` instead.

Fixes: https://github.com/389ds/389-ds-base/issues/6838

Reviewed by: @mreynolds389 (Thanks!)
---
src/lib389/lib389/replica.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index 8791f7f4c..78d6eb4eb 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -917,7 +917,7 @@ class RUV(object):
ValueError("Wrong CSN value was supplied")

timestamp = int(csn[:8], 16)
- time_str = datetime.datetime.fromtimestamp(timestamp, datetime.UTC).strftime('%Y-%m-%d %H:%M:%S')
+ time_str = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
# We are parsing shorter CSN which contains only timestamp
if len(csn) == 8:
return time_str
--
2.49.0
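The portable spelling works on every Python 3 version, while the alias only exists from 3.11 on. A quick check mirroring the fixed line (the CSN timestamp is a made-up example):

    import datetime

    timestamp = int('67890abc', 16)  # hypothetical CSN timestamp field
    # datetime.UTC would raise AttributeError on Python <= 3.10
    print(datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
          .strftime('%Y-%m-%d %H:%M:%S'))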
@@ -1,311 +0,0 @@
From f077f9692d1625a1bc2dc6ee02a4fca71ee30b03 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Wed, 13 Nov 2024 15:31:35 +0100
Subject: [PATCH] Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work
 properly (#6400)

* Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work properly

Several issues:

After restarting the server, nsslapd-mdb-max-dbs may not be high enough to add a new backend
because the value computation is wrong.
dbscan fails to open the database if nsslapd-mdb-max-dbs has been increased.
dbscan crashes when closing the database (typically when using -S).
When starting the instance, the nsslapd-mdb-max-dbs parameter is increased to ensure that a new backend may be added.
When the dse.ldif path is not specified, the db environment is now opened using the INFO.mdb data instead of the default values.
Synchronization between thread closure and database context destruction is hardened.
Issue: #6374

Reviewed by: @tbordaz , @vashirov (Thanks!)

(cherry picked from commit 56cd3389da608a3f6eeee58d20dffbcd286a8033)
---
.../tests/suites/config/config_test.py | 86 +++++++++++++++++++
ldap/servers/slapd/back-ldbm/back-ldbm.h | 2 +
.../slapd/back-ldbm/db-mdb/mdb_config.c | 17 ++--
.../back-ldbm/db-mdb/mdb_import_threads.c | 9 +-
.../slapd/back-ldbm/db-mdb/mdb_instance.c | 8 ++
ldap/servers/slapd/back-ldbm/dbimpl.c | 2 +-
ldap/servers/slapd/back-ldbm/import.c | 14 ++-
7 files changed, 128 insertions(+), 10 deletions(-)

diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index 57b155af7..34dac36b6 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -17,6 +17,7 @@ from lib389.topologies import topology_m2, topology_st as topo
from lib389.utils import *
from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX, DEFAULT_BENAME
from lib389._mapped_object import DSLdapObjects
+from lib389.agreement import Agreements
from lib389.cli_base import FakeArgs
from lib389.cli_conf.backend import db_config_set
from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
@@ -27,6 +28,8 @@ from lib389.cos import CosPointerDefinitions, CosTemplates
from lib389.backend import Backends, DatabaseConfig
from lib389.monitor import MonitorLDBM, Monitor
from lib389.plugins import ReferentialIntegrityPlugin
+from lib389.replica import BootstrapReplicationManager, Replicas
+from lib389.passwd import password_generate

pytestmark = pytest.mark.tier0

@@ -36,6 +39,8 @@ PSTACK_CMD = '/usr/bin/pstack'
logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)

+DEBUGGING = os.getenv("DEBUGGING", default=False)
+
@pytest.fixture(scope="module")
def big_file():
TEMP_BIG_FILE = ''
@@ -811,6 +816,87 @@ def test_numlisteners_limit(topo):
assert numlisteners[0] == '4'


+def bootstrap_replication(inst_from, inst_to, creds):
+ manager = BootstrapReplicationManager(inst_to)
+ rdn_val = 'replication manager'
+ if manager.exists():
+ manager.delete()
+ manager.create(properties={
+ 'cn': rdn_val,
+ 'uid': rdn_val,
+ 'userPassword': creds
+ })
+ for replica in Replicas(inst_to).list():
+ replica.remove_all('nsDS5ReplicaBindDNGroup')
+ replica.replace('nsDS5ReplicaBindDN', manager.dn)
+ for agmt in Agreements(inst_from).list():
+ agmt.replace('nsDS5ReplicaBindDN', manager.dn)
+ agmt.replace('nsDS5ReplicaCredentials', creds)
+
+
+@pytest.mark.skipif(get_default_db_lib() != "mdb", reason="This test requires lmdb")
+def test_lmdb_autotuned_maxdbs(topology_m2, request):
+ """Verify that after restart, nsslapd-mdb-max-dbs is large enough to add a new backend.
+
+ :id: 0272d432-9080-11ef-8f40-482ae39447e5
+ :setup: Two suppliers configuration
+ :steps:
+ 1. loop 20 times
+ 2. In 1 loop: restart instance
+ 3. In 1 loop: add a new backend
+ 4. In 1 loop: check that instance is still alive
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ s1 = topology_m2.ms["supplier1"]
+ s2 = topology_m2.ms["supplier2"]
+
+ backends = Backends(s1)
+ db_config = DatabaseConfig(s1)
+ # Generate the teardown finalizer
+ belist = []
+ creds=password_generate()
+ bootstrap_replication(s2, s1, creds)
+ bootstrap_replication(s1, s2, creds)
+
+ def fin():
+ s1.start()
+ for be in belist:
+ be.delete()
+
+ if not DEBUGGING:
+ request.addfinalizer(fin)
+
+ # 1. Set autotuning (off-line to be able to decrease the value)
+ s1.stop()
+ dse_ldif = DSEldif(s1)
+ dse_ldif.replace(db_config.dn, 'nsslapd-mdb-max-dbs', '0')
+ os.remove(f'{s1.dbdir}/data.mdb')
+ s1.start()
+
+ # 2. Reinitialize the db:
+ log.info("Bulk import...")
+ agmt = Agreements(s2).list()[0]
+ agmt.begin_reinit()
+ (done, error) = agmt.wait_reinit()
+ log.info(f'Bulk importresult is ({done}, {error})')
+ assert done is True
+ assert error is False
+
+ # 3. loop 20 times
+ for idx in range(20):
+ s1.restart()
+ log.info(f'Adding backend test{idx}')
+ belist.append(backends.create(properties={'cn': f'test{idx}',
+ 'nsslapd-suffix': f'dc=test{idx}'}))
+ assert s1.status()
+
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index 8fea63e35..35d0ece04 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -896,4 +896,6 @@ typedef struct _back_search_result_set
((L)->size == (R)->size && !memcmp((L)->data, (R)->data, (L)->size))

typedef int backend_implement_init_fn(struct ldbminfo *li, config_info *config_array);
+
+pthread_mutex_t *get_import_ctx_mutex();
#endif /* _back_ldbm_h_ */
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
index 351f54037..1f7b71442 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
@@ -83,7 +83,7 @@ dbmdb_compute_limits(struct ldbminfo *li)
uint64_t total_space = 0;
uint64_t avail_space = 0;
uint64_t cur_dbsize = 0;
- int nbchangelogs = 0;
+ int nbvlvs = 0;
int nbsuffixes = 0;
int nbindexes = 0;
int nbagmt = 0;
@@ -99,8 +99,8 @@ dbmdb_compute_limits(struct ldbminfo *li)
* But some tunable may be autotuned.
*/
if (dbmdb_count_config_entries("(objectClass=nsMappingTree)", &nbsuffixes) ||
- dbmdb_count_config_entries("(objectClass=nsIndex)", &nbsuffixes) ||
- dbmdb_count_config_entries("(&(objectClass=nsds5Replica)(nsDS5Flags=1))", &nbchangelogs) ||
+ dbmdb_count_config_entries("(objectClass=nsIndex)", &nbindexes) ||
+ dbmdb_count_config_entries("(objectClass=vlvIndex)", &nbvlvs) ||
dbmdb_count_config_entries("(objectClass=nsds5replicationagreement)", &nbagmt)) {
/* error message is already logged */
return 1;
@@ -120,8 +120,15 @@ dbmdb_compute_limits(struct ldbminfo *li)

info->pagesize = sysconf(_SC_PAGE_SIZE);
limits->min_readers = config_get_threadnumber() + nbagmt + DBMDB_READERS_MARGIN;
- /* Default indexes are counted in "nbindexes" so we should always have enough resource to add 1 new suffix */
- limits->min_dbs = nbsuffixes + nbindexes + nbchangelogs + DBMDB_DBS_MARGIN;
+ /*
+ * For each suffix there are 4 databases instances:
+ * long-entryrdn, replication_changelog, id2entry and ancestorid
+ * then the indexes and the vlv and vlv cache
+ *
+ * Default indexes are counted in "nbindexes" so we should always have enough
+ * resource to add 1 new suffix
+ */
+ limits->min_dbs = 4*nbsuffixes + nbindexes + 2*nbvlvs + DBMDB_DBS_MARGIN;

total_space = ((uint64_t)(buf.f_blocks)) * ((uint64_t)(buf.f_bsize));
avail_space = ((uint64_t)(buf.f_bavail)) * ((uint64_t)(buf.f_bsize));
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
index 8c879da31..707a110c5 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
@@ -4312,9 +4312,12 @@ dbmdb_import_init_writer(ImportJob *job, ImportRole_t role)
void
dbmdb_free_import_ctx(ImportJob *job)
{
- if (job->writer_ctx) {
- ImportCtx_t *ctx = job->writer_ctx;
- job->writer_ctx = NULL;
+ ImportCtx_t *ctx = NULL;
+ pthread_mutex_lock(get_import_ctx_mutex());
+ ctx = job->writer_ctx;
+ job->writer_ctx = NULL;
+ pthread_mutex_unlock(get_import_ctx_mutex());
+ if (ctx) {
pthread_mutex_destroy(&ctx->workerq.mutex);
pthread_cond_destroy(&ctx->workerq.cv);
slapi_ch_free((void**)&ctx->workerq.slots);
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
index 6386ecf06..05f1e348d 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
@@ -287,6 +287,13 @@ int add_dbi(dbi_open_ctx_t *octx, backend *be, const char *fname, int flags)
slapi_ch_free((void**)&treekey.dbname);
return octx->rc;
}
+ if (treekey.dbi >= ctx->dsecfg.max_dbs) {
+ octx->rc = MDB_DBS_FULL;
+ slapi_log_err(SLAPI_LOG_ERR, "add_dbi", "Failed to open database instance %s slots: %d/%d. Error is %d: %s.\n",
+ treekey.dbname, treekey.dbi, ctx->dsecfg.max_dbs, octx->rc, mdb_strerror(octx->rc));
+ slapi_ch_free((void**)&treekey.dbname);
+ return octx->rc;
+ }
if (octx->ai && octx->ai->ai_key_cmp_fn) {
octx->rc = dbmdb_update_dbi_cmp_fn(ctx, &treekey, octx->ai->ai_key_cmp_fn, octx->txn);
if (octx->rc) {
@@ -689,6 +696,7 @@ int dbmdb_make_env(dbmdb_ctx_t *ctx, int readOnly, mdb_mode_t mode)
rc = dbmdb_write_infofile(ctx);
} else {
/* No Config ==> read it from info file */
+ ctx->dsecfg = ctx->startcfg;
}
if (rc) {
return rc;
diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
index da4a4548e..42f4a0718 100644
--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
@@ -463,7 +463,7 @@ int dblayer_show_statistics(const char *dbimpl_name, const char *dbhome, FILE *f
li->li_plugin = be->be_database;
li->li_plugin->plg_name = (char*) "back-ldbm-dbimpl";
li->li_plugin->plg_libpath = (char*) "libback-ldbm";
- li->li_directory = (char*)dbhome;
+ li->li_directory = get_li_directory(dbhome);

/* Initialize database plugin */
rc = dbimpl_setup(li, dbimpl_name);
diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
index 2bb8cb581..30ec462fa 100644
--- a/ldap/servers/slapd/back-ldbm/import.c
+++ b/ldap/servers/slapd/back-ldbm/import.c
@@ -27,6 +27,9 @@
#define NEED_DN_NORM_SP -25
#define NEED_DN_NORM_BT -26

+/* Protect against import context destruction */
+static pthread_mutex_t import_ctx_mutex = PTHREAD_MUTEX_INITIALIZER;
+

/********** routines to manipulate the entry fifo **********/

@@ -143,6 +146,14 @@ ldbm_back_wire_import(Slapi_PBlock *pb)

/* Threads management */

+/* Return the mutex that protects against import context destruction */
+pthread_mutex_t *
+get_import_ctx_mutex()
+{
+ return &import_ctx_mutex;
+}
+
+
/* tell all the threads to abort */
void
import_abort_all(ImportJob *job, int wait_for_them)
@@ -151,7 +162,7 @@ import_abort_all(ImportJob *job, int wait_for_them)

/* tell all the worker threads to abort */
job->flags |= FLAG_ABORT;
-
+ pthread_mutex_lock(&import_ctx_mutex);
for (worker = job->worker_list; worker; worker = worker->next)
worker->command = ABORT;

@@ -167,6 +178,7 @@ import_abort_all(ImportJob *job, int wait_for_them)
}
}
}
+ pthread_mutex_unlock(&import_ctx_mutex);
}

--
2.48.0
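The core of this (now upstream) fix is the new sizing formula in dbmdb_compute_limits(). A back-of-the-envelope version in Python; the real DBMDB_DBS_MARGIN value lives in the mdb headers, so the constant here is only an assumed stand-in:

    DBMDB_DBS_MARGIN = 10  # assumption for illustration only

    def min_dbs(nbsuffixes, nbindexes, nbvlvs):
        # 4 core databases per suffix (long-entryrdn, replication_changelog,
        # id2entry, ancestorid), plus one per index, plus vlv + vlv cache
        return 4 * nbsuffixes + nbindexes + 2 * nbvlvs + DBMDB_DBS_MARGIN

    # one suffix with ten indexes and no VLV index:
    print(min_dbs(nbsuffixes=1, nbindexes=10, nbvlvs=0))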
@@ -0,0 +1,351 @@
From 4eef34cec551582d1de23266bc6cde84a7e38b5d Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 24 Mar 2025 10:43:21 +0100
Subject: [PATCH] Issue 6680 - instance read-only mode is broken (#6681)

Read-only mode is broken because some plugins fail to start, as they are not able to create or update some entries in the dse backend.
The solution is to allow internal operations to write to the dse backend without modifying dse.ldif (except for the special case of modifying the nsslapd-readonly flag, so that read-only mode can still be set and unset).

Issue: #6680

Reviewed by: @droideck, @tbordaz (thanks!)
---
.../tests/suites/config/regression_test.py | 60 ++++++++++
ldap/servers/slapd/dse.c | 110 +++++++++++++++++-
ldap/servers/slapd/mapping_tree.c | 90 ++++++++++++--
3 files changed, 247 insertions(+), 13 deletions(-)

diff --git a/dirsrvtests/tests/suites/config/regression_test.py b/dirsrvtests/tests/suites/config/regression_test.py
index 8dbba8cd2..6e313ac8a 100644
--- a/dirsrvtests/tests/suites/config/regression_test.py
+++ b/dirsrvtests/tests/suites/config/regression_test.py
@@ -28,6 +28,8 @@ CUSTOM_MEM = '9100100100'
IDLETIMEOUT = 5
DN_TEST_USER = f'uid={TEST_USER_PROPERTIES["uid"]},ou=People,{DEFAULT_SUFFIX}'

+RO_ATTR = 'nsslapd-readonly'
+

@pytest.fixture(scope="module")
def idletimeout_topo(topo, request):
@@ -190,3 +192,61 @@ def test_idletimeout(idletimeout_topo, dn, expected_result):
except ldap.SERVER_DOWN:
result = True
assert expected_result == result
+
+
+def test_instance_readonly_mode(topo):
+ """Check that readonly mode is supported
+
+ :id: 34d2e28e-04d7-11f0-b0cf-482ae39447e5
+ :setup: Standalone Instance
+ :steps:
+ 1. Set readonly mode
+ 2. Stop the instance
+ 3. Get dse.ldif modification time
+ 4. Start the instance
+ 5. Get dse.ldif modification time
+ 6. Check that modification time has not changed
+ 7. Check that readonly mode is set
+ 8. Try to modify another config attribute
+ 9. Unset readonly mode
+ 10. Restart the instance
+ 11. Check that modification time has not changed
+ 12. Check that modification time has changed
+ 13. Check that readonly mode is unset
+ 14. Try to modify another config attribute
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Should get ldap.UNWILLING_TO_PERFORM exception
+ 9. Success
+ 10. Success
+ 11. Success
+ 12. Success
+ 13. Success
+ 14. Success
+ """
+
+ inst = topo.standalone
+ dse_path = f'{topo.standalone.get_config_dir()}/dse.ldif'
+ inst.config.replace(RO_ATTR, 'on')
+ inst.stop()
+ dse_mtime = os.stat(dse_path).st_mtime
+ inst.start()
+ new_dse_mtime = os.stat(dse_path).st_mtime
+ assert dse_mtime == new_dse_mtime
+ assert inst.config.get_attr_val_utf8(RO_ATTR) == "on"
+ attr = 'nsslapd-errorlog-maxlogsize'
+ val = inst.config.get_attr_val_utf8(attr)
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+ inst.config.replace(attr, val)
+ inst.config.replace(RO_ATTR, 'off')
+ inst.restart()
+ new_dse_mtime = os.stat(dse_path).st_mtime
+ assert dse_mtime != new_dse_mtime
+ assert inst.config.get_attr_val_utf8(RO_ATTR) == "off"
+ inst.config.replace(attr, val)
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
index e3157c1ce..0f266f0d7 100644
--- a/ldap/servers/slapd/dse.c
+++ b/ldap/servers/slapd/dse.c
@@ -1031,6 +1031,114 @@ dse_check_for_readonly_error(Slapi_PBlock *pb, struct dse *pdse)
return rc; /* no error */
}

+/* Trivial wrapper around slapi_re_comp to handle errors */
+static Slapi_Regex *
+recomp(const char *regexp)
+{
+ char *error = "";
+ Slapi_Regex *re = slapi_re_comp(regexp, &error);
+ if (re == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, "is_readonly_set_in_dse",
+ "Failed to compile '%s' regular expression. Error is %s\n",
+ regexp, error);
+ }
+ slapi_ch_free_string(&error);
+ return re;
+}
+
+/*
+ * Check if "nsslapd-readonly: on" is in cn-config in dse.ldif file
+ * ( If the flag is set in memory but on in the file, the file should
+ * be written (to let dsconf able to modify the nsslapd-readonly flag)
+ */
+static bool
+is_readonly_set_in_dse(const char *dsename)
+{
+ Slapi_Regex *re_config = recomp("^dn:\\s+cn=config\\s*$");
+ Slapi_Regex *re_isro = recomp("^" CONFIG_READONLY_ATTRIBUTE ":\\s+on\\s*$");
+ Slapi_Regex *re_eoe = recomp("^$");
+ bool isconfigentry = false;
+ bool isro = false;
+ FILE *fdse = NULL;
+ char line[128];
+ char *error = NULL;
+ const char *regexp = "";
+
+ if (!dsename) {
+ goto done;
+ }
+ if (re_config == NULL || re_isro == NULL || re_eoe == NULL) {
+ goto done;
+ }
+ fdse = fopen(dsename, "r");
+ if (fdse == NULL) {
+ /* No dse file, we need to write it */
+ goto done;
+ }
+ while (fgets(line, (sizeof line), fdse)) {
+ /* Convert the read line to lowercase */
+ for (char *pt=line; *pt; pt++) {
+ if (isalpha(*pt)) {
+ *pt = tolower(*pt);
+ }
+ }
+ if (slapi_re_exec_nt(re_config, line)) {
+ isconfigentry = true;
+ }
+ if (slapi_re_exec_nt(re_eoe, line)) {
+ if (isconfigentry) {
+ /* End of config entry ==> readonly flag is not set */
+ break;
+ }
+ }
+ if (isconfigentry && slapi_re_exec_nt(re_isro, line)) {
+ /* Found readonly flag */
+ isro = true;
+ break;
+ }
+ }
+done:
+ if (fdse) {
+ (void) fclose(fdse);
+ }
+ slapi_re_free(re_config);
+ slapi_re_free(re_isro);
+ slapi_re_free(re_eoe);
+ return isro;
+}
+
+/*
+ * Check if dse.ldif can be written
+ * Beware that even in read-only mode dse.ldif file
+ * should still be written to change the nsslapd-readonly value
+ */
+static bool
+check_if_readonly(struct dse *pdse)
+{
+ static bool ro = false;
+
+ if (pdse->dse_filename == NULL) {
+ return false;
+ }
+ if (!slapi_config_get_readonly()) {
+ ro = false;
+ return ro;
+ }
+ if (ro) {
+ /* read-only mode and dse is up to date ==> Do not modify it. */
+ return ro;
+ }
+ /* First attempt to write the dse.ldif since readonly mode is enabled.
+ * Lets check if "nsslapd-readonly: on" is in cn=config entry
+ * and allow to write the dse.ldif if it is the case
+ */
+ if (is_readonly_set_in_dse(pdse->dse_filename)) {
+ /* read-only mode and dse is up to date ==> Do not modify it. */
+ ro = true;
+ }
+ /* Read only mode but nsslapd-readonly value is not up to date. */
+ return ro;
+}

/*
* Write the AVL tree of entries back to the LDIF file.
@@ -1041,7 +1149,7 @@ dse_write_file_nolock(struct dse *pdse)
FPWrapper fpw;
int rc = 0;

- if (dont_ever_write_dse_files) {
+ if (dont_ever_write_dse_files || check_if_readonly(pdse)) {
return rc;
}

diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c
index dd7b1af37..e51b3b948 100644
--- a/ldap/servers/slapd/mapping_tree.c
+++ b/ldap/servers/slapd/mapping_tree.c
@@ -2058,6 +2058,82 @@ slapi_dn_write_needs_referral(Slapi_DN *target_sdn, Slapi_Entry **referral)
done:
return ret;
}
+
+/*
+ * This function dermines if an operation should be rejected
+ * when readonly mode is enabled.
+ * All operations are rejected except:
+ * - if they target a private backend that is not the DSE backend
+ * - if they are read operations (SEARCH, COMPARE, BIND, UNBIND)
+ * - if they are tombstone fixup operation (i.e: tombstone purging)
+ * - if they are internal operation that targets the DSE backend.
+ * (change will then be done in memory but not written in dse.ldif)
+ * - single modify modify operation on cn=config changing nsslapd-readonly
+ * (to allow "dsconf instance config replace nsslapd-readonly=xxx",
+ change will then be done both in memory and in dse.ldif)
+ */
+static bool
+is_rejected_op(Slapi_Operation *op, Slapi_Backend *be)
+{
+ const char *betype = slapi_be_gettype(be);
+ unsigned long be_op_type = operation_get_type(op);
+ int isdse = (betype && strcmp(betype, "DSE") == 0);
+
+ /* Private backend operations are not rejected */
+
+ /* Read operations are not rejected */
+ if ((be_op_type == SLAPI_OPERATION_SEARCH) ||
+ (be_op_type == SLAPI_OPERATION_COMPARE) ||
+ (be_op_type == SLAPI_OPERATION_BIND) ||
+ (be_op_type == SLAPI_OPERATION_UNBIND)) {
+ return false;
+ }
+
+ /* Tombstone fixup are not rejected. */
+ if (operation_is_flag_set(op, OP_FLAG_TOMBSTONE_FIXUP)) {
+ return false;
+ }
+
+ if (!isdse) {
+ /* write operation on readonly backends are rejected */
+ if (be->be_readonly) {
+ return true;
+ }
+
+ /* private backends (DSE excepted) are not backed on files
+ * so write operations are accepted.
+ * but other operations (not on DSE) are rejected.
+ */
+ if (slapi_be_private(be)) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ /* Allowed operations in dse backend are:
+ * - the internal operations and
+ * - modify of nsslapd-readonly flag in cn=config
+ */
+
+ if (operation_is_flag_set(op, OP_FLAG_INTERNAL)) {
+ return false;
+ }
+ if (be_op_type == SLAPI_OPERATION_MODIFY) {
+ Slapi_DN *sdn = operation_get_target_spec(op);
+ Slapi_DN config = {0};
+ LDAPMod **mods = op->o_params.p.p_modify.modify_mods;
+ slapi_sdn_init_ndn_byref(&config, SLAPD_CONFIG_DN);
+ if (mods && mods[0] && !mods[1] &&
+ slapi_sdn_compare(sdn, &config) == 0 &&
+ strcasecmp(mods[0]->mod_type, CONFIG_READONLY_ATTRIBUTE) == 0) {
+ /* Single modifier impacting nsslapd-readonly */
+ return false;
+ }
+ }
+ return true;
+}
+
/*
* Description:
* The reason we have a mapping tree. This function selects a backend or
@@ -2095,7 +2171,6 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re
int ret;
int scope = LDAP_SCOPE_BASE;
int op_type;
- int fixup = 0;

if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) {
/* shutdown detected */
@@ -2112,7 +2187,6 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re

/* Get the target for this op */
target_sdn = operation_get_target_spec(op);
- fixup = operation_is_flag_set(op, OP_FLAG_TOMBSTONE_FIXUP);

PR_ASSERT(mapping_tree_inited == 1);

@@ -2161,22 +2235,14 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re
* or if the whole server is readonly AND backend is public (!private)
*/
if ((ret == LDAP_SUCCESS) && *be && !be_isdeleted(*be) &&
- (((*be)->be_readonly && !fixup) ||
- ((slapi_config_get_readonly() && !fixup) &&
- !slapi_be_private(*be)))) {
- unsigned long be_op_type = operation_get_type(op);
-
- if ((be_op_type != SLAPI_OPERATION_SEARCH) &&
- (be_op_type != SLAPI_OPERATION_COMPARE) &&
- (be_op_type != SLAPI_OPERATION_BIND) &&
- (be_op_type != SLAPI_OPERATION_UNBIND)) {
+ ((*be)->be_readonly || slapi_config_get_readonly()) &&
+ is_rejected_op(op, *be)) {
if (errorbuf) {
PL_strncpyz(errorbuf, slapi_config_get_readonly() ? "Server is read-only" : "database is read-only", ebuflen);
}
ret = LDAP_UNWILLING_TO_PERFORM;
slapi_be_Unlock(*be);
*be = NULL;
- }
}

return ret;
--
2.49.0
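The is_readonly_set_in_dse() helper above scans dse.ldif line by line with compiled regexes. A Python sketch of the same logic, useful for reasoning about its edge cases (file path and function name are illustrative):

    import re

    def readonly_set_in_dse(path):
        in_config = False
        try:
            with open(path) as f:
                for line in f:
                    low = line.lower()
                    if re.match(r'^dn:\s+cn=config\s*$', low):
                        in_config = True
                    elif in_config and low.strip() == '':
                        return False  # end of cn=config entry: flag not set
                    elif in_config and re.match(r'^nsslapd-readonly:\s+on\s*$', low):
                        return True   # found the flag
        except FileNotFoundError:
            pass  # no dse.ldif: treat as writable, as the C code does
        return False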
@ -1,894 +0,0 @@
|
||||
From b53faa9e7289383bbc02fc260b1b34958a317fdd Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Fri, 6 Sep 2024 14:45:06 +0200
|
||||
Subject: [PATCH] Issue 6090 - Fix dbscan options and man pages (#6315)
|
||||
|
||||
* Issue 6090 - Fix dbscan options and man pages
|
||||
|
||||
dbscan -d option is dangerously confusing as it removes a database instance while in db_stat it identify the database
|
||||
(cf issue #5609 ).
|
||||
This fix implements long options in dbscan, rename -d in --remove, and requires a new --do-it option for action that change the database content.
|
||||
The fix should also align both the usage and the dbscan man page with the new set of options
|
||||
|
||||
Issue: #6090
|
||||
|
||||
Reviewed by: @tbordaz, @droideck (Thanks!)
|
||||
|
||||
(cherry picked from commit 25e1d16887ebd299dfe0088080b9ee0deec1e41f)
|
||||
---
|
||||
dirsrvtests/tests/suites/clu/dbscan_test.py | 253 ++++++++++++++++++
|
||||
.../tests/suites/clu/repl_monitor_test.py | 4 +-
|
||||
.../slapd/back-ldbm/db-bdb/bdb_layer.c | 12 +-
|
||||
ldap/servers/slapd/back-ldbm/dbimpl.c | 50 +++-
|
||||
ldap/servers/slapd/tools/dbscan.c | 182 ++++++++++---
|
||||
man/man1/dbscan.1 | 74 +++--
|
||||
src/lib389/lib389/__init__.py | 9 +-
|
||||
src/lib389/lib389/cli_ctl/dblib.py | 13 +-
|
||||
8 files changed, 531 insertions(+), 66 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/clu/dbscan_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dbscan_test.py b/dirsrvtests/tests/suites/clu/dbscan_test.py
|
||||
new file mode 100644
|
||||
index 000000000..2c9a9651a
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/clu/dbscan_test.py
|
||||
@@ -0,0 +1,253 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import logging
|
||||
+import os
|
||||
+import pytest
|
||||
+import re
|
||||
+import subprocess
|
||||
+import sys
|
||||
+
|
||||
+from lib389 import DirSrv
|
||||
+from lib389._constants import DBSCAN
|
||||
+from lib389.topologies import topology_m2 as topo_m2
|
||||
+from difflib import context_diff
|
||||
+
|
||||
+pytestmark = pytest.mark.tier0
|
||||
+
|
||||
+logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
+
|
||||
+
|
||||
+class CalledProcessUnexpectedReturnCode(subprocess.CalledProcessError):
|
||||
+ def __init__(self, result, expected_rc):
|
||||
+ super().__init__(cmd=result.args, returncode=result.returncode, output=result.stdout, stderr=result.stderr)
|
||||
+ self.expected_rc = expected_rc
|
||||
+ self.result = result
|
||||
+
|
||||
+ def __str__(self):
|
||||
+ return f'Command {self.result.args} returned {self.result.returncode} instead of {self.expected_rc}'
|
||||
+
|
||||
+
|
||||
+class DbscanPaths:
|
||||
+ @staticmethod
|
||||
+ def list_instances(inst, dblib, dbhome):
|
||||
+ # compute db instance pathnames
|
||||
+ instances = dbscan(['-D', dblib, '-L', dbhome], inst=inst).stdout
|
||||
+ dbis = []
|
||||
+ if dblib == 'bdb':
|
||||
+ pattern = r'^ (.*) $'
|
||||
+ prefix = f'{dbhome}/'
|
||||
+ else:
|
||||
+ pattern = r'^ (.*) flags:'
|
||||
+ prefix = f''
|
||||
+ for match in re.finditer(pattern, instances, flags=re.MULTILINE):
|
||||
+ dbis.append(prefix+match.group(1))
|
||||
+ return dbis
|
||||
+
|
||||
+ @staticmethod
|
||||
+ def list_options(inst):
|
||||
+ # compute supported options
|
||||
+ options = []
|
||||
+ usage = dbscan(['-h'], inst=inst, expected_rc=None).stdout
|
||||
+ pattern = r'^\s+(?:(-[^-,]+), +)?(--[^ ]+).*$'
|
||||
+ for match in re.finditer(pattern, usage, flags=re.MULTILINE):
|
||||
+ for idx in range(1,3):
|
||||
+ if match.group(idx) is not None:
|
||||
+ options.append(match.group(idx))
|
||||
+ return options
|
||||
+
|
||||
+ def __init__(self, inst):
|
||||
+ dblib = inst.get_db_lib()
|
||||
+ dbhome = inst.ds_paths.db_home_dir
|
||||
+ self.inst = inst
|
||||
+ self.dblib = dblib
|
||||
+ self.dbhome = dbhome
|
||||
+ self.options = DbscanPaths.list_options(inst)
|
||||
+ self.dbis = DbscanPaths.list_instances(inst, dblib, dbhome)
|
||||
+ self.ldif_dir = inst.ds_paths.ldif_dir
|
||||
+
|
||||
+ def get_dbi(self, attr, backend='userroot'):
|
||||
+ for dbi in self.dbis:
|
||||
+ if f'{backend}/{attr}.'.lower() in dbi.lower():
|
||||
+ return dbi
|
||||
+ raise KeyError(f'Unknown dbi {backend}/{attr}')
|
||||
+
|
||||
+ def __repr__(self):
|
||||
+ attrs = ['inst', 'dblib', 'dbhome', 'ldif_dir', 'options', 'dbis' ]
|
||||
+ res = ", ".join(map(lambda x: f'{x}={self.__dict__[x]}', attrs))
|
||||
+ return f'DbscanPaths({res})'
|
||||
+
|
||||
+
|
||||
+def dbscan(args, inst=None, expected_rc=0):
|
||||
+ if inst is None:
|
||||
+ prefix = os.environ.get('PREFIX', "")
|
||||
+ prog = f'{prefix}/bin/dbscan'
|
||||
+ else:
|
||||
+ prog = os.path.join(inst.ds_paths.bin_dir, DBSCAN)
|
||||
+ args.insert(0, prog)
|
||||
+ output = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
+ log.debug(f'{args} result is {output.returncode} output is {output.stdout}')
|
||||
+ if expected_rc is not None and expected_rc != output.returncode:
|
||||
+ raise CalledProcessUnexpectedReturnCode(output, expected_rc)
|
||||
+ return output
|
||||
+
|
||||
+
|
||||
+def log_export_file(filename):
|
||||
+ with open(filename, 'r') as file:
|
||||
+ log.debug(f'=========== Dump of {filename} ================')
|
||||
+ for line in file:
|
||||
+ log.debug(line.rstrip('\n'))
|
||||
+ log.debug(f'=========== Enf of {filename} =================')
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope='module')
|
||||
+def paths(topo_m2, request):
|
||||
+ inst = topo_m2.ms["supplier1"]
|
||||
+ if sys.version_info < (3,5):
|
||||
+ pytest.skip('requires python version >= 3.5')
|
||||
+ paths = DbscanPaths(inst)
|
||||
+ if '--do-it' not in paths.options:
|
||||
+ pytest.skip('Not supported with this dbscan version')
|
||||
+ inst.stop()
|
||||
+ return paths
|
||||
+
|
||||
+
|
||||
+def test_dbscan_destructive_actions(paths, request):
|
||||
+ """Test that dbscan remove/import actions
|
||||
+
|
||||
+ :id: f40b0c42-660a-11ef-9544-083a88554478
|
||||
+ :setup: Stopped standalone instance
|
||||
+ :steps:
|
||||
+ 1. Export cn instance with dbscan
|
||||
+ 2. Run dbscan --remove ...
|
||||
+ 3. Check the error message about missing --do-it
|
||||
+ 4. Check that cn instance is still present
|
||||
+ 5. Run dbscan -I import_file ...
|
||||
+ 6. Check it was properly imported
|
||||
+ 7. Check that cn instance is still present
|
||||
+ 8. Run dbscan --remove ... --doit
|
||||
+ 9. Check the error message about missing --do-it
|
||||
+ 10. Check that cn instance is still present
|
||||
+ 11. Run dbscan -I import_file ... --do-it
|
||||
+ 12. Check it was properly imported
|
||||
+ 13. Check that cn instance is still present
|
||||
+ 14. Export again the database
|
||||
+ 15. Check that content of export files are the same
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. dbscan return code should be 1 (error)
|
||||
+ 3. Error message should be present
|
||||
+ 4. cn instance should be present
|
||||
+ 5. dbscan return code should be 1 (error)
|
||||
+ 6. Error message should be present
|
||||
+ 7. cn instance should be present
|
||||
+ 8. dbscan return code should be 0 (success)
|
||||
+ 9. Error message should not be present
|
||||
+ 10. cn instance should not be present
|
||||
+ 11. dbscan return code should be 0 (success)
|
||||
+ 12. Error message should not be present
|
||||
+ 13. cn instance should be present
|
||||
+ 14. Success
|
||||
+ 15. Export files content should be the same
|
||||
+ """
|
||||
+
|
||||
+ # Export cn instance with dbscan
|
||||
+ export_cn = f'{paths.ldif_dir}/dbscan_cn.data'
|
||||
+ export_cn2 = f'{paths.ldif_dir}/dbscan_cn2.data'
|
||||
+ cndbi = paths.get_dbi('replication_changelog')
|
||||
+ inst = paths.inst
|
||||
+ dblib = paths.dblib
|
||||
+ exportok = False
|
||||
+ def fin():
|
||||
+ if os.path.exists(export_cn):
|
||||
+ # Restore cn if it was exported successfully but does not exists any more
|
||||
+ if exportok and cndbi not in DbscanPaths.list_instances(inst, dblib, paths.dbhome):
|
||||
+ dbscan(['-D', dblib, '-f', cndbi, '-I', export_cn, '--do-it'], inst=inst)
|
||||
+ if not DEBUGGING:
|
||||
+ os.remove(export_cn)
|
||||
+ if os.path.exists(export_cn) and not DEBUGGING:
|
||||
+ os.remove(export_cn2)
|
||||
+
|
||||
+ fin()
|
||||
+ request.addfinalizer(fin)
|
||||
+ dbscan(['-D', dblib, '-f', cndbi, '-X', export_cn], inst=inst)
|
||||
+ exportok = True
|
||||
+
|
||||
+ expected_msg = "without specifying '--do-it' parameter."
|
||||
+
|
||||
+ # Run dbscan --remove ...
|
||||
+ result = dbscan(['-D', paths.dblib, '--remove', '-f', cndbi],
|
||||
+ inst=paths.inst, expected_rc=1)
|
||||
+
|
||||
+ # Check the error message about missing --do-it
|
||||
+ assert expected_msg in result.stdout
|
||||
+
|
||||
+ # Check that cn instance is still present
|
||||
+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
|
||||
+ assert cndbi in curdbis
|
||||
+
|
||||
+ # Run dbscan -I import_file ...
|
||||
+ result = dbscan(['-D', paths.dblib, '-f', cndbi, '-I', export_cn],
|
||||
+ inst=paths.inst, expected_rc=1)
|
||||
+
|
||||
+ # Check the error message about missing --do-it
|
||||
+ assert expected_msg in result.stdout
|
||||
+
|
||||
+ # Check that cn instance is still present
|
||||
+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
|
||||
+ assert cndbi in curdbis
|
||||
+
|
||||
+ # Run dbscan --remove ... --doit
|
||||
+ result = dbscan(['-D', paths.dblib, '--remove', '-f', cndbi, '--do-it'],
|
||||
+ inst=paths.inst, expected_rc=0)
|
||||
+
|
||||
+ # Check the error message about missing --do-it
|
||||
+ assert expected_msg not in result.stdout
|
||||
+
|
||||
+ # Check that cn instance is still present
|
||||
+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
|
||||
+ assert cndbi not in curdbis
|
||||
+
|
||||
+ # Run dbscan -I import_file ... --do-it
|
||||
+ result = dbscan(['-D', paths.dblib, '-f', cndbi,
|
||||
+ '-I', export_cn, '--do-it'],
|
||||
+ inst=paths.inst, expected_rc=0)
|
||||
+
|
||||
+ # Check the error message about missing --do-it
|
||||
+ assert expected_msg not in result.stdout
|
||||
+
|
||||
+ # Check that cn instance is still present
|
||||
+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
|
||||
+ assert cndbi in curdbis
|
||||
+
|
||||
+ # Export again the database
|
||||
+ dbscan(['-D', dblib, '-f', cndbi, '-X', export_cn2], inst=inst)
|
||||
+
|
||||
+ # Check that content of export files are the same
|
||||
+ with open(export_cn) as f1:
|
||||
+ f1lines = f1.readlines()
|
||||
+ with open(export_cn2) as f2:
|
||||
+ f2lines = f2.readlines()
|
||||
+ diffs = list(context_diff(f1lines, f2lines))
|
||||
+ if len(diffs) > 0:
|
||||
+ log.debug("Export file differences are:")
|
||||
+ for d in diffs:
|
||||
+ log.debug(d)
|
||||
+ log_export_file(export_cn)
|
||||
+ log_export_file(export_cn2)
|
||||
+ assert diffs is None
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main("-s %s" % CURRENT_FILE)
|
||||
diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
|
||||
index d83416847..842dd96fd 100644
|
||||
--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
|
||||
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
|
||||
@@ -77,13 +77,13 @@ def get_hostnames_from_log(port1, port2):
|
||||
# search for Supplier :hostname:port
|
||||
# and use \D to insure there is no more number is after
|
||||
# the matched port (i.e that 10 is not matching 101)
|
||||
- regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)'
|
||||
+ regexp = '(Supplier: )([^:]*)(:' + str(port1) + r'\D)'
|
||||
match=re.search(regexp, logtext)
|
||||
host_m1 = 'localhost.localdomain'
|
||||
if (match is not None):
|
||||
host_m1 = match.group(2)
|
||||
# Same for supplier 2
|
||||
- regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)'
|
||||
+ regexp = '(Supplier: )([^:]*)(:' + str(port2) + r'\D)'
|
||||
match=re.search(regexp, logtext)
|
||||
host_m2 = 'localhost.localdomain'
|
||||
if (match is not None):
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
index de6be0f42..4b30e8e87 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
@@ -5820,8 +5820,16 @@ bdb_import_file_name(ldbm_instance *inst)
|
||||
static char *
|
||||
bdb_restore_file_name(struct ldbminfo *li)
|
||||
{
|
||||
- char *fname = slapi_ch_smprintf("%s/../.restore", li->li_directory);
|
||||
-
|
||||
+ char *pt = strrchr(li->li_directory, '/');
|
||||
+ char *fname = NULL;
|
||||
+ if (pt == NULL) {
|
||||
+ fname = slapi_ch_strdup(".restore");
|
||||
+ } else {
|
||||
+ size_t len = pt-li->li_directory;
|
||||
+ fname = slapi_ch_malloc(len+10);
|
||||
+ strncpy(fname, li->li_directory, len);
|
||||
+ strcpy(fname+len, "/.restore");
|
||||
+ }
|
||||
return fname;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
|
||||
index 42f4a0718..134d06480 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
|
||||
@@ -397,7 +397,48 @@ const char *dblayer_op2str(dbi_op_t op)
|
||||
return str[idx];
|
||||
}
|
||||
|
||||
-/* Open db env, db and db file privately */
|
||||
+/* Get the li_directory directory from the database instance name -
|
||||
+ * Caller should free the returned value
|
||||
+ */
|
||||
+static char *
|
||||
+get_li_directory(const char *fname)
|
||||
+{
|
||||
+ /*
|
||||
+ * li_directory is an existing directory.
|
||||
+ * it can be fname or its parent or its greatparent
|
||||
+ * in case of problem returns the provided name
|
||||
+ */
|
||||
+ char *lid = slapi_ch_strdup(fname);
|
||||
+ struct stat sbuf = {0};
|
||||
+ char *pt = NULL;
|
||||
+ for (int count=0; count<3; count++) {
|
||||
+ if (stat(lid, &sbuf) == 0) {
|
||||
+ if (S_ISDIR(sbuf.st_mode)) {
|
||||
+ return lid;
|
||||
+ }
|
||||
+ /* Non directory existing file could be regular
|
||||
+ * at the first iteration otherwise it is an error.
|
||||
+ */
|
||||
+ if (count>0 || !S_ISREG(sbuf.st_mode)) {
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
+ pt = strrchr(lid, '/');
|
||||
+ if (pt == NULL) {
|
||||
+ slapi_ch_free_string(&lid);
|
||||
+ return slapi_ch_strdup(".");
|
||||
+ }
|
||||
+ *pt = '\0';
|
||||
+ }
|
||||
+ /*
|
||||
+ * Error case. Returns a copy of the original string:
|
||||
+ * and let dblayer_private_open_fn fail to open the database
|
||||
+ */
|
||||
+ slapi_ch_free_string(&lid);
|
||||
+ return slapi_ch_strdup(fname);
|
||||
+}
|
||||
+
|
||||
+/* Open db env, db and db file privately (for dbscan) */
|
||||
int dblayer_private_open(const char *plgname, const char *dbfilename, int rw, Slapi_Backend **be, dbi_env_t **env, dbi_db_t **db)
|
||||
{
|
||||
struct ldbminfo *li;
|
||||
@@ -412,7 +453,7 @@ int dblayer_private_open(const char *plgname, const char *dbfilename, int rw, Sl
|
||||
li->li_plugin = (*be)->be_database;
|
||||
li->li_plugin->plg_name = (char*) "back-ldbm-dbimpl";
|
||||
li->li_plugin->plg_libpath = (char*) "libback-ldbm";
|
||||
- li->li_directory = slapi_ch_strdup(dbfilename);
|
||||
+ li->li_directory = get_li_directory(dbfilename);
|
||||
|
||||
/* Initialize database plugin */
|
||||
rc = dbimpl_setup(li, plgname);
|
||||
@@ -439,7 +480,10 @@ int dblayer_private_close(Slapi_Backend **be, dbi_env_t **env, dbi_db_t **db)
|
||||
}
|
||||
slapi_ch_free((void**)&li->li_dblayer_private);
|
||||
slapi_ch_free((void**)&li->li_dblayer_config);
|
||||
- ldbm_config_destroy(li);
|
||||
+ if (dblayer_is_lmdb(*be)) {
|
||||
+ /* Generate use after free and double free in bdb case */
|
||||
+ ldbm_config_destroy(li);
|
||||
+ }
|
||||
slapi_ch_free((void**)&(*be)->be_database);
|
||||
slapi_ch_free((void**)&(*be)->be_instance_info);
|
||||
slapi_ch_free((void**)be);
|
||||
diff --git a/ldap/servers/slapd/tools/dbscan.c b/ldap/servers/slapd/tools/dbscan.c
index 2d28dd951..12edf7c5b 100644
--- a/ldap/servers/slapd/tools/dbscan.c
+++ b/ldap/servers/slapd/tools/dbscan.c
@@ -26,6 +26,7 @@
#include <string.h>
#include <ctype.h>
#include <errno.h>
+#include <getopt.h>
#include "../back-ldbm/dbimpl.h"
#include "../slapi-plugin.h"
#include "nspr.h"
@@ -85,6 +86,8 @@
#define DB_BUFFER_SMALL ENOMEM
#endif

+#define COUNTOF(array) ((sizeof(array))/sizeof(*(array)))
+
#if defined(linux)
#include <getopt.h>
#endif
@@ -130,9 +133,43 @@ long ind_cnt = 0;
long allids_cnt = 0;
long other_cnt = 0;
char *dump_filename = NULL;
+int do_it = 0;

static Slapi_Backend *be = NULL; /* Pseudo backend used to interact with db */

+/* For Long options without shortcuts */
+enum {
+ OPT_FIRST = 0x1000,
+ OPT_DO_IT,
+ OPT_REMOVE,
+};
+
+static const struct option options[] = {
+ /* Options without shortcut */
+ { "do-it", no_argument, 0, OPT_DO_IT },
+ { "remove", no_argument, 0, OPT_REMOVE },
+ /* Options with shortcut */
+ { "import", required_argument, 0, 'I' },
+ { "export", required_argument, 0, 'X' },
+ { "db-type", required_argument, 0, 'D' },
+ { "dbi", required_argument, 0, 'f' },
+ { "ascii", no_argument, 0, 'A' },
+ { "raw", no_argument, 0, 'R' },
+ { "truncate-entry", required_argument, 0, 't' },
+ { "entry-id", required_argument, 0, 'K' },
+ { "key", required_argument, 0, 'k' },
+ { "list", required_argument, 0, 'L' },
+ { "stats", required_argument, 0, 'S' },
+ { "id-list-max-size", required_argument, 0, 'l' },
+ { "id-list-min-size", required_argument, 0, 'G' },
+ { "show-id-list-lenghts", no_argument, 0, 'n' },
+ { "show-id-list", no_argument, 0, 'r' },
+ { "summary", no_argument, 0, 's' },
+ { "help", no_argument, 0, 'h' },
+ { 0, 0, 0, 0 }
+};
+
+
/** db_printf - functioning same as printf but a place for manipluating output.
*/
void
@@ -899,7 +936,7 @@ is_changelog(char *filename)
}

static void
-usage(char *argv0)
+usage(char *argv0, int error)
{
char *copy = strdup(argv0);
char *p0 = NULL, *p1 = NULL;
@@ -922,42 +959,52 @@
}
printf("\n%s - scan a db file and dump the contents\n", p0);
printf(" common options:\n");
- printf(" -D <dbimpl> specify db implementaion (may be: bdb or mdb)\n");
- printf(" -f <filename> specify db file\n");
- printf(" -A dump as ascii data\n");
- printf(" -R dump as raw data\n");
- printf(" -t <size> entry truncate size (bytes)\n");
+ printf(" -A, --ascii dump as ascii data\n");
+ printf(" -D, --db-type <dbimpl> specify db implementation (may be: bdb or mdb)\n");
+ printf(" -f, --dbi <filename> specify db instance\n");
+ printf(" -R, --raw dump as raw data\n");
+ printf(" -t, --truncate-entry <size> entry truncate size (bytes)\n");
+
printf(" entry file options:\n");
- printf(" -K <entry_id> lookup only a specific entry id\n");
+ printf(" -K, --entry-id <entry_id> lookup only a specific entry id\n");
+
printf(" index file options:\n");
- printf(" -k <key> lookup only a specific key\n");
- printf(" -L <dbhome> list all db files\n");
- printf(" -S <dbhome> show statistics\n");
- printf(" -l <size> max length of dumped id list\n");
- printf(" (default %" PRIu32 "; 40 bytes <= size <= 1048576 bytes)\n", MAX_BUFFER);
- printf(" -G <n> only display index entries with more than <n> ids\n");
- printf(" -n display ID list lengths\n");
- printf(" -r display the conents of ID list\n");
- printf(" -s Summary of index counts\n");
- printf(" -I file Import database content from file\n");
- printf(" -X file Export database content in file\n");
+ printf(" -G, --id-list-min-size <n> only display index entries with more than <n> ids\n");
+ printf(" -I, --import file Import database instance from file.\n");
+ printf(" -k, --key <key> lookup only a specific key\n");
+ printf(" -l, --id-list-max-size <size> max length of dumped id list\n");
+ printf(" (default %" PRIu32 "; 40 bytes <= size <= 1048576 bytes)\n", MAX_BUFFER);
+ printf(" -n, --show-id-list-lenghts display ID list lengths\n");
+ printf(" --remove remove database instance\n");
+ printf(" -r, --show-id-list display the contents of ID list\n");
+ printf(" -S, --stats <dbhome> show statistics\n");
+ printf(" -X, --export file export database instance in file\n");
+
+ printf(" other options:\n");
+ printf(" -s, --summary summary of index counts\n");
+ printf(" -L, --list <dbhome> list all db files\n");
+ printf(" --do-it confirmation flag for destructive actions like --remove or --import\n");
+ printf(" -h, --help display this usage\n");
+
printf(" sample usages:\n");
- printf(" # list the db files\n");
- printf(" %s -D mdb -L /var/lib/dirsrv/slapd-i/db/\n", p0);
- printf(" %s -f id2entry.db\n", p0);
+ printf(" # list the database instances\n");
+ printf(" %s -L /var/lib/dirsrv/slapd-supplier1/db/\n", p0);
printf(" # dump the entry file\n");
printf(" %s -f id2entry.db\n", p0);
printf(" # display index keys in cn.db4\n");
printf(" %s -f cn.db4\n", p0);
+ printf(" # display index keys in cn on lmdb\n");
+ printf(" %s -f /var/lib/dirsrv/slapd-supplier1/db/userroot/cn.db\n", p0);
+ printf(" (Note: Use 'dbscan -L db_home_dir' to get the db instance path)\n");
printf(" # display index keys and the count of entries having the key in mail.db4\n");
printf(" %s -r -f mail.db4\n", p0);
printf(" # display index keys and the IDs having more than 20 IDs in sn.db4\n");
printf(" %s -r -G 20 -f sn.db4\n", p0);
printf(" # display summary of objectclass.db4\n");
- printf(" %s -f objectclass.db4\n", p0);
+ printf(" %s -s -f objectclass.db4\n", p0);
printf("\n");
free(copy);
- exit(1);
+ exit(error ? 1 : 0);
}

void dump_ascii_val(const char *str, dbi_val_t *val)
@@ -1126,13 +1173,12 @@ importdb(const char *dbimpl_name, const char *filename, const char *dump_name)
dblayer_init_pvt_txn();

if (!dump) {
- printf("Failed to open dump file %s. Error %d: %s\n", dump_name, errno, strerror(errno));
- fclose(dump);
+ printf("Error: Failed to open dump file %s. Error %d: %s\n", dump_name, errno, strerror(errno));
return 1;
}

if (dblayer_private_open(dbimpl_name, filename, 1, &be, &env, &db)) {
- printf("Can't initialize db plugin: %s\n", dbimpl_name);
+ printf("Error: Can't initialize db plugin: %s\n", dbimpl_name);
fclose(dump);
return 1;
}
@@ -1142,11 +1188,16 @@ importdb(const char *dbimpl_name, const char *filename, const char *dump_name)
!_read_line(dump, &keyword, &data) && keyword == 'v') {
ret = dblayer_db_op(be, db, txn.txn, DBI_OP_PUT, &key, &data);
}
+ if (ret != 0) {
+ printf("Error: failed to write record in database. Error %d: %s\n", ret, dblayer_strerror(ret));
+ dump_ascii_val("Failing record key", &key);
+ dump_ascii_val("Failing record value", &data);
+ }
fclose(dump);
dblayer_value_free(be, &key);
dblayer_value_free(be, &data);
if (dblayer_private_close(&be, &env, &db)) {
- printf("Unable to shutdown the db plugin: %s\n", dblayer_strerror(1));
+ printf("Error: Unable to shutdown the db plugin: %s\n", dblayer_strerror(1));
return 1;
}
return ret;
@@ -1243,6 +1294,7 @@ removedb(const char *dbimpl_name, const char *filename)
return 1;
}

+ db = NULL; /* Database is already closed by dblayer_db_remove */
if (dblayer_private_close(&be, &env, &db)) {
printf("Unable to shutdown the db plugin: %s\n", dblayer_strerror(1));
return 1;
@@ -1250,7 +1302,6 @@ removedb(const char *dbimpl_name, const char *filename)
return 0;
}

-
int
main(int argc, char **argv)
{
@@ -1262,11 +1313,46 @@ main(int argc, char **argv)
int ret = 0;
char *find_key = NULL;
uint32_t entry_id = 0xffffffff;
- char *dbimpl_name = (char*) "bdb";
- int c;
+ char *defdbimpl = getenv("NSSLAPD_DB_LIB");
+ char *dbimpl_name = (char*) "mdb";
+ int longopt_idx = 0;
+ int c = 0;
+ char optstring[2*COUNTOF(options)+1] = {0};
+
+ if (defdbimpl) {
+ if (strcasecmp(defdbimpl, "bdb") == 0) {
+ dbimpl_name = (char*) "bdb";
+ }
+ if (strcasecmp(defdbimpl, "mdb") == 0) {
+ dbimpl_name = (char*) "mdb";
+ }
+ }
+
+ /* Compute getopt short option string */
+ {
+ char *pt = optstring;
+ for (const struct option *opt = options; opt->name; opt++) {
+ if (opt->val > 0 && opt->val < OPT_FIRST) {
+ *pt++ = (char)(opt->val);
+ if (opt->has_arg == required_argument) {
+ *pt++ = ':';
+ }
+ }
+ }
+ *pt = '\0';
+ }

- while ((c = getopt(argc, argv, "Af:RL:S:l:nG:srk:K:hvt:D:X:I:d")) != EOF) {
+ while ((c = getopt_long(argc, argv, optstring, options, &longopt_idx)) != EOF) {
+ if (c == 0) {
+ c = longopt_idx;
+ }
switch (c) {
+ case OPT_DO_IT:
+ do_it = 1;
+ break;
+ case OPT_REMOVE:
+ display_mode |= REMOVE;
+ break;
case 'A':
display_mode |= ASCIIDATA;
break;
@@ -1332,32 +1418,48 @@ main(int argc, char **argv)
display_mode |= IMPORT;
dump_filename = optarg;
break;
- case 'd':
- display_mode |= REMOVE;
- break;
case 'h':
default:
- usage(argv[0]);
+ usage(argv[0], 1);
}
}

+ if (filename == NULL) {
+ fprintf(stderr, "PARAMETER ERROR! 'filename' parameter is missing.\n");
+ usage(argv[0], 1);
+ }
+
if (display_mode & EXPORT) {
return exportdb(dbimpl_name, filename, dump_filename);
}

if (display_mode & IMPORT) {
+ if (!strstr(filename, "/id2entry") && !strstr(filename, "/replication_changelog")) {
+ /* schema is unknown in dbscan ==> duplicate keys sort order is unknown
+ * ==> cannot create dbi with duplicate keys
+ * ==> only id2entry and repl changelog are importable.
+ */
+ fprintf(stderr, "ERROR: The only database instances that may be imported with dbscan are id2entry and replication_changelog.\n");
+ exit(1);
+ }
+
+ if (do_it == 0) {
+ fprintf(stderr, "PARAMETER ERROR! Trying to perform a destructive action (import)\n"
+ " without specifying '--do-it' parameter.\n");
+ exit(1);
+ }
return importdb(dbimpl_name, filename, dump_filename);
}

if (display_mode & REMOVE) {
+ if (do_it == 0) {
+ fprintf(stderr, "PARAMETER ERROR! Trying to perform a destructive action (remove)\n"
+ " without specifying '--do-it' parameter.\n");
+ exit(1);
+ }
return removedb(dbimpl_name, filename);
}

- if (filename == NULL) {
- fprintf(stderr, "PARAMETER ERROR! 'filename' parameter is missing.\n");
- usage(argv[0]);
- }
-
if (display_mode & LISTDBS) {
dbi_dbslist_t *dbs = dblayer_list_dbs(dbimpl_name, filename);
if (dbs) {
diff --git a/man/man1/dbscan.1 b/man/man1/dbscan.1
index 810608371..dfb6e8351 100644
--- a/man/man1/dbscan.1
+++ b/man/man1/dbscan.1
@@ -31,50 +31,94 @@ Scans a Directory Server database index file and dumps the contents.
.\" respectively.
.SH OPTIONS
A summary of options is included below:
+.IP
+common options:
+.TP
+.B \fB\-A, \-\-ascii\fR
+dump as ascii data
+.TP
+.B \fB\-D, \-\-db\-type\fR <dbimpl>
+specify db type: bdb or mdb
.TP
-.B \fB\-f\fR <filename>
-specify db file
+.B \fB\-f, \-\-dbi\fR <filename>
+specify db instance
.TP
-.B \fB\-R\fR
+.B \fB\-R, \-\-raw\fR
dump as raw data
.TP
-.B \fB\-t\fR <size>
+.B \fB\-t, \-\-truncate\-entry\fR <size>
entry truncate size (bytes)
.IP
entry file options:
.TP
-.B \fB\-K\fR <entry_id>
+.B \fB\-K, \-\-entry\-id\fR <entry_id>
lookup only a specific entry id
+.IP
index file options:
.TP
-.B \fB\-k\fR <key>
+.B \fB\-G, \-\-id\-list\-min\-size\fR <n>
+only display index entries with more than <n> ids
+.TP
+.B \fB\-I, \-\-import\fR <file>
+Import database instance from file. Requires the \-\-do\-it parameter.
+WARNING! Only the id2entry and replication_changelog database instances
+may be imported by dbscan.
+.TP
+.B \fB\-k, \-\-key\fR <key>
lookup only a specific key
.TP
-.B \fB\-l\fR <size>
+.B \fB\-l, \-\-id\-list\-max\-size\fR <size>
max length of dumped id list
(default 4096; 40 bytes <= size <= 1048576 bytes)
.TP
-.B \fB\-G\fR <n>
-only display index entries with more than <n> ids
-.TP
-.B \fB\-n\fR
+.B \fB\-n, \-\-show\-id\-list\-lenghts\fR
display ID list lengths
.TP
-.B \fB\-r\fR
+.B \fB\-\-remove\fR
+remove a db instance. Requires the \-\-do\-it parameter.
+.TP
+.B \fB\-r, \-\-show\-id\-list\fR
display the contents of ID list
.TP
-.B \fB\-s\fR
+.B \fB\-S, \-\-stats\fR
+display statistics
+.TP
+.B \fB\-X, \-\-export\fR <file>
+Export database instance to file
+.IP
+other options:
+.TP
+.B \fB\-s, \-\-summary\fR
Summary of index counts
+.TP
+.B \fB\-L, \-\-list\fR
+List of database instances
+.TP
+.B \fB\-\-do\-it\fR
+confirmation required for actions that change the database contents
+.TP
+.B \fB\-h, \-\-help\fR
+display the usage
.IP
.SH USAGE
Sample usages:
.TP
+List the database instances:
+.B
+dbscan -L /var/lib/dirsrv/slapd-supplier1/db
+.TP
Dump the entry file:
.B
dbscan \fB\-f\fR id2entry.db4
.TP
Display index keys in cn.db4:
-.B dbscan \fB\-f\fR cn.db4
+.B
+dbscan \fB\-f\fR cn.db4
+.TP
+Display index keys in cn on lmdb:
+.B
+dbscan \fB\-f\fR /var/lib/dirsrv/slapd\-supplier1/db/userroot/cn.db
+ (Note: Use \fBdbscan \-L db_home_dir\fR to get the db instance path)
.TP
Display index keys and the count of entries having the key in mail.db4:
.B
@@ -86,7 +130,7 @@ dbscan \fB\-r\fR \fB\-G\fR 20 \fB\-f\fR sn.db4
.TP
Display summary of objectclass.db4:
.B
-dbscan \fB\-f\fR objectclass.db4
+dbscan \fB\-s \-f\fR objectclass.db4
.br
.SH AUTHOR
dbscan was written by the 389 Project.
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index e87582d9e..368741a66 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -3039,14 +3039,17 @@ class DirSrv(SimpleLDAPObject, object):
return self._dbisupport
# check if -D and -L options are supported
try:
- cmd = ["%s/dbscan" % self.get_bin_dir(), "--help"]
+ cmd = ["%s/dbscan" % self.get_bin_dir(), "-h"]
self.log.debug("DEBUG: checking dbscan supported options %s" % cmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except subprocess.CalledProcessError:
pass
output, stderr = p.communicate()
- self.log.debug("is_dbi_supported output " + output.decode())
- if "-D <dbimpl>" in output.decode() and "-L <dbhome>" in output.decode():
+ output = output.decode()
+ self.log.debug("is_dbi_supported output " + output)
+ if "-D <dbimpl>" in output and "-L <dbhome>" in output:
+ self._dbisupport = True
+ elif "--db-type" in output and "--list" in output:
self._dbisupport = True
else:
self._dbisupport = False
diff --git a/src/lib389/lib389/cli_ctl/dblib.py b/src/lib389/lib389/cli_ctl/dblib.py
index e9269e340..82f09c70c 100644
--- a/src/lib389/lib389/cli_ctl/dblib.py
+++ b/src/lib389/lib389/cli_ctl/dblib.py
@@ -158,6 +158,14 @@ def run_dbscan(args):
return output


+def does_dbscan_need_do_it():
+ prefix = os.environ.get('PREFIX', "")
+ prog = f'{prefix}/bin/dbscan'
+ args = [ prog, '-h' ]
+ output = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ return '--do-it' in output.stdout
+
+
def export_changelog(be, dblib):
# Export backend changelog
try:
@@ -172,7 +180,10 @@ def import_changelog(be, dblib):
# import backend changelog
try:
cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname']
- run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name']])
+ if does_dbscan_need_do_it():
+ run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name'], '--do-it'])
+ else:
+ run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name']])
return True
except subprocess.CalledProcessError as e:
return False
--
2.48.0

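A minimal Python sketch (not part of the patch) of the derivation the new dbscan code performs when it builds its getopt short-option string from the long-option table; the option subset and helper names below are illustrative only:

# Hypothetical subset of dbscan's option table, for illustration.
NO_ARG, REQUIRED_ARG = 0, 1
OPT_FIRST = 0x1000  # long-only options use values >= OPT_FIRST

OPTIONS = [
    ("do-it", NO_ARG, 0x1001),        # long-only, no shortcut
    ("db-type", REQUIRED_ARG, ord('D')),
    ("dbi", REQUIRED_ARG, ord('f')),
    ("ascii", NO_ARG, ord('A')),
]

def optstring(options):
    """Build the getopt short-option string, skipping long-only options."""
    out = []
    for _name, has_arg, val in options:
        if 0 < val < OPT_FIRST:          # only options that have a shortcut
            out.append(chr(val))
            if has_arg == REQUIRED_ARG:  # required argument -> trailing ':'
                out.append(':')
    return ''.join(out)

print(optstring(OPTIONS))  # -> "D:f:A"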
@ -0,0 +1,125 @@
From 5613937623f0037a54490b22c60f7eb1aa52cf4e Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 25 Jun 2025 14:11:05 +0000
Subject: [PATCH] =?UTF-8?q?Issue=206825=20-=20RootDN=20Access=20Control=20?=
=?UTF-8?q?Plugin=20with=20wildcards=20for=20IP=20addre=E2=80=A6=20(#6826)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Bug description:
RootDN Access Control Plugin with wildcards for IP addresses fails with
an error "Invalid IP address"

socket.inet_aton() validates IPv4 IP addresses and does not support wildcards.

Fix description:
Add a regex pattern to match wildcard IP addresses, check each octet is
between 0-255

Fixes: https://github.com/389ds/389-ds-base/issues/6825

Reviewed by: @droideck (Thank you)
---
.../lib389/cli_conf/plugins/rootdn_ac.py | 16 +++-----
src/lib389/lib389/utils.py | 40 +++++++++++++++++++
2 files changed, 45 insertions(+), 11 deletions(-)

diff --git a/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py b/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py
index 65486fff8..1456f5ebe 100644
--- a/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py
+++ b/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py
@@ -8,7 +8,7 @@

import socket
from lib389.plugins import RootDNAccessControlPlugin
-from lib389.utils import is_valid_hostname
+from lib389.utils import is_valid_hostname, is_valid_ip
from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit
from lib389.cli_base import CustomHelpFormatter

@@ -62,19 +62,13 @@ def validate_args(args):

if args.allow_ip is not None:
for ip in args.allow_ip:
- if ip != "delete":
- try:
- socket.inet_aton(ip)
- except socket.error:
- raise ValueError(f"Invalid IP address ({ip}) for '--allow-ip'")
+ if ip != "delete" and not is_valid_ip(ip):
+ raise ValueError(f"Invalid IP address ({ip}) for '--allow-ip'")

if args.deny_ip is not None and args.deny_ip != "delete":
for ip in args.deny_ip:
- if ip != "delete":
- try:
- socket.inet_aton(ip)
- except socket.error:
- raise ValueError(f"Invalid IP address ({ip}) for '--deny-ip'")
+ if ip != "delete" and not is_valid_ip(ip):
+ raise ValueError(f"Invalid IP address ({ip}) for '--deny-ip'")

if args.allow_host is not None:
for hostname in args.allow_host:
diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
index afc282e94..3937fc1a8 100644
--- a/src/lib389/lib389/utils.py
+++ b/src/lib389/lib389/utils.py
@@ -31,6 +31,7 @@ import logging
import shutil
import ldap
import socket
+import ipaddress
import time
import stat
from datetime import (datetime, timedelta)
@@ -1707,6 +1708,45 @@ def is_valid_hostname(hostname):
allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split("."))

+def is_valid_ip(ip):
+ """ Validate an IPv4 or IPv6 address, including asterisks for wildcards. """
+ if '*' in ip and '.' in ip:
+ ipv4_pattern = r'^(\d{1,3}|\*)\.(\d{1,3}|\*)\.(\d{1,3}|\*)\.(\d{1,3}|\*)$'
+ if re.match(ipv4_pattern, ip):
+ octets = ip.split('.')
+ for octet in octets:
+ if octet != '*':
+ try:
+ val = int(octet, 10)
+ if not (0 <= val <= 255):
+ return False
+ except ValueError:
+ return False
+ return True
+ else:
+ return False
+
+ if '*' in ip and ':' in ip:
+ ipv6_pattern = r'^([0-9a-fA-F]{1,4}|\*)(:([0-9a-fA-F]{1,4}|\*)){0,7}$'
+ if re.match(ipv6_pattern, ip):
+ octets = ip.split(':')
+ for octet in octets:
+ if octet != '*':
+ try:
+ val = int(octet, 16)
+ if not (0 <= val <= 0xFFFF):
+ return False
+ except ValueError:
+ return False
+ return True
+ else:
+ return False
+
+ try:
+ ipaddress.ip_address(ip)
+ return True
+ except ValueError:
+ return False

def parse_size(size):
"""
--
2.49.0

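A sketch (not the patched lib389 code) of the same wildcard-aware validation idea in minimal form; the helper names and test addresses are illustrative:

import ipaddress
import re

def is_valid_wildcard_ipv4(ip):
    # Each octet must be '*' or an integer in 0-255.
    if not re.match(r'^(\d{1,3}|\*)(\.(\d{1,3}|\*)){3}$', ip):
        return False
    return all(o == '*' or 0 <= int(o) <= 255 for o in ip.split('.'))

def is_valid_ip(ip):
    if '*' in ip:
        return is_valid_wildcard_ipv4(ip)  # IPv6 wildcards omitted here
    try:
        ipaddress.ip_address(ip)  # plain IPv4/IPv6 without wildcards
        return True
    except ValueError:
        return False

assert is_valid_ip("192.168.1.*")
assert is_valid_ip("2001:db8::1")
assert not is_valid_ip("192.168.1.999")  # regex passes, range check fails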
@ -0,0 +1,50 @@
From b8cac173ca2549d2142332107e06fcb4bd34bd65 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Fri, 8 Mar 2024 16:15:52 +0000
Subject: [PATCH] Issue 6119 - Synchronise accept_thread with slapd_daemon
(#6120)

Bug Description: A corner case exists where the slapd_daemon has
begun its shutdown process but the accept_thread is still running
and capable of handling new connections. When this scenario occurs,
the connection subsystem has been partially deallocated and is in
an unstable state. A segfault is generated when attempting to get a
new connection from the connection table.

Fix Description: The connection table is only deallocated when the
number of active threads is 0. Modify the accept_thread to adjust
the active thread count during creation/destruction, meaning the connection
table can only be freed when the accept_thread has completed

Relates: https://github.com/389ds/389-ds-base/issues/6119

Reviewed by: @tbordaz, @Firstyear , @mreynolds389 (Thank you)
---
ldap/servers/slapd/daemon.c | 4 ++++
1 file changed, 4 insertions(+)

diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 5d01a2526..a43fc9285 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -868,6 +868,8 @@ accept_thread(void *vports)
slapi_ch_free((void **)&listener_idxs);
slapd_sockets_ports_free(ports);
slapi_ch_free((void **)&fds);
+ g_decr_active_threadcnt();
+ slapi_log_err(SLAPI_LOG_INFO, "slapd_daemon", "slapd shutting down - accept_thread\n");
}

void
@@ -1158,6 +1160,8 @@ slapd_daemon(daemon_ports_t *ports)
slapi_log_err(SLAPI_LOG_EMERG, "slapd_daemon", "Unable to fd accept thread - Shutting Down (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)\n",
errorCode, slapd_pr_strerror(errorCode));
g_set_shutdown(SLAPI_SHUTDOWN_EXIT);
+ } else {
+ g_incr_active_threadcnt();
}

#ifdef WITH_SYSTEMD
--
2.49.0

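The synchronisation idea behind this fix, transposed to a Python sketch (the server's g_incr/g_decr_active_threadcnt are C functions; the names below are illustrative): shared state is only torn down once every registered thread has checked out, so a late accept loop can never touch a half-freed table.

import threading

active_threads = 0
lock = threading.Lock()
all_done = threading.Condition(lock)

def incr_active_threadcnt():
    global active_threads
    with lock:
        active_threads += 1

def decr_active_threadcnt():
    global active_threads
    with lock:
        active_threads -= 1
        if active_threads == 0:
            all_done.notify_all()

def worker():
    try:
        pass  # accept/handle connections here
    finally:
        decr_active_threadcnt()  # always check out, even on error paths

t = threading.Thread(target=worker)
incr_active_threadcnt()  # register before the thread can run
t.start()
with lock:
    while active_threads:
        all_done.wait()  # safe point: shared resources may be freed now
t.join()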
@ -1,70 +0,0 @@
From de52853a3551f1d1876ea21b33a5242ad669fec1 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Tue, 4 Feb 2025 15:40:16 +0000
Subject: [PATCH] Issue 6566 - RI plugin failure to handle a modrdn for rename
of member of multiple groups (#6567)

Bug description:
With AM and RI plugins enabled, the rename of a user that is part of multiple groups
fails with a "value exists" error.

Fix description:
For a modrdn the RI plugin creates a new DN; before a modify is attempted, check
if the new DN already exists in the attr being updated.

Fixes: https://github.com/389ds/389-ds-base/issues/6566

Reviewed by: @progier389 , @tbordaz (Thank you)
---
ldap/servers/plugins/referint/referint.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
index 468fdc239..218863ea5 100644
--- a/ldap/servers/plugins/referint/referint.c
+++ b/ldap/servers/plugins/referint/referint.c
@@ -924,6 +924,7 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
{
Slapi_Mods *smods = NULL;
char *newDN = NULL;
+ struct berval bv = {0};
char **dnParts = NULL;
char *sval = NULL;
char *newvalue = NULL;
@@ -1026,22 +1027,30 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
}
/* else: normalize_rc < 0) Ignore the DN normalization error for now. */

+ bv.bv_val = newDN;
+ bv.bv_len = strlen(newDN);
p = PL_strstr(sval, slapi_sdn_get_ndn(origDN));
if (p == sval) {
/* (case 1) */
slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
-
+ /* Add only if the attr value does not exist */
+ if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) {
+ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
+ }
} else if (p) {
/* (case 2) */
slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
*p = '\0';
newvalue = slapi_ch_smprintf("%s%s", sval, newDN);
- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue);
+ /* Add only if the attr value does not exist */
+ if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) {
+ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue);
+ }
slapi_ch_free_string(&newvalue);
}
/* else: value does not include the modified DN. Ignore it. */
slapi_ch_free_string(&sval);
+ bv = (struct berval){0};
}
rc = _do_modify(mod_pb, entrySDN, slapi_mods_get_ldapmods_byref(smods));
if (rc) {
--
2.48.0

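The dedupe rule the fix enforces, as a Python sketch (attribute names and values below are made up for illustration): before adding the rewritten DN back to a group attribute, check whether that value is already present.

def build_rename_mods(current_values, old_value, new_value):
    mods = [("delete", old_value)]
    if new_value not in current_values:  # add only if not already present
        mods.append(("add", new_value))
    return mods

member = ["uid=new,ou=people,dc=example,dc=com"]  # rename already applied
mods = build_rename_mods(member,
                         "uid=old,ou=people,dc=example,dc=com",
                         "uid=new,ou=people,dc=example,dc=com")
assert mods == [("delete", "uid=old,ou=people,dc=example,dc=com")]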
@ -1,43 +0,0 @@
From a634756784056270773d67747061e26152d85469 Mon Sep 17 00:00:00 2001
From: Masahiro Matsuya <mmatsuya@redhat.com>
Date: Wed, 5 Feb 2025 11:38:04 +0900
Subject: [PATCH] Issue 6258 - Mitigate race condition in paged_results_test.py
(#6433)

The regression test dirsrvtests/tests/suites/paged_results/paged_results_test.py::test_multi_suffix_search has a race condition causing it to fail due to multiple queries potentially writing their logs out of chronological order.

This failure is mitigated by sorting the retrieved access_log_lines by their "op" value. This ensures the log lines are in chronological order, as expected by the assertions at the end of test_multi_suffix_search().

Helps fix: #6258

Reviewed by: @droideck , @progier389 (Thanks!)

Co-authored-by: Anuar Beisembayev <111912342+abeisemb@users.noreply.github.com>
---
dirsrvtests/tests/suites/paged_results/paged_results_test.py | 3 +++
1 file changed, 3 insertions(+)

diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
index eaf0e0da9..fca48db0f 100644
--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py
+++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
@@ -7,6 +7,7 @@
# --- END COPYRIGHT BLOCK ---
#
import socket
+import re
from random import sample, randrange

import pytest
@@ -1126,6 +1127,8 @@ def test_multi_suffix_search(topology_st, create_user, new_suffixes):
topology_st.standalone.restart(timeout=10)

access_log_lines = topology_st.standalone.ds_access_log.match('.*pr_cookie=.*')
+ # Sort access_log_lines by op number to mitigate race condition effects.
+ access_log_lines.sort(key=lambda x: int(re.search(r"op=(\d+) RESULT", x).group(1)))
pr_cookie_list = ([line.rsplit('=', 1)[-1] for line in access_log_lines])
pr_cookie_list = [int(pr_cookie) for pr_cookie in pr_cookie_list]
log.info('Assert that last pr_cookie == -1 and others pr_cookie == 0')
--
2.48.0

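A small standalone sketch of why sorting by the op number restores chronological order; the log lines below are made up, shaped like the access-log RESULT lines the test matches:

import re

lines = [
    'conn=1 op=12 RESULT err=0 pr_cookie=-1',
    'conn=1 op=10 RESULT err=0 pr_cookie=0',
    'conn=1 op=11 RESULT err=0 pr_cookie=0',
]
# Same sort key as the patched test: the numeric op value.
lines.sort(key=lambda x: int(re.search(r"op=(\d+) RESULT", x).group(1)))
assert [l.rsplit('=', 1)[-1] for l in lines] == ['0', '0', '-1']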
127
SOURCES/0006-Issue-6782-Improve-paged-result-locking.patch
Normal file
@ -0,0 +1,127 @@
From 7943443bb92fca6676922349fb12503a527cb6b1 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 15 May 2025 10:35:27 -0400
Subject: [PATCH] Issue 6782 - Improve paged result locking

Description:

When cleaning a slot, instead of memsetting everything to zero and restoring
the mutex, manually reset all the values leaving the mutex pointer
intact.

There is also a deadlock possibility when checking for an abandoned PR search
in opshared.c, and we were checking a flag value outside of the per_conn
lock.

Relates: https://github.com/389ds/389-ds-base/issues/6782

Reviewed by: progier & spichugi (Thanks!!)
---
ldap/servers/slapd/opshared.c | 10 +++++++++-
ldap/servers/slapd/pagedresults.c | 27 +++++++++++++++++----------
2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 7dc2d5983..14a7dcdfb 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -592,6 +592,14 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
int32_t tlimit;
slapi_pblock_get(pb, SLAPI_SEARCH_TIMELIMIT, &tlimit);
pagedresults_set_timelimit(pb_conn, operation, (time_t)tlimit, pr_idx);
+ /* When using this mutex in conjunction with the main paged
+ * result lock, you must do so in this order:
+ *
+ * --> pagedresults_lock()
+ * --> pagedresults_mutex
+ * <-- pagedresults_mutex
+ * <-- pagedresults_unlock()
+ */
pagedresults_mutex = pageresult_lock_get_addr(pb_conn);
}

@@ -717,11 +725,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
pr_search_result = pagedresults_get_search_result(pb_conn, operation, 1 /*locked*/, pr_idx);
if (pr_search_result) {
if (pagedresults_is_abandoned_or_notavailable(pb_conn, 1 /*locked*/, pr_idx)) {
+ pthread_mutex_unlock(pagedresults_mutex);
pagedresults_unlock(pb_conn, pr_idx);
/* Previous operation was abandoned and the simplepaged object is not in use. */
send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL);
rc = LDAP_SUCCESS;
- pthread_mutex_unlock(pagedresults_mutex);
goto free_and_return;
} else {
slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, pr_search_result);
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
index 642aefb3d..c3f3aae01 100644
--- a/ldap/servers/slapd/pagedresults.c
+++ b/ldap/servers/slapd/pagedresults.c
@@ -48,7 +48,6 @@ pageresult_lock_get_addr(Connection *conn)
static void
_pr_cleanup_one_slot(PagedResults *prp)
{
- PRLock *prmutex = NULL;
if (!prp) {
return;
}
@@ -56,13 +55,17 @@ _pr_cleanup_one_slot(PagedResults *prp)
/* sr is left; release it. */
prp->pr_current_be->be_search_results_release(&(prp->pr_search_result_set));
}
- /* clean up the slot */
- if (prp->pr_mutex) {
- /* pr_mutex is reused; back it up and reset it. */
- prmutex = prp->pr_mutex;
- }
- memset(prp, '\0', sizeof(PagedResults));
- prp->pr_mutex = prmutex;
+
+ /* clean up the slot except the mutex */
+ prp->pr_current_be = NULL;
+ prp->pr_search_result_set = NULL;
+ prp->pr_search_result_count = 0;
+ prp->pr_search_result_set_size_estimate = 0;
+ prp->pr_sort_result_code = 0;
+ prp->pr_timelimit_hr.tv_sec = 0;
+ prp->pr_timelimit_hr.tv_nsec = 0;
+ prp->pr_flags = 0;
+ prp->pr_msgid = 0;
}

/*
@@ -1007,7 +1010,8 @@ op_set_pagedresults(Operation *op)

/*
* pagedresults_lock/unlock -- introduced to protect search results for the
- * asynchronous searches.
+ * asynchronous searches. Do not call these functions while the PR conn lock
+ * is held (e.g. pageresult_lock_get_addr(conn))
*/
void
pagedresults_lock(Connection *conn, int index)
@@ -1045,6 +1049,8 @@ int
pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int index)
{
PagedResults *prp;
+ int32_t result;
+
if (!conn || (index < 0) || (index >= conn->c_pagedresults.prl_maxlen)) {
return 1; /* not abandoned, but do not want to proceed paged results op. */
}
@@ -1052,10 +1058,11 @@ pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int inde
pthread_mutex_lock(pageresult_lock_get_addr(conn));
}
prp = conn->c_pagedresults.prl_list + index;
+ result = prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED;
if (!locked) {
pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
- return prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED;
+ return result;
}

int
--
2.49.0

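The ordering rule the patch documents, as a Python sketch (lock names mirror the comment above; this is an analogue, not the server code): whenever both locks are needed, take the paged-results lock first and the per-connection mutex second, and release in reverse. Mixing orders across threads is what opens the deadlock window the fix closes.

import threading

pagedresults_lock = threading.Lock()   # outer lock (per paged-results slot)
pagedresults_mutex = threading.Lock()  # inner lock (per connection)

def abandoned_path():
    with pagedresults_lock:        # --> pagedresults_lock()
        with pagedresults_mutex:   # --> pagedresults_mutex
            pass                   # inspect/reset slot state here
        # <-- pagedresults_mutex released before the outer lock
    # <-- pagedresults_unlock()

abandoned_path()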
@ -1,566 +0,0 @@
|
||||
From 769e71499880a0820424bf925c0f0fe793e11cc8 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Fri, 28 Jun 2024 18:56:49 +0200
|
||||
Subject: [PATCH] Issue 6229 - After an initial failure, subsequent online
|
||||
backups fail (#6230)
|
||||
|
||||
* Issue 6229 - After an initial failure, subsequent online backups will not work
|
||||
|
||||
Several issues related to backup task error handling:
|
||||
Backends stay busy after the failure
|
||||
Exit code is 0 in some cases
|
||||
Crash if failing to open the backup directory
|
||||
And a more general one:
|
||||
lib389 Task DN collision
|
||||
|
||||
Solutions:
|
||||
Always reset the busy flags that have been set
|
||||
Ensure that 0 is not returned in error case
|
||||
Avoid closing NULL directory descriptor
|
||||
Use a timestamp having milliseconds precision to create the task DN
|
||||
|
||||
Issue: #6229
|
||||
|
||||
Reviewed by: @droideck (Thanks!)
|
||||
|
||||
(cherry picked from commit 04a0b6ac776a1d588ec2e10ff651e5015078ad21)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/archive.c | 45 +++++-----
|
||||
.../slapd/back-ldbm/db-mdb/mdb_layer.c | 3 +
|
||||
src/lib389/lib389/__init__.py | 10 +--
|
||||
src/lib389/lib389/tasks.py | 82 +++++++++----------
|
||||
4 files changed, 70 insertions(+), 70 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/archive.c b/ldap/servers/slapd/back-ldbm/archive.c
|
||||
index 0460a42f6..6658cc80a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/archive.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/archive.c
|
||||
@@ -16,6 +16,8 @@
|
||||
#include "back-ldbm.h"
|
||||
#include "dblayer.h"
|
||||
|
||||
+#define NO_OBJECT ((Object*)-1)
|
||||
+
|
||||
int
|
||||
ldbm_temporary_close_all_instances(Slapi_PBlock *pb)
|
||||
{
|
||||
@@ -270,6 +272,7 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
|
||||
int run_from_cmdline = 0;
|
||||
Slapi_Task *task;
|
||||
struct stat sbuf;
|
||||
+ Object *last_busy_inst_obj = NO_OBJECT;
|
||||
|
||||
slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
|
||||
slapi_pblock_get(pb, SLAPI_SEQ_VAL, &rawdirectory);
|
||||
@@ -380,13 +383,12 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
|
||||
|
||||
/* to avoid conflict w/ import, do this check for commandline, as well */
|
||||
{
|
||||
- Object *inst_obj, *inst_obj2;
|
||||
ldbm_instance *inst = NULL;
|
||||
|
||||
/* server is up -- mark all backends busy */
|
||||
- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
|
||||
- inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
|
||||
- inst = (ldbm_instance *)object_get_data(inst_obj);
|
||||
+ for (last_busy_inst_obj = objset_first_obj(li->li_instance_set); last_busy_inst_obj;
|
||||
+ last_busy_inst_obj = objset_next_obj(li->li_instance_set, last_busy_inst_obj)) {
|
||||
+ inst = (ldbm_instance *)object_get_data(last_busy_inst_obj);
|
||||
|
||||
/* check if an import/restore is already ongoing... */
|
||||
if (instance_set_busy(inst) != 0 || dblayer_in_import(inst) != 0) {
|
||||
@@ -400,20 +402,6 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
|
||||
"another task and cannot be disturbed.",
|
||||
inst->inst_name);
|
||||
}
|
||||
-
|
||||
- /* painfully, we have to clear the BUSY flags on the
|
||||
- * backends we'd already marked...
|
||||
- */
|
||||
- for (inst_obj2 = objset_first_obj(li->li_instance_set);
|
||||
- inst_obj2 && (inst_obj2 != inst_obj);
|
||||
- inst_obj2 = objset_next_obj(li->li_instance_set,
|
||||
- inst_obj2)) {
|
||||
- inst = (ldbm_instance *)object_get_data(inst_obj2);
|
||||
- instance_set_not_busy(inst);
|
||||
- }
|
||||
- if (inst_obj2 && inst_obj2 != inst_obj)
|
||||
- object_release(inst_obj2);
|
||||
- object_release(inst_obj);
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
@@ -427,18 +415,26 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
|
||||
goto err;
|
||||
}
|
||||
|
||||
- if (!run_from_cmdline) {
|
||||
+err:
|
||||
+ /* Clear all BUSY flags that have been previously set */
|
||||
+ if (last_busy_inst_obj != NO_OBJECT) {
|
||||
ldbm_instance *inst;
|
||||
Object *inst_obj;
|
||||
|
||||
- /* none of these backends are busy anymore */
|
||||
- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
|
||||
+ for (inst_obj = objset_first_obj(li->li_instance_set);
|
||||
+ inst_obj && (inst_obj != last_busy_inst_obj);
|
||||
inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
|
||||
inst = (ldbm_instance *)object_get_data(inst_obj);
|
||||
instance_set_not_busy(inst);
|
||||
}
|
||||
+ if (last_busy_inst_obj != NULL) {
|
||||
+ /* release last seen object for aborted objset_next_obj iterations */
|
||||
+ if (inst_obj != NULL) {
|
||||
+ object_release(inst_obj);
|
||||
+ }
|
||||
+ object_release(last_busy_inst_obj);
|
||||
+ }
|
||||
}
|
||||
-err:
|
||||
if (return_value) {
|
||||
if (dir_bak) {
|
||||
slapi_log_err(SLAPI_LOG_ERR,
|
||||
@@ -727,7 +723,10 @@ ldbm_archive_config(char *bakdir, Slapi_Task *task)
|
||||
}
|
||||
|
||||
error:
|
||||
- PR_CloseDir(dirhandle);
|
||||
+ if (NULL != dirhandle) {
|
||||
+ PR_CloseDir(dirhandle);
|
||||
+ dirhandle = NULL;
|
||||
+ }
|
||||
dse_backup_unlock();
|
||||
slapi_ch_free_string(&backup_config_dir);
|
||||
slapi_ch_free_string(&dse_file);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
index 70a289bdb..de4161b0c 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
@@ -983,6 +983,9 @@ dbmdb_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task)
|
||||
if (ldbm_archive_config(dest_dir, task) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "dbmdb_backup",
|
||||
"Backup of config files failed or is incomplete\n");
|
||||
+ if (0 == return_value) {
|
||||
+ return_value = -1;
|
||||
+ }
|
||||
}
|
||||
|
||||
goto bail;
|
||||
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
|
||||
index 368741a66..cb372c138 100644
|
||||
--- a/src/lib389/lib389/__init__.py
|
||||
+++ b/src/lib389/lib389/__init__.py
|
||||
@@ -69,7 +69,7 @@ from lib389.utils import (
|
||||
get_user_is_root)
|
||||
from lib389.paths import Paths
|
||||
from lib389.nss_ssl import NssSsl
|
||||
-from lib389.tasks import BackupTask, RestoreTask
|
||||
+from lib389.tasks import BackupTask, RestoreTask, Task
|
||||
from lib389.dseldif import DSEldif
|
||||
|
||||
# mixin
|
||||
@@ -1424,7 +1424,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
name, self.ds_paths.prefix)
|
||||
|
||||
# create the archive
|
||||
- name = "backup_%s_%s.tar.gz" % (self.serverid, time.strftime("%m%d%Y_%H%M%S"))
|
||||
+ name = "backup_%s_%s.tar.gz" % (self.serverid, Task.get_timestamp())
|
||||
backup_file = os.path.join(backup_dir, name)
|
||||
tar = tarfile.open(backup_file, "w:gz")
|
||||
tar.extraction_filter = (lambda member, path: member)
|
||||
@@ -2810,7 +2810,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
else:
|
||||
# No output file specified. Use the default ldif location/name
|
||||
cmd.append('-a')
|
||||
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
|
||||
+ tnow = Task.get_timestamp()
|
||||
if bename:
|
||||
ldifname = os.path.join(self.ds_paths.ldif_dir, "%s-%s-%s.ldif" % (self.serverid, bename, tnow))
|
||||
else:
|
||||
@@ -2881,7 +2881,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
|
||||
if archive_dir is None:
|
||||
# Use the instance name and date/time as the default backup name
|
||||
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
|
||||
+ tnow = Task.get_timestamp()
|
||||
archive_dir = os.path.join(self.ds_paths.backup_dir, "%s-%s" % (self.serverid, tnow))
|
||||
elif not archive_dir.startswith("/"):
|
||||
# Relative path, append it to the bak directory
|
||||
@@ -3506,7 +3506,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
|
||||
if archive is None:
|
||||
# Use the instance name and date/time as the default backup name
|
||||
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
|
||||
+ tnow = Task.get_timestamp()
|
||||
if self.serverid is not None:
|
||||
backup_dir_name = "%s-%s" % (self.serverid, tnow)
|
||||
else:
|
||||
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
|
||||
index 6c2adb5b2..6bf302862 100644
|
||||
--- a/src/lib389/lib389/tasks.py
|
||||
+++ b/src/lib389/lib389/tasks.py
|
||||
@@ -118,7 +118,7 @@ class Task(DSLdapObject):
|
||||
return super(Task, self).create(rdn, properties, basedn)
|
||||
|
||||
@staticmethod
|
||||
- def _get_task_date():
|
||||
+ def get_timestamp():
|
||||
"""Return a timestamp to use in naming new task entries."""
|
||||
|
||||
return datetime.now().isoformat()
|
||||
@@ -132,7 +132,7 @@ class AutomemberRebuildMembershipTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'automember_rebuild_' + Task._get_task_date()
|
||||
+ self.cn = 'automember_rebuild_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_REBUILD_TASK
|
||||
|
||||
super(AutomemberRebuildMembershipTask, self).__init__(instance, dn)
|
||||
@@ -147,7 +147,7 @@ class AutomemberAbortRebuildTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'automember_abort_' + Task._get_task_date()
|
||||
+ self.cn = 'automember_abort_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_ABORT_REBUILD_TASK
|
||||
|
||||
super(AutomemberAbortRebuildTask, self).__init__(instance, dn)
|
||||
@@ -161,7 +161,7 @@ class FixupLinkedAttributesTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'fixup_linked_attrs_' + Task._get_task_date()
|
||||
+ self.cn = 'fixup_linked_attrs_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_FIXUP_LINKED_ATTIBUTES
|
||||
|
||||
super(FixupLinkedAttributesTask, self).__init__(instance, dn)
|
||||
@@ -175,7 +175,7 @@ class MemberUidFixupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'memberUid_fixup_' + Task._get_task_date()
|
||||
+ self.cn = 'memberUid_fixup_' + Task.get_timestamp()
|
||||
dn = f"cn={self.cn},cn=memberuid task,cn=tasks,cn=config"
|
||||
|
||||
super(MemberUidFixupTask, self).__init__(instance, dn)
|
||||
@@ -190,7 +190,7 @@ class MemberOfFixupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'memberOf_fixup_' + Task._get_task_date()
|
||||
+ self.cn = 'memberOf_fixup_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_MBO_TASK
|
||||
|
||||
super(MemberOfFixupTask, self).__init__(instance, dn)
|
||||
@@ -205,7 +205,7 @@ class USNTombstoneCleanupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'usn_cleanup_' + Task._get_task_date()
|
||||
+ self.cn = 'usn_cleanup_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=USN tombstone cleanup task," + DN_TASKS
|
||||
|
||||
super(USNTombstoneCleanupTask, self).__init__(instance, dn)
|
||||
@@ -225,7 +225,7 @@ class csngenTestTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'csngenTest_' + Task._get_task_date()
|
||||
+ self.cn = 'csngenTest_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=csngen_test," + DN_TASKS
|
||||
super(csngenTestTask, self).__init__(instance, dn)
|
||||
|
||||
@@ -238,7 +238,7 @@ class EntryUUIDFixupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'entryuuid_fixup_' + Task._get_task_date()
|
||||
+ self.cn = 'entryuuid_fixup_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_EUUID_TASK
|
||||
super(EntryUUIDFixupTask, self).__init__(instance, dn)
|
||||
self._must_attributes.extend(['basedn'])
|
||||
@@ -252,7 +252,7 @@ class DBCompactTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'compact_db_' + Task._get_task_date()
|
||||
+ self.cn = 'compact_db_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_COMPACTDB_TASK
|
||||
super(DBCompactTask, self).__init__(instance, dn)
|
||||
|
||||
@@ -265,7 +265,7 @@ class SchemaReloadTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'schema_reload_' + Task._get_task_date()
|
||||
+ self.cn = 'schema_reload_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=schema reload task," + DN_TASKS
|
||||
super(SchemaReloadTask, self).__init__(instance, dn)
|
||||
|
||||
@@ -278,7 +278,7 @@ class SyntaxValidateTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'syntax_validate_' + Task._get_task_date()
|
||||
+ self.cn = 'syntax_validate_' + Task.get_timestamp()
|
||||
dn = f"cn={self.cn},cn=syntax validate,cn=tasks,cn=config"
|
||||
|
||||
super(SyntaxValidateTask, self).__init__(instance, dn)
|
||||
@@ -295,7 +295,7 @@ class AbortCleanAllRUVTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'abortcleanallruv_' + Task._get_task_date()
|
||||
+ self.cn = 'abortcleanallruv_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=abort cleanallruv," + DN_TASKS
|
||||
|
||||
super(AbortCleanAllRUVTask, self).__init__(instance, dn)
|
||||
@@ -312,7 +312,7 @@ class CleanAllRUVTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'cleanallruv_' + Task._get_task_date()
|
||||
+ self.cn = 'cleanallruv_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=cleanallruv," + DN_TASKS
|
||||
self._properties = None
|
||||
|
||||
@@ -359,7 +359,7 @@ class ImportTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'import_' + Task._get_task_date()
|
||||
+ self.cn = 'import_' + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (self.cn, DN_IMPORT_TASK)
|
||||
self._properties = None
|
||||
|
||||
@@ -388,7 +388,7 @@ class ExportTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'export_' + Task._get_task_date()
|
||||
+ self.cn = 'export_' + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (self.cn, DN_EXPORT_TASK)
|
||||
self._properties = None
|
||||
|
||||
@@ -411,7 +411,7 @@ class BackupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'backup_' + Task._get_task_date()
|
||||
+ self.cn = 'backup_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=backup," + DN_TASKS
|
||||
self._properties = None
|
||||
|
||||
@@ -426,7 +426,7 @@ class RestoreTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'restore_' + Task._get_task_date()
|
||||
+ self.cn = 'restore_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=restore," + DN_TASKS
|
||||
self._properties = None
|
||||
|
||||
@@ -513,7 +513,7 @@ class Tasks(object):
|
||||
raise ValueError("Import file (%s) does not exist" % input_file)
|
||||
|
||||
# Prepare the task entry
|
||||
- cn = "import_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = "import_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_IMPORT_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -581,7 +581,7 @@ class Tasks(object):
|
||||
raise ValueError("output_file is mandatory")
|
||||
|
||||
# Prepare the task entry
|
||||
- cn = "export_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = "export_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_EXPORT_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.update({
|
||||
@@ -637,7 +637,7 @@ class Tasks(object):
|
||||
raise ValueError("You must specify a backup directory.")
|
||||
|
||||
# build the task entry
|
||||
- cn = "backup_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = "backup_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_BACKUP_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.update({
|
||||
@@ -694,7 +694,7 @@ class Tasks(object):
|
||||
raise ValueError("Backup file (%s) does not exist" % backup_dir)
|
||||
|
||||
# build the task entry
|
||||
- cn = "restore_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = "restore_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_RESTORE_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.update({
|
||||
@@ -789,7 +789,7 @@ class Tasks(object):
|
||||
attrs.append(attr)
|
||||
else:
|
||||
attrs.append(attrname)
|
||||
- cn = "index_vlv_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
|
||||
+ cn = "index_vlv_%s" % (Task.get_timestamp())
|
||||
dn = "cn=%s,%s" % (cn, DN_INDEX_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.update({
|
||||
@@ -803,7 +803,7 @@ class Tasks(object):
|
||||
#
|
||||
# Reindex all attributes - gather them first...
|
||||
#
|
||||
- cn = "index_all_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
|
||||
+ cn = "index_all_%s" % (Task.get_timestamp())
|
||||
dn = ('cn=%s,cn=ldbm database,cn=plugins,cn=config' % backend)
|
||||
try:
|
||||
indexes = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, '(objectclass=nsIndex)')
|
||||
@@ -815,7 +815,7 @@ class Tasks(object):
|
||||
#
|
||||
# Reindex specific attributes
|
||||
#
|
||||
- cn = "index_attrs_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
|
||||
+ cn = "index_attrs_%s" % (Task.get_timestamp())
|
||||
if isinstance(attrname, (tuple, list)):
|
||||
# Need to guarantee this is a list (and not a tuple)
|
||||
for attr in attrname:
|
||||
@@ -903,8 +903,7 @@ class Tasks(object):
|
||||
|
||||
suffix = ents[0].getValue(attr)
|
||||
|
||||
- cn = "fixupmemberof_" + time.strftime("%m%d%Y_%H%M%S",
|
||||
- time.localtime())
|
||||
+ cn = "fixupmemberof_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_MBO_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -965,8 +964,7 @@ class Tasks(object):
|
||||
if len(ents) != 1:
|
||||
raise ValueError("invalid backend name: %s" % bename)
|
||||
|
||||
- cn = "fixupTombstone_" + time.strftime("%m%d%Y_%H%M%S",
|
||||
- time.localtime())
|
||||
+ cn = "fixupTombstone_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_TOMB_FIXUP_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1019,7 +1017,7 @@ class Tasks(object):
|
||||
@return exit code
|
||||
'''
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=automember rebuild membership,cn=tasks,cn=config' % cn)
|
||||
|
||||
entry = Entry(dn)
|
||||
@@ -1077,7 +1075,7 @@ class Tasks(object):
|
||||
if not ldif_out:
|
||||
raise ValueError("Missing ldif_out")
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=automember export updates,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1129,7 +1127,7 @@ class Tasks(object):
|
||||
if not ldif_out or not ldif_in:
|
||||
raise ValueError("Missing ldif_out and/or ldif_in")

- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=automember map updates,cn=tasks,cn=config' % cn)

entry = Entry(dn)
@@ -1175,7 +1173,7 @@ class Tasks(object):
@return exit code
'''

- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=fixup linked attributes,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1219,7 +1217,7 @@ class Tasks(object):
@return exit code
'''

- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=schema reload task,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1264,7 +1262,7 @@ class Tasks(object):
@return exit code
'''

- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=memberuid task,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1311,7 +1309,7 @@ class Tasks(object):
@return exit code
'''

- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=syntax validate,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1358,7 +1356,7 @@ class Tasks(object):
@return exit code
'''

- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=USN tombstone cleanup task,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1413,7 +1411,7 @@ class Tasks(object):
if not configfile:
raise ValueError("Missing required paramter: configfile")

- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=sysconfig reload,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1464,7 +1462,7 @@ class Tasks(object):
if not suffix:
raise ValueError("Missing required paramter: suffix")

- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=cleanallruv,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1516,7 +1514,7 @@ class Tasks(object):
if not suffix:
raise ValueError("Missing required paramter: suffix")

- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=abort cleanallruv,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1571,7 +1569,7 @@ class Tasks(object):
if not nsArchiveDir:
raise ValueError("Missing required paramter: nsArchiveDir")

- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=upgradedb,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1616,6 +1614,6 @@ class LDAPIMappingReloadTask(Task):
"""

def __init__(self, instance, dn=None):
- self.cn = 'reload-' + Task._get_task_date()
+ self.cn = 'reload-' + Task.get_timestamp()
dn = f'cn={self.cn},cn=reload ldapi mappings,cn=tasks,cn=config'
super(LDAPIMappingReloadTask, self).__init__(instance, dn)
--
2.48.0

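A note on the patch above: each task constructor previously formatted its own CN timestamp with time.strftime(), so the same logic was duplicated across every task and any fix to it had to be made in many places. The diff centralizes the value in one helper, Task.get_timestamp(). A minimal sketch of that idea in Python, assuming a helper of that name; the real lib389 implementation may differ:

    import time

    class Task:
        @staticmethod
        def get_timestamp():
            # One shared definition of the task CN timestamp. Centralizing it
            # means a later change (e.g. making same-second task names
            # collision-resistant) only has to happen in this one place.
            return time.strftime("%m%d%Y_%H%M%S", time.localtime())

    cn = 'task-' + Task.get_timestamp()   # e.g. task-07152025_181210
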
@ -0,0 +1,488 @@
From b6729a99f3a3d4c6ebe82d4bb60ea2a6f8727782 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 27 Jun 2025 18:43:39 -0700
Subject: [PATCH] Issue 6822 - Backend creation cleanup and Database UI tab
error handling (#6823)

Description: Add rollback functionality when mapping tree creation fails
during backend creation to prevent orphaned backends.
Improve error handling in Database, Replication and Monitoring UI tabs
to gracefully handle backend get-tree command failures.

Fixes: https://github.com/389ds/389-ds-base/issues/6822

Reviewed by: @mreynolds389 (Thanks!)
---
src/cockpit/389-console/src/database.jsx | 119 ++++++++------
src/cockpit/389-console/src/monitor.jsx | 172 +++++++++++---------
src/cockpit/389-console/src/replication.jsx | 55 ++++---
src/lib389/lib389/backend.py | 18 +-
4 files changed, 210 insertions(+), 154 deletions(-)

diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx
index c0c4be414..276125dfc 100644
--- a/src/cockpit/389-console/src/database.jsx
+++ b/src/cockpit/389-console/src/database.jsx
@@ -478,6 +478,59 @@ export class Database extends React.Component {
}

loadSuffixTree(fullReset) {
+ const treeData = [
+ {
+ name: _("Global Database Configuration"),
+ icon: <CogIcon />,
+ id: "dbconfig",
+ },
+ {
+ name: _("Chaining Configuration"),
+ icon: <ExternalLinkAltIcon />,
+ id: "chaining-config",
+ },
+ {
+ name: _("Backups & LDIFs"),
+ icon: <CopyIcon />,
+ id: "backups",
+ },
+ {
+ name: _("Password Policies"),
+ id: "pwp",
+ icon: <KeyIcon />,
+ children: [
+ {
+ name: _("Global Policy"),
+ icon: <HomeIcon />,
+ id: "pwpolicy",
+ },
+ {
+ name: _("Local Policies"),
+ icon: <UsersIcon />,
+ id: "localpwpolicy",
+ },
+ ],
+ defaultExpanded: true
+ },
+ {
+ name: _("Suffixes"),
+ icon: <CatalogIcon />,
+ id: "suffixes-tree",
+ children: [],
+ defaultExpanded: true,
+ action: (
+ <Button
+ onClick={this.handleShowSuffixModal}
+ variant="plain"
+ aria-label="Create new suffix"
+ title={_("Create new suffix")}
+ >
+ <PlusIcon />
+ </Button>
+ ),
+ }
+ ];
+
const cmd = [
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
"backend", "get-tree",
@@ -491,58 +544,20 @@ export class Database extends React.Component {
suffixData = JSON.parse(content);
this.processTree(suffixData);
}
- const treeData = [
- {
- name: _("Global Database Configuration"),
- icon: <CogIcon />,
- id: "dbconfig",
- },
- {
- name: _("Chaining Configuration"),
- icon: <ExternalLinkAltIcon />,
- id: "chaining-config",
- },
- {
- name: _("Backups & LDIFs"),
- icon: <CopyIcon />,
- id: "backups",
- },
- {
- name: _("Password Policies"),
- id: "pwp",
- icon: <KeyIcon />,
- children: [
- {
- name: _("Global Policy"),
- icon: <HomeIcon />,
- id: "pwpolicy",
- },
- {
- name: _("Local Policies"),
- icon: <UsersIcon />,
- id: "localpwpolicy",
- },
- ],
- defaultExpanded: true
- },
- {
- name: _("Suffixes"),
- icon: <CatalogIcon />,
- id: "suffixes-tree",
- children: suffixData,
- defaultExpanded: true,
- action: (
- <Button
- onClick={this.handleShowSuffixModal}
- variant="plain"
- aria-label="Create new suffix"
- title={_("Create new suffix")}
- >
- <PlusIcon />
- </Button>
- ),
- }
- ];
+
+ let current_node = this.state.node_name;
+ if (fullReset) {
+ current_node = DB_CONFIG;
+ }
+
+ treeData[4].children = suffixData; // suffixes node
+ this.setState(() => ({
+ nodes: treeData,
+ node_name: current_node,
+ }), this.loadAttrs);
+ })
+ .fail(err => {
+ // Handle backend get-tree failure gracefully
let current_node = this.state.node_name;
if (fullReset) {
current_node = DB_CONFIG;
diff --git a/src/cockpit/389-console/src/monitor.jsx b/src/cockpit/389-console/src/monitor.jsx
index ad48d1f87..91a8e3e37 100644
--- a/src/cockpit/389-console/src/monitor.jsx
+++ b/src/cockpit/389-console/src/monitor.jsx
@@ -200,6 +200,84 @@ export class Monitor extends React.Component {
}

loadSuffixTree(fullReset) {
+ const basicData = [
+ {
+ name: _("Server Statistics"),
+ icon: <ClusterIcon />,
+ id: "server-monitor",
+ type: "server",
+ },
+ {
+ name: _("Replication"),
+ icon: <TopologyIcon />,
+ id: "replication-monitor",
+ type: "replication",
+ defaultExpanded: true,
+ children: [
+ {
+ name: _("Synchronization Report"),
+ icon: <MonitoringIcon />,
+ id: "sync-report",
+ item: "sync-report",
+ type: "repl-mon",
+ },
+ {
+ name: _("Log Analysis"),
+ icon: <MonitoringIcon />,
+ id: "log-analysis",
+ item: "log-analysis",
+ type: "repl-mon",
+ }
+ ],
+ },
+ {
+ name: _("Database"),
+ icon: <DatabaseIcon />,
+ id: "database-monitor",
+ type: "database",
+ children: [], // Will be populated with treeData on success
+ defaultExpanded: true,
+ },
+ {
+ name: _("Logging"),
+ icon: <CatalogIcon />,
+ id: "log-monitor",
+ defaultExpanded: true,
+ children: [
+ {
+ name: _("Access Log"),
+ icon: <BookIcon size="sm" />,
+ id: "access-log-monitor",
+ type: "log",
+ },
+ {
+ name: _("Audit Log"),
+ icon: <BookIcon size="sm" />,
+ id: "audit-log-monitor",
+ type: "log",
+ },
+ {
+ name: _("Audit Failure Log"),
+ icon: <BookIcon size="sm" />,
+ id: "auditfail-log-monitor",
+ type: "log",
+ },
+ {
+ name: _("Errors Log"),
+ icon: <BookIcon size="sm" />,
+ id: "error-log-monitor",
+ type: "log",
+ },
+ {
+ name: _("Security Log"),
+ icon: <BookIcon size="sm" />,
+ id: "security-log-monitor",
+ type: "log",
+ },
+ ]
+ },
+ ];
+
const cmd = [
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
"backend", "get-tree",
@@ -210,83 +288,7 @@ export class Monitor extends React.Component {
.done(content => {
const treeData = JSON.parse(content);
this.processTree(treeData);
- const basicData = [
- {
- name: _("Server Statistics"),
- icon: <ClusterIcon />,
- id: "server-monitor",
- type: "server",
- },
- {
- name: _("Replication"),
- icon: <TopologyIcon />,
- id: "replication-monitor",
- type: "replication",
- defaultExpanded: true,
- children: [
- {
- name: _("Synchronization Report"),
- icon: <MonitoringIcon />,
- id: "sync-report",
- item: "sync-report",
- type: "repl-mon",
- },
- {
- name: _("Log Analysis"),
- icon: <MonitoringIcon />,
- id: "log-analysis",
- item: "log-analysis",
- type: "repl-mon",
- }
- ],
- },
- {
- name: _("Database"),
- icon: <DatabaseIcon />,
- id: "database-monitor",
- type: "database",
- children: [],
- defaultExpanded: true,
- },
- {
- name: _("Logging"),
- icon: <CatalogIcon />,
- id: "log-monitor",
- defaultExpanded: true,
- children: [
- {
- name: _("Access Log"),
- icon: <BookIcon size="sm" />,
- id: "access-log-monitor",
- type: "log",
- },
- {
- name: _("Audit Log"),
- icon: <BookIcon size="sm" />,
- id: "audit-log-monitor",
- type: "log",
- },
- {
- name: _("Audit Failure Log"),
- icon: <BookIcon size="sm" />,
- id: "auditfail-log-monitor",
- type: "log",
- },
- {
- name: _("Errors Log"),
- icon: <BookIcon size="sm" />,
- id: "error-log-monitor",
- type: "log",
- },
- {
- name: _("Security Log"),
- icon: <BookIcon size="sm" />,
- id: "security-log-monitor",
- type: "log",
- },
- ]
- },
- ];
+
let current_node = this.state.node_name;
let type = this.state.node_type;
if (fullReset) {
@@ -296,6 +298,22 @@ export class Monitor extends React.Component {
basicData[2].children = treeData; // database node
this.processReplSuffixes(basicData[1].children);

+ this.setState(() => ({
+ nodes: basicData,
+ node_name: current_node,
+ node_type: type,
+ }), this.update_tree_nodes);
+ })
+ .fail(err => {
+ // Handle backend get-tree failure gracefully
+ let current_node = this.state.node_name;
+ let type = this.state.node_type;
+ if (fullReset) {
+ current_node = "server-monitor";
+ type = "server";
+ }
+ this.processReplSuffixes(basicData[1].children);
+
this.setState(() => ({
nodes: basicData,
node_name: current_node,
diff --git a/src/cockpit/389-console/src/replication.jsx b/src/cockpit/389-console/src/replication.jsx
index fa492fd2a..aa535bfc7 100644
--- a/src/cockpit/389-console/src/replication.jsx
+++ b/src/cockpit/389-console/src/replication.jsx
@@ -177,6 +177,16 @@ export class Replication extends React.Component {
loaded: false
});

+ const basicData = [
+ {
+ name: _("Suffixes"),
+ icon: <TopologyIcon />,
+ id: "repl-suffixes",
+ children: [],
+ defaultExpanded: true
+ }
+ ];
+
const cmd = [
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
"backend", "get-tree",
@@ -199,15 +209,7 @@ export class Replication extends React.Component {
}
}
}
- const basicData = [
- {
- name: _("Suffixes"),
- icon: <TopologyIcon />,
- id: "repl-suffixes",
- children: [],
- defaultExpanded: true
- }
- ];
+
let current_node = this.state.node_name;
let current_type = this.state.node_type;
let replicated = this.state.node_replicated;
@@ -258,6 +260,19 @@ export class Replication extends React.Component {
}

basicData[0].children = treeData;
+ this.setState({
+ nodes: basicData,
+ node_name: current_node,
+ node_type: current_type,
+ node_replicated: replicated,
+ }, () => { this.update_tree_nodes() });
+ })
+ .fail(err => {
+ // Handle backend get-tree failure gracefully
+ let current_node = this.state.node_name;
+ let current_type = this.state.node_type;
+ let replicated = this.state.node_replicated;
+
this.setState({
nodes: basicData,
node_name: current_node,
@@ -905,18 +920,18 @@ export class Replication extends React.Component {
disableTree: false
});
});
- })
- .fail(err => {
- const errMsg = JSON.parse(err);
- this.props.addNotification(
- "error",
- cockpit.format(_("Error loading replication agreements configuration - $0"), errMsg.desc)
- );
- this.setState({
- suffixLoading: false,
- disableTree: false
+ })
+ .fail(err => {
+ const errMsg = JSON.parse(err);
+ this.props.addNotification(
+ "error",
+ cockpit.format(_("Error loading replication agreements configuration - $0"), errMsg.desc)
+ );
+ this.setState({
+ suffixLoading: false,
+ disableTree: false
+ });
});
- });
})
.fail(err => {
// changelog failure
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index 1319fa0cd..5bff61c58 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -694,24 +694,32 @@ class Backend(DSLdapObject):
parent_suffix = properties.pop('parent', False)

# Okay, now try to make the backend.
- super(Backend, self).create(dn, properties, basedn)
+ backend_obj = super(Backend, self).create(dn, properties, basedn)

# We check if the mapping tree exists in create, so do this *after*
if create_mapping_tree is True:
- properties = {
+ mapping_tree_properties = {
'cn': self._nprops_stash['nsslapd-suffix'],
'nsslapd-state': 'backend',
'nsslapd-backend': self._nprops_stash['cn'],
}
if parent_suffix:
# This is a subsuffix, set the parent suffix
- properties['nsslapd-parent-suffix'] = parent_suffix
- self._mts.create(properties=properties)
+ mapping_tree_properties['nsslapd-parent-suffix'] = parent_suffix
+
+ try:
+ self._mts.create(properties=mapping_tree_properties)
+ except Exception as e:
+ try:
+ backend_obj.delete()
+ except Exception as cleanup_error:
+ self._instance.log.error(f"Failed to cleanup backend after mapping tree creation failure: {cleanup_error}")
+ raise e

# We can't create the sample entries unless a mapping tree was installed.
if sample_entries is not False and create_mapping_tree is True:
self.create_sample_entries(sample_entries)
- return self
+ return backend_obj

def delete(self):
"""Deletes the backend, it's mapping tree and all related indices.
--
2.49.0

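The backend.py hunk above is a create-then-rollback pattern: if the second step (mapping tree creation) fails, the first step (the backend entry) is deleted before the error is re-raised, so no orphaned backend is left behind, and a failure during the cleanup itself is logged rather than masking the original error. A generic sketch of the pattern; the names here are illustrative, not lib389 API:

    def create_pair(make_primary, make_dependent, log):
        """Create two linked resources; roll back the first if the second fails."""
        primary = make_primary()
        try:
            make_dependent()
        except Exception as err:
            try:
                primary.delete()          # undo step 1 so nothing is orphaned
            except Exception as cleanup_error:
                log.error(f"Rollback failed: {cleanup_error}")
            raise err                     # surface the original failure
        return primary
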
@ -1,165 +0,0 @@
From b2511553590f0d9b41856d8baff5f3cd103dd46f Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 6 Feb 2025 18:25:36 +0100
Subject: [PATCH] Issue 6554 - During import of entries without nsUniqueId, a
supplier generates duplicate nsUniqueId (LMDB only) (#6582)

Bug description:
During an import the entry is prepared (schema, operational
attributes, password encryption,...) before starting the
update of the database and indexes.
A step of the preparation is to assign a value to 'nsuniqueid'
operational attribute. 'nsuniqueid' must be unique.
In LMDB the preparation is done by multiple threads (workers).
In such case the 'nsuniqueid' are generated in parallel and
as it is time based several values can be duplicated.

Fix description:
To prevent that, the routine dbmdb_import_generate_uniqueid
should make sure to synchronize the workers.

fixes: #6554

Reviewed by: Pierre Rogier
---
.../tests/suites/import/import_test.py | 79 ++++++++++++++++++-
.../back-ldbm/db-mdb/mdb_import_threads.c | 11 +++
2 files changed, 89 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py
index b7cba32fd..18caec633 100644
--- a/dirsrvtests/tests/suites/import/import_test.py
+++ b/dirsrvtests/tests/suites/import/import_test.py
@@ -14,11 +14,13 @@ import os
import pytest
import time
import glob
+import re
import logging
import subprocess
from datetime import datetime
from lib389.topologies import topology_st as topo
-from lib389._constants import DEFAULT_SUFFIX, TaskWarning
+from lib389.topologies import topology_m2 as topo_m2
+from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX, TaskWarning
from lib389.dbgen import dbgen_users
from lib389.tasks import ImportTask
from lib389.index import Indexes
@@ -690,6 +692,81 @@ def test_online_import_under_load(topo):
assert import_task.get_exit_code() == 0


+def test_duplicate_nsuniqueid(topo_m2, request):
+ """Test that after an offline import all
+ nsuniqueid are different
+
+ :id: a2541677-a288-4633-bacf-4050cc56016d
+ :setup: MMR with 2 suppliers
+ :steps:
+ 1. stop the instance to do offline operations
+ 2. Generate a 5K users LDIF file
+ 3. Check that no uniqueid are present in the generated file
+ 4. import the generated LDIF
+ 5. export the database
+ 6. Check that that exported LDIF contains more than 5K nsuniqueid
+ 7. Check that there is no duplicate nsuniqued in exported LDIF
+ :expectedresults:
+ 1. Should succeeds
+ 2. Should succeeds
+ 3. Should succeeds
+ 4. Should succeeds
+ 5. Should succeeds
+ 6. Should succeeds
+ 7. Should succeeds
+ """
+ m1 = topo_m2.ms["supplier1"]
+
+ # Stop the instance
+ m1.stop()
+
+ # Generate a test ldif (5k entries)
+ log.info("Generating LDIF...")
+ ldif_dir = m1.get_ldif_dir()
+ import_ldif = ldif_dir + '/5k_users_import.ldif'
+ dbgen_users(m1, 5000, import_ldif, DEFAULT_SUFFIX)
+
+ # Check that the generated LDIF does not contain nsuniqueid
+ all_nsuniqueid = []
+ with open(import_ldif, 'r') as file:
+ for line in file:
+ if line.lower().startswith("nsuniqueid: "):
+ all_nsuniqueid.append(line.split(': ')[1])
+ log.info("import file contains " + str(len(all_nsuniqueid)) + " nsuniqueid")
+ assert len(all_nsuniqueid) == 0
+
+ # Import the "nsuniquied free" LDIF file
+ if not m1.ldif2db('userRoot', None, None, None, import_ldif):
+ assert False
+
+ # Export the DB that now should contain nsuniqueid
+ export_ldif = ldif_dir + '/5k_user_export.ldif'
+ log.info("export to file " + export_ldif)
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=export_ldif, encrypt=False)
+
+ # Check that the export LDIF contain nsuniqueid
+ all_nsuniqueid = []
+ with open(export_ldif, 'r') as file:
+ for line in file:
+ if line.lower().startswith("nsuniqueid: "):
+ all_nsuniqueid.append(line.split(': ')[1])
+ log.info("export file " + export_ldif + " contains " + str(len(all_nsuniqueid)) + " nsuniqueid")
+ assert len(all_nsuniqueid) >= 5000
+
+ # Check that the nsuniqueid are unique
+ assert len(set(all_nsuniqueid)) == len(all_nsuniqueid)
+
+ def fin():
+ if os.path.exists(import_ldif):
+ os.remove(import_ldif)
+ if os.path.exists(export_ldif):
+ os.remove(export_ldif)
+ m1.start
+
+ request.addfinalizer(fin)
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
index 707a110c5..0f445bb56 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
@@ -610,10 +610,20 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e)
{
const char *uniqueid = slapi_entry_get_uniqueid(e);
int rc = UID_SUCCESS;
+ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

if (!uniqueid && (job->uuid_gen_type != SLAPI_UNIQUEID_GENERATE_NONE)) {
char *newuniqueid;

+ /* With 'mdb' we have several workers generating nsuniqueid
+ * we need to serialize them to prevent generating duplicate value
+ * From performance pov it only impacts import
+ * The default value is SLAPI_UNIQUEID_GENERATE_TIME_BASED so
+ * the only syscall is clock_gettime and then string formating
+ * that should limit contention
+ */
+ pthread_mutex_lock(&mutex);
+
/* generate id based on dn */
if (job->uuid_gen_type == SLAPI_UNIQUEID_GENERATE_NAME_BASED) {
char *dn = slapi_entry_get_dn(e);
@@ -624,6 +634,7 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e)
/* time based */
rc = slapi_uniqueIDGenerateString(&newuniqueid);
}
+ pthread_mutex_unlock(&mutex);

if (rc == UID_SUCCESS) {
slapi_entry_set_uniqueid(e, newuniqueid);
--
2.48.0

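The C fix serializes the workers around the shared uniqueid generator, because a time-based generator's state is not safe to advance from several threads at once: two workers can observe the same clock value and emit the same id. A Python analogue of the same idea, a stateful time-based generator guarded by a lock; this is illustrative only, not the slapi implementation:

    import threading
    import time

    class TimeBasedIdGenerator:
        def __init__(self):
            self._lock = threading.Lock()
            self._last = (0, 0)  # (timestamp, sequence)

        def next_id(self):
            # Serialize: without the lock, two workers can read the same
            # timestamp/sequence pair and emit duplicate ids.
            with self._lock:
                now = int(time.time())
                ts, seq = self._last
                seq = seq + 1 if now == ts else 0
                self._last = (now, seq)
                return f"{now:08x}-{seq:04x}"

    gen = TimeBasedIdGenerator()
    ids = []

    def worker():
        ids.extend(gen.next_id() for _ in range(10000))

    threads = [threading.Thread(target=worker) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    assert len(set(ids)) == len(ids)  # no duplicates across all workers
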
@ -0,0 +1,45 @@
From 0a7fe7c6e18759459499f468443ded4313ebdeab Mon Sep 17 00:00:00 2001
From: Alexander Bokovoy <abokovoy@redhat.com>
Date: Wed, 9 Jul 2025 12:08:09 +0300
Subject: [PATCH] Issue 6857 - uiduniq: allow specifying match rules in the
filter

Allow uniqueness plugin to work with attributes where uniqueness should
be enforced using different matching rule than the one defined for the
attribute itself.

Since uniqueness plugin configuration can contain multiple attributes,
add matching rule right to the attribute as it is used in the LDAP rule
(e.g. 'attribute:caseIgnoreMatch:' to force 'attribute' to be searched
with case-insensitive matching rule instead of the original matching
rule).

Fixes: https://github.com/389ds/389-ds-base/issues/6857

Signed-off-by: Alexander Bokovoy <abokovoy@redhat.com>
---
ldap/servers/plugins/uiduniq/uid.c | 7 +++++++
1 file changed, 7 insertions(+)

diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
index 053af4f9d..887e79d78 100644
--- a/ldap/servers/plugins/uiduniq/uid.c
+++ b/ldap/servers/plugins/uiduniq/uid.c
@@ -1030,7 +1030,14 @@ preop_add(Slapi_PBlock *pb)
}

for (i = 0; attrNames && attrNames[i]; i++) {
+ char *attr_match = strchr(attrNames[i], ':');
+ if (attr_match != NULL) {
+ attr_match[0] = '\0';
+ }
err = slapi_entry_attr_find(e, attrNames[i], &attr);
+ if (attr_match != NULL) {
+ attr_match[0] = ':';
+ }
if (!err) {
/*
* Passed all the requirements - this is an operation we
--
2.49.0

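With this change the uniqueness plugin accepts the matching rule inline in the configured attribute name, using the same attr:matchingRule: form as an LDAP extensible-match filter. A hedged lib389 sketch of configuring it; AttributeUniquenessPlugin, create(), add_unique_attribute() and enable_all_subtrees() appear in the test added by the later str2filter patch below, while the caseIgnoreMatch value here is an illustrative choice and `inst` is assumed to be an existing DirSrv instance:

    from lib389.plugins import AttributeUniquenessPlugin

    # Enforce case-insensitive uniqueness on "mail", regardless of the
    # attribute's own equality matching rule.
    attruniq = AttributeUniquenessPlugin(inst, dn="cn=attruniq,cn=plugins,cn=config")
    attruniq.create(properties={'cn': 'attruniq'})
    attruniq.add_unique_attribute('mail:caseIgnoreMatch:')  # attr:MR: form
    attruniq.enable_all_subtrees()
    attruniq.enable()
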
@ -1,38 +0,0 @@
From 116b7cf21618ad7e717ae7f535709508a824f7d9 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Thu, 13 Feb 2025 16:37:43 +0100
Subject: [PATCH] Issue 6561 - TLS 1.2 stickiness in FIPS mode

Description:
TLS 1.3 works with NSS in FIPS mode for quite some time now,
this restriction is no longer needed.

Fixes: https://github.com/389ds/389-ds-base/issues/6561

Reviewed by: @mreynolds389 (Thanks!)
---
ldap/servers/slapd/ssl.c | 8 --------
1 file changed, 8 deletions(-)

diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c
index 94259efe7..84a7fb004 100644
--- a/ldap/servers/slapd/ssl.c
+++ b/ldap/servers/slapd/ssl.c
@@ -1929,14 +1929,6 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
*/
sslStatus = SSL_VersionRangeGet(pr_sock, &slapdNSSVersions);
if (sslStatus == SECSuccess) {
- if (slapdNSSVersions.max > LDAP_OPT_X_TLS_PROTOCOL_TLS1_2 && fipsMode) {
- /*
- * FIPS & NSS currently only support a max version of TLS1.2
- * (although NSS advertises 1.3 as a max range in FIPS mode),
- * hopefully this code block can be removed soon...
- */
- slapdNSSVersions.max = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2;
- }
/* Reset request range */
sslStatus = SSL_VersionRangeSet(pr_sock, &slapdNSSVersions);
if (sslStatus == SECSuccess) {
--
2.48.1

File diff suppressed because it is too large
@ -1,44 +0,0 @@
From 39d91c4b86fc2ad7e35f8bebd510dff984e8ba56 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Wed, 5 Mar 2025 23:46:02 +0100
Subject: [PATCH] Issue 6090 - dbscan: use bdb by default

Bug Description:
dbscan started to use mdb by default on versions where it's not the
default.

Fix Description:
Use bdb by default on 2.x versions.

Relates: https://github.com/389ds/389-ds-base/issues/6090

Reviewed by: @mreynolds389 (Thanks!)
---
ldap/servers/slapd/tools/dbscan.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/slapd/tools/dbscan.c b/ldap/servers/slapd/tools/dbscan.c
index 12edf7c5b..9260c1532 100644
--- a/ldap/servers/slapd/tools/dbscan.c
+++ b/ldap/servers/slapd/tools/dbscan.c
@@ -1280,7 +1280,7 @@ removedb(const char *dbimpl_name, const char *filename)

if (!filename) {
printf("Error: -f option is missing.\n"
- "Usage: dbscan -D mdb -d -f <db_home_dir>/<backend_name>/<db_name>\n");
+ "Usage: dbscan -D bdb -d -f <db_home_dir>/<backend_name>/<db_name>\n");
return 1;
}

@@ -1314,7 +1314,7 @@ main(int argc, char **argv)
char *find_key = NULL;
uint32_t entry_id = 0xffffffff;
char *defdbimpl = getenv("NSSLAPD_DB_LIB");
- char *dbimpl_name = (char*) "mdb";
+ char *dbimpl_name = (char*) "bdb";
int longopt_idx = 0;
int c = 0;
char optstring[2*COUNTOF(options)+1] = {0};
--
2.48.1

@ -0,0 +1,399 @@
From 5198da59d622dbc39afe2ece9c6f40f4fb249d52 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 9 Jul 2025 14:18:50 -0400
Subject: [PATCH] Issue 6859 - str2filter is not fully applying matching rules

Description:

When we have an extended filter, one with a MR applied, it is ignored during
internal searches:

"(cn:CaseExactMatch:=Value)"

For internal searches we use str2filter() and it doesn't fully apply extended
search filter matching rules

Also needed to update attr uniqueness plugin to apply this change for mod
operations (previously only Adds were correctly handling these attribute
filters)

Relates: https://github.com/389ds/389-ds-base/issues/6857
Relates: https://github.com/389ds/389-ds-base/issues/6859

Reviewed by: spichugi & tbordaz(Thanks!!)
---
.../tests/suites/plugins/attruniq_test.py | 295 +++++++++++++++++-
ldap/servers/plugins/uiduniq/uid.c | 7 +
ldap/servers/slapd/plugin_mr.c | 2 +-
ldap/servers/slapd/str2filter.c | 8 +
4 files changed, 309 insertions(+), 3 deletions(-)

diff --git a/dirsrvtests/tests/suites/plugins/attruniq_test.py b/dirsrvtests/tests/suites/plugins/attruniq_test.py
index b190e0ec1..b338f405f 100644
--- a/dirsrvtests/tests/suites/plugins/attruniq_test.py
+++ b/dirsrvtests/tests/suites/plugins/attruniq_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2021 Red Hat, Inc.
+# Copyright (C) 2025 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -80,4 +80,295 @@ def test_modrdn_attr_uniqueness(topology_st):
log.debug(excinfo.value)

log.debug('Move user2 to group1')
- user2.rename(f'uid={user2.rdn}', group1.dn)
\ No newline at end of file
+
+ user2.rename(f'uid={user2.rdn}', group1.dn)
+
+ # Cleanup for next test
+ user1.delete()
+ user2.delete()
+ attruniq.disable()
+ attruniq.delete()
+
+
+def test_multiple_attr_uniqueness(topology_st):
+ """ Test that attribute uniqueness works properly with multiple attributes
+
+ :id: c49aa5c1-7e65-45fd-b064-55e0b815e9bc
+ :setup: Standalone instance
+ :steps:
+ 1. Setup attribute uniqueness plugin to ensure uniqueness of attributes 'mail' and 'mailAlternateAddress'
+ 2. Add user with unique 'mail=non-uniq@value.net' and 'mailAlternateAddress=alt-mail@value.net'
+ 3. Try adding another user with 'mail=non-uniq@value.net'
+ 4. Try adding another user with 'mailAlternateAddress=alt-mail@value.net'
+ 5. Try adding another user with 'mail=alt-mail@value.net'
+ 6. Try adding another user with 'mailAlternateAddress=non-uniq@value.net'
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Should raise CONSTRAINT_VIOLATION
+ 4. Should raise CONSTRAINT_VIOLATION
+ 5. Should raise CONSTRAINT_VIOLATION
+ 6. Should raise CONSTRAINT_VIOLATION
+ """
+ attruniq = AttributeUniquenessPlugin(topology_st.standalone, dn="cn=attruniq,cn=plugins,cn=config")
+
+ try:
+ log.debug(f'Setup PLUGIN_ATTR_UNIQUENESS plugin for {MAIL_ATTR_VALUE} attribute for the group2')
+ attruniq.create(properties={'cn': 'attruniq'})
+ attruniq.add_unique_attribute('mail')
+ attruniq.add_unique_attribute('mailAlternateAddress')
+ attruniq.add_unique_subtree(DEFAULT_SUFFIX)
+ attruniq.enable_all_subtrees()
+ log.debug(f'Enable PLUGIN_ATTR_UNIQUENESS plugin as "ON"')
+ attruniq.enable()
+ except ldap.LDAPError as e:
+ log.fatal('test_multiple_attribute_uniqueness: Failed to configure plugin for "mail": error {}'.format(e.args[0]['desc']))
+ assert False
+
+ topology_st.standalone.restart()
+
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
+
+ testuser1 = users.create_test_user(100,100)
+ testuser1.add('objectclass', 'extensibleObject')
+ testuser1.add('mail', MAIL_ATTR_VALUE)
+ testuser1.add('mailAlternateAddress', MAIL_ATTR_VALUE_ALT)
+
+ testuser2 = users.create_test_user(200, 200)
+ testuser2.add('objectclass', 'extensibleObject')
+
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+ testuser2.add('mail', MAIL_ATTR_VALUE)
+
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+ testuser2.add('mailAlternateAddress', MAIL_ATTR_VALUE_ALT)
+
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+ testuser2.add('mail', MAIL_ATTR_VALUE_ALT)
+
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+ testuser2.add('mailAlternateAddress', MAIL_ATTR_VALUE)
+
+ # Cleanup
+ testuser1.delete()
+ testuser2.delete()
+ attruniq.disable()
+ attruniq.delete()
+
+
+def test_exclude_subtrees(topology_st):
+ """ Test attribute uniqueness with exclude scope
+
+ :id: 43d29a60-40e1-4ebd-b897-6ef9f20e9f27
+ :setup: Standalone instance
+ :steps:
+ 1. Setup and enable attribute uniqueness plugin for telephonenumber unique attribute
+ 2. Create subtrees and test users
+ 3. Add a unique attribute to a user within uniqueness scope
+ 4. Add exclude subtree
+ 5. Try to add existing value attribute to an entry within uniqueness scope
+ 6. Try to add existing value attribute to an entry within exclude scope
+ 7. Remove the attribute from affected entries
+ 8. Add a unique attribute to a user within exclude scope
+ 9. Try to add existing value attribute to an entry within uniqueness scope
+ 10. Try to add existing value attribute to another entry within uniqueness scope
+ 11. Remove the attribute from affected entries
+ 12. Add another exclude subtree
+ 13. Add a unique attribute to a user within uniqueness scope
+ 14. Try to add existing value attribute to an entry within uniqueness scope
+ 15. Try to add existing value attribute to an entry within exclude scope
+ 16. Try to add existing value attribute to an entry within another exclude scope
+ 17. Clean up entries
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Should raise CONSTRAINT_VIOLATION
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Should raise CONSTRAINT_VIOLATION
+ 11. Success
+ 12. Success
+ 13. Success
+ 14. Should raise CONSTRAINT_VIOLATION
+ 15. Success
+ 16. Success
+ 17. Success
+ """
+ log.info('Setup attribute uniqueness plugin')
+ attruniq = AttributeUniquenessPlugin(topology_st.standalone, dn="cn=attruniq,cn=plugins,cn=config")
+ attruniq.create(properties={'cn': 'attruniq'})
+ attruniq.add_unique_attribute('telephonenumber')
+ attruniq.add_unique_subtree(DEFAULT_SUFFIX)
+ attruniq.enable_all_subtrees()
+ attruniq.enable()
+ topology_st.standalone.restart()
+
+ log.info('Create subtrees container')
+ containers = nsContainers(topology_st.standalone, DEFAULT_SUFFIX)
+ cont1 = containers.create(properties={'cn': EXCLUDED_CONTAINER_CN})
+ cont2 = containers.create(properties={'cn': EXCLUDED_BIS_CONTAINER_CN})
+ cont3 = containers.create(properties={'cn': ENFORCED_CONTAINER_CN})
+
+ log.info('Create test users')
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
+ rdn='cn={}'.format(ENFORCED_CONTAINER_CN))
+ users_excluded = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
+ rdn='cn={}'.format(EXCLUDED_CONTAINER_CN))
+ users_excluded2 = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
+ rdn='cn={}'.format(EXCLUDED_BIS_CONTAINER_CN))
+
+ user1 = users.create(properties={'cn': USER_1_CN,
+ 'uid': USER_1_CN,
+ 'sn': USER_1_CN,
+ 'uidNumber': '1',
+ 'gidNumber': '11',
+ 'homeDirectory': '/home/{}'.format(USER_1_CN)})
+ user2 = users.create(properties={'cn': USER_2_CN,
+ 'uid': USER_2_CN,
+ 'sn': USER_2_CN,
+ 'uidNumber': '2',
+ 'gidNumber': '22',
+ 'homeDirectory': '/home/{}'.format(USER_2_CN)})
+ user3 = users_excluded.create(properties={'cn': USER_3_CN,
+ 'uid': USER_3_CN,
+ 'sn': USER_3_CN,
+ 'uidNumber': '3',
+ 'gidNumber': '33',
+ 'homeDirectory': '/home/{}'.format(USER_3_CN)})
+ user4 = users_excluded2.create(properties={'cn': USER_4_CN,
+ 'uid': USER_4_CN,
+ 'sn': USER_4_CN,
+ 'uidNumber': '4',
+ 'gidNumber': '44',
+ 'homeDirectory': '/home/{}'.format(USER_4_CN)})
+
+ UNIQUE_VALUE = '1234'
+
+ try:
+ log.info('Create user with unique attribute')
+ user1.add('telephonenumber', UNIQUE_VALUE)
+ assert user1.present('telephonenumber', UNIQUE_VALUE)
+
+ log.info('Add exclude subtree')
+ attruniq.add_exclude_subtree(EXCLUDED_CONTAINER_DN)
+ topology_st.standalone.restart()
+
+ log.info('Verify an already used attribute value cannot be added within the same subtree')
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+ user2.add('telephonenumber', UNIQUE_VALUE)
+
+ log.info('Verify an entry with same attribute value can be added within exclude subtree')
+ user3.add('telephonenumber', UNIQUE_VALUE)
+ assert user3.present('telephonenumber', UNIQUE_VALUE)
+
+ log.info('Cleanup unique attribute values')
+ user1.remove_all('telephonenumber')
+ user3.remove_all('telephonenumber')
+
+ log.info('Add a unique value to an entry in excluded scope')
+ user3.add('telephonenumber', UNIQUE_VALUE)
+ assert user3.present('telephonenumber', UNIQUE_VALUE)
+
+ log.info('Verify the same value can be added to an entry within uniqueness scope')
+ user1.add('telephonenumber', UNIQUE_VALUE)
+ assert user1.present('telephonenumber', UNIQUE_VALUE)
+
+ log.info('Verify that yet another same value cannot be added to another entry within uniqueness scope')
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+ user2.add('telephonenumber', UNIQUE_VALUE)
+
+ log.info('Cleanup unique attribute values')
+ user1.remove_all('telephonenumber')
+ user3.remove_all('telephonenumber')
+
+ log.info('Add another exclude subtree')
+ attruniq.add_exclude_subtree(EXCLUDED_BIS_CONTAINER_DN)
+ topology_st.standalone.restart()
+
+ user1.add('telephonenumber', UNIQUE_VALUE)
+ log.info('Verify an already used attribute value cannot be added within the same subtree')
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+ user2.add('telephonenumber', UNIQUE_VALUE)
+
+ log.info('Verify an already used attribute can be added to an entry in exclude scope')
+ user3.add('telephonenumber', UNIQUE_VALUE)
+ assert user3.present('telephonenumber', UNIQUE_VALUE)
+ user4.add('telephonenumber', UNIQUE_VALUE)
+ assert user4.present('telephonenumber', UNIQUE_VALUE)
+
+ finally:
+ log.info('Clean up users, containers and attribute uniqueness plugin')
+ user1.delete()
+ user2.delete()
+ user3.delete()
+ user4.delete()
+ cont1.delete()
+ cont2.delete()
+ cont3.delete()
+ attruniq.disable()
+ attruniq.delete()
+
+
+def test_matchingrule_attr(topology_st):
+ """ Test list extension MR attribute. Check for "cn" using CES (versus it
+ being defined as CIS)
+
+ :id: 5cde4342-6fa3-4225-b23d-0af918981075
+ :setup: Standalone instance
+ :steps:
+ 1. Setup and enable attribute uniqueness plugin to use CN attribute
+ with a matching rule of CaseExactMatch.
+ 2. Add user with CN value is lowercase
+ 3. Add second user with same lowercase CN which should be rejected
+ 4. Add second user with same CN value but with mixed case
+ 5. Modify second user replacing CN value to lc which should be rejected
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ """
+
+ inst = topology_st.standalone
+
+ attruniq = AttributeUniquenessPlugin(inst,
+ dn="cn=attribute uniqueness,cn=plugins,cn=config")
+ attruniq.add_unique_attribute('cn:CaseExactMatch:')
+ attruniq.enable_all_subtrees()
+ attruniq.enable()
+ inst.restart()
+
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ users.create(properties={'cn': "common_name",
+ 'uid': "uid_name",
+ 'sn': "uid_name",
+ 'uidNumber': '1',
+ 'gidNumber': '11',
+ 'homeDirectory': '/home/uid_name'})
+
+ log.info('Add entry with the exact CN value which should be rejected')
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+ users.create(properties={'cn': "common_name",
+ 'uid': "uid_name2",
+ 'sn': "uid_name2",
+ 'uidNumber': '11',
+ 'gidNumber': '111',
+ 'homeDirectory': '/home/uid_name2'})
+
+ log.info('Add entry with the mixed case CN value which should be allowed')
+ user = users.create(properties={'cn': "Common_Name",
+ 'uid': "uid_name2",
+ 'sn': "uid_name2",
+ 'uidNumber': '11',
+ 'gidNumber': '111',
+ 'homeDirectory': '/home/uid_name2'})
+
+ log.info('Mod entry with exact case CN value which should be rejected')
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+ user.replace('cn', 'common_name')
diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
index 887e79d78..fdb1404a0 100644
--- a/ldap/servers/plugins/uiduniq/uid.c
+++ b/ldap/servers/plugins/uiduniq/uid.c
@@ -1178,6 +1178,10 @@ preop_modify(Slapi_PBlock *pb)
for (; mods && *mods; mods++) {
mod = *mods;
for (i = 0; attrNames && attrNames[i]; i++) {
+ char *attr_match = strchr(attrNames[i], ':');
+ if (attr_match != NULL) {
+ attr_match[0] = '\0';
+ }
if ((slapi_attr_type_cmp(mod->mod_type, attrNames[i], 1) == 0) && /* mod contains target attr */
(mod->mod_op & LDAP_MOD_BVALUES) && /* mod is bval encoded (not string val) */
(mod->mod_bvalues && mod->mod_bvalues[0]) && /* mod actually contains some values */
@@ -1186,6 +1190,9 @@ preop_modify(Slapi_PBlock *pb)
{
addMod(&checkmods, &checkmodsCapacity, &modcount, mod);
}
+ if (attr_match != NULL) {
+ attr_match[0] = ':';
+ }
}
}
if (modcount == 0) {
diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c
index b262820c5..67051a5ff 100644
--- a/ldap/servers/slapd/plugin_mr.c
+++ b/ldap/servers/slapd/plugin_mr.c
@@ -626,7 +626,7 @@ attempt_mr_filter_create(mr_filter_t *f, struct slapdplugin *mrp, Slapi_PBlock *
int rc;
IFP mrf_create = NULL;
f->mrf_match = NULL;
- pblock_init(pb);
+ slapi_pblock_init(pb);
if (!(rc = slapi_pblock_set(pb, SLAPI_PLUGIN, mrp)) &&
!(rc = slapi_pblock_get(pb, SLAPI_PLUGIN_MR_FILTER_CREATE_FN, &mrf_create)) &&
mrf_create != NULL &&
diff --git a/ldap/servers/slapd/str2filter.c b/ldap/servers/slapd/str2filter.c
index 9fdc500f7..5620b7439 100644
--- a/ldap/servers/slapd/str2filter.c
+++ b/ldap/servers/slapd/str2filter.c
@@ -344,6 +344,14 @@ str2simple(char *str, int unescape_filter)
return NULL; /* error */
} else {
f->f_choice = LDAP_FILTER_EXTENDED;
+ if (f->f_mr_oid) {
+ /* apply the MR indexers */
+ rc = plugin_mr_filter_create(&f->f_mr);
+ if (rc) {
+ slapi_filter_free(f, 1);
+ return NULL; /* error */
+ }
+ }
}
} else if (str_find_star(value) == NULL) {
f->f_choice = LDAP_FILTER_EQUALITY;
--
2.49.0

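For reference, the filter form this fix is about is the standard LDAP extensible-match syntax (RFC 4515), where the matching rule rides inside the filter itself rather than coming from the attribute's schema definition. A small python-ldap sketch of issuing such a search from a client; the URL, bind DN, password, and base DN are placeholders:

    import ldap

    conn = ldap.initialize("ldap://localhost:389")          # placeholder URL
    conn.simple_bind_s("cn=Directory Manager", "password")  # placeholder creds

    # Ask the server to compare "cn" with CaseExactMatch instead of the
    # attribute's default (case-insensitive) equality rule.
    results = conn.search_s("dc=example,dc=com", ldap.SCOPE_SUBTREE,
                            "(cn:CaseExactMatch:=Common_Name)")
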
@ -0,0 +1,163 @@
From 406563c136d78235751e34a3c7e22ccaf114f754 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 15 Jul 2025 17:56:18 -0400
Subject: [PATCH] Issue 6872 - compressed log rotation creates files with world
readable permission

Description:

When compressing a log file, first create the empty file using open()
so we can set the correct permissions right from the start. gzopen()
always uses permission 644 and that is not safe. So after creating it
with open(), with the correct permissions, then pass the FD to gzdopen()
and write the compressed content.

relates: https://github.com/389ds/389-ds-base/issues/6872

Reviewed by: progier(Thanks!)
---
.../logging/logging_compression_test.py | 15 ++++++++--
ldap/servers/slapd/log.c | 28 +++++++++++++------
ldap/servers/slapd/schema.c | 2 +-
3 files changed, 33 insertions(+), 12 deletions(-)

diff --git a/dirsrvtests/tests/suites/logging/logging_compression_test.py b/dirsrvtests/tests/suites/logging/logging_compression_test.py
index e30874cc0..3a987d62c 100644
--- a/dirsrvtests/tests/suites/logging/logging_compression_test.py
+++ b/dirsrvtests/tests/suites/logging/logging_compression_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2022 Red Hat, Inc.
+# Copyright (C) 2025 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -22,12 +22,21 @@ log = logging.getLogger(__name__)

pytestmark = pytest.mark.tier1

+
def log_rotated_count(log_type, log_dir, check_compressed=False):
- # Check if the log was rotated
+ """
+ Check if the log was rotated and has the correct permissions
+ """
log_file = f'{log_dir}/{log_type}.2*'
if check_compressed:
log_file += ".gz"
- return len(glob.glob(log_file))
+ log_files = glob.glob(log_file)
+ for logf in log_files:
+ # Check permissions
+ st = os.stat(logf)
+ assert oct(st.st_mode) == '0o100600' # 0600
+
+ return len(log_files)


def update_and_sleep(inst, suffix, sleep=True):
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index a018ca2d5..178d29b89 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -172,17 +172,28 @@ get_syslog_loglevel(int loglevel)
}

static int
-compress_log_file(char *log_name)
+compress_log_file(char *log_name, int32_t mode)
{
char gzip_log[BUFSIZ] = {0};
char buf[LOG_CHUNK] = {0};
size_t bytes_read = 0;
gzFile outfile = NULL;
FILE *source = NULL;
+ int fd = 0;

PR_snprintf(gzip_log, sizeof(gzip_log), "%s.gz", log_name);
- if ((outfile = gzopen(gzip_log,"wb")) == NULL) {
- /* Failed to open new gzip file */
+
+ /*
+ * Try to open the file as we may have an incorrect path. We also need to
+ * set the permissions using open() as gzopen() creates the file with
+ * 644 permissions (world readable - bad). So we create an empty file with
+ * the correct permissions, then we pass the FD to gzdopen() to write the
+ * compressed content.
+ */
+ if ((fd = open(gzip_log, O_WRONLY|O_CREAT|O_TRUNC, mode)) >= 0) {
+ /* FIle successfully created, now pass the FD to gzdopen() */
+ outfile = gzdopen(fd, "ab");
+ } else {
return -1;
}

@@ -191,6 +202,7 @@ compress_log_file(char *log_name)
gzclose(outfile);
return -1;
}
+
bytes_read = fread(buf, 1, LOG_CHUNK, source);
while (bytes_read > 0) {
int bytes_written = gzwrite(outfile, buf, bytes_read);
@@ -3291,7 +3303,7 @@ log__open_accesslogfile(int logfile_state, int locked)
return LOG_UNABLE_TO_OPENFILE;
}
} else if (loginfo.log_access_compress) {
- if (compress_log_file(newfile) != 0) {
+ if (compress_log_file(newfile, loginfo.log_access_mode) != 0) {
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
"failed to compress rotated access log (%s)\n",
newfile);
@@ -3455,7 +3467,7 @@ log__open_securitylogfile(int logfile_state, int locked)
return LOG_UNABLE_TO_OPENFILE;
}
} else if (loginfo.log_security_compress) {
- if (compress_log_file(newfile) != 0) {
+ if (compress_log_file(newfile, loginfo.log_security_mode) != 0) {
slapi_log_err(SLAPI_LOG_ERR, "log__open_securitylogfile",
"failed to compress rotated security audit log (%s)\n",
newfile);
@@ -6172,7 +6184,7 @@ log__open_errorlogfile(int logfile_state, int locked)
return LOG_UNABLE_TO_OPENFILE;
}
} else if (loginfo.log_error_compress) {
- if (compress_log_file(newfile) != 0) {
+ if (compress_log_file(newfile, loginfo.log_error_mode) != 0) {
PR_snprintf(buffer, sizeof(buffer), "Failed to compress errors log file (%s)\n", newfile);
log__error_emergency(buffer, 1, 1);
} else {
@@ -6355,7 +6367,7 @@ log__open_auditlogfile(int logfile_state, int locked)
return LOG_UNABLE_TO_OPENFILE;
}
} else if (loginfo.log_audit_compress) {
- if (compress_log_file(newfile) != 0) {
+ if (compress_log_file(newfile, loginfo.log_audit_mode) != 0) {
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
"failed to compress rotated audit log (%s)\n",
newfile);
@@ -6514,7 +6526,7 @@ log__open_auditfaillogfile(int logfile_state, int locked)
return LOG_UNABLE_TO_OPENFILE;
}
} else if (loginfo.log_auditfail_compress) {
- if (compress_log_file(newfile) != 0) {
+ if (compress_log_file(newfile, loginfo.log_auditfail_mode) != 0) {
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
"failed to compress rotated auditfail log (%s)\n",
newfile);
diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
index a8e6b1210..9ef4ee4bf 100644
--- a/ldap/servers/slapd/schema.c
+++ b/ldap/servers/slapd/schema.c
@@ -903,7 +903,7 @@ oc_check_allowed_sv(Slapi_PBlock *pb, Slapi_Entry *e, const char *type, struct o

if (pb) {
PR_snprintf(errtext, sizeof(errtext),
- "attribute \"%s\" not allowed\n",
+ "attribute \"%s\" not allowed",
escape_string(type, ebuf));
slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, errtext);
}
--
2.49.0

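The same create-with-mode-then-wrap trick translates directly to Python: os.open() fixes the permissions before any data is written, and the gzip writer is layered on the already-open descriptor, just as the C code hands its fd to gzdopen(). A sketch under that assumption; paths and the chunk size are illustrative, and as with C open(), the final mode is still subject to the process umask:

    import gzip
    import os

    def compress_log(path, mode=0o600):
        gz_path = path + ".gz"
        # Create the empty output file with the right permissions up front;
        # writing through gzip.open() alone would leave it world readable (0644).
        fd = os.open(gz_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode)
        with os.fdopen(fd, "wb") as raw, gzip.GzipFile(fileobj=raw, mode="wb") as gz:
            with open(path, "rb") as src:
                while chunk := src.read(64 * 1024):
                    gz.write(chunk)
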
@ -0,0 +1,116 @@
From 9b8b23f6d46f16fbc1784b26cfc04dd6b4fa94e1 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 18 Jul 2025 18:50:33 -0700
Subject: [PATCH] Issue 6878 - Prevent repeated disconnect logs during shutdown
(#6879)

Description: Avoid logging non-active initialized connections via CONN in disconnect_server_nomutex_ext by adding a check to skip invalid conn=0 with invalid sockets, preventing excessive repeated messages.

Update ds_logs_test.py by adding test_no_repeated_disconnect_messages to verify the fix.

Fixes: https://github.com/389ds/389-ds-base/issues/6878

Reviewed by: @mreynolds389 (Thanks!)
---
.../tests/suites/ds_logs/ds_logs_test.py | 51 ++++++++++++++++++-
ldap/servers/slapd/connection.c | 15 +++---
2 files changed, 59 insertions(+), 7 deletions(-)

diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
index 2c22347bb..b86c72687 100644
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
@@ -24,7 +24,7 @@ from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, Aut
from lib389.idm.user import UserAccounts, UserAccount
from lib389.idm.group import Groups
from lib389.idm.organizationalunit import OrganizationalUnits
-from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD
+from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD, ErrorLog
from lib389.utils import ds_is_older, ds_is_newer
from lib389.config import RSA
from lib389.dseldif import DSEldif
@@ -1435,6 +1435,55 @@ def test_errorlog_buffering(topology_st, request):
assert inst.ds_error_log.match(".*slapd_daemon - slapd started.*")


+def test_no_repeated_disconnect_messages(topology_st):
+ """Test that there are no repeated "Not setting conn 0 to be disconnected: socket is invalid" messages on restart
+
+ :id: 72b5e1ce-2db8-458f-b2cd-0a0b6525f51f
+ :setup: Standalone Instance
+ :steps:
+ 1. Set error log level to CONNECTION
+ 2. Clear existing error logs
+ 3. Restart the server with 30 second timeout
+ 4. Check error log for repeated disconnect messages
+ 5. Verify there are no more than 10 occurrences of the disconnect message
+ :expectedresults:
+ 1. Error log level should be set successfully
+ 2. Error logs should be cleared
+ 3. Server should restart successfully within 30 seconds
+ 4. Error log should be accessible
+ 5. There should be no more than 10 repeated disconnect messages
+ """
+
+ inst = topology_st.standalone
+
+ log.info('Set error log level to CONNECTION')
+ inst.config.loglevel([ErrorLog.CONNECT])
+ current_level = inst.config.get_attr_val_int('nsslapd-errorlog-level')
+ log.info(f'Error log level set to: {current_level}')
+
+ log.info('Clear existing error logs')
+ inst.deleteErrorLogs()
+
+ log.info('Restart the server with 30 second timeout')
+ inst.restart(timeout=30)
+
+ log.info('Check error log for repeated disconnect messages')
+ disconnect_message = "Not setting conn 0 to be disconnected: socket is invalid"
+
+ # Count occurrences of the disconnect message
+ error_log_lines = inst.ds_error_log.readlines()
+ disconnect_count = 0
+
+ for line in error_log_lines:
+ if disconnect_message in line:
+ disconnect_count += 1
+
+ log.info(f'Found {disconnect_count} occurrences of disconnect message')
+
+ log.info('Verify there are no more than 10 occurrences')
+ assert disconnect_count <= 10, f"Found {disconnect_count} repeated disconnect messages, expected <= 10"
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index bb4fcd77f..2967de15b 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -2465,12 +2465,15 @@ disconnect_server_nomutex_ext(Connection *conn, PRUint64 opconnid, int opid, PRE
}

} else {
- slapi_log_err(SLAPI_LOG_CONNS, "disconnect_server_nomutex_ext",
- "Not setting conn %d to be disconnected: %s\n",
- conn->c_sd,
- (conn->c_sd == SLAPD_INVALID_SOCKET) ? "socket is invalid" :
- ((conn->c_connid != opconnid) ? "conn id does not match op conn id" :
- ((conn->c_flags & CONN_FLAG_CLOSING) ? "conn is closing" : "unknown")));
+ /* We avoid logging an invalid conn=0 connection as it is not a real connection. */
+ if (!(conn->c_sd == SLAPD_INVALID_SOCKET && conn->c_connid == 0)) {
+ slapi_log_err(SLAPI_LOG_CONNS, "disconnect_server_nomutex_ext",
+ "Not setting conn %d to be disconnected: %s\n",
+ conn->c_sd,
+ (conn->c_sd == SLAPD_INVALID_SOCKET) ? "socket is invalid" :
+ ((conn->c_connid != opconnid) ? "conn id does not match op conn id" :
+ ((conn->c_flags & CONN_FLAG_CLOSING) ? "conn is closing" : "unknown")));
+ }
}
}

--
2.49.0

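The server-side fix above is simply a guard in front of the log call for a connection slot that was never a real connection (conn=0 with an invalid socket). The equivalent idea expressed with Python's logging machinery is a Filter that drops one known-noise message while letting everything else through; this is an illustration of the concept, not code from the patch:

    import logging

    class DropKnownNoise(logging.Filter):
        NOISE = "Not setting conn 0 to be disconnected: socket is invalid"

        def filter(self, record):
            # Returning False suppresses the record; everything else passes.
            return self.NOISE not in record.getMessage()

    log = logging.getLogger("demo")
    log.addFilter(DropKnownNoise())
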
@ -0,0 +1,67 @@
From fef4875a9c3d67ef424a1fb1698ae011152735b1 Mon Sep 17 00:00:00 2001
From: Anuar Beisembayev <111912342+abeisemb@users.noreply.github.com>
Date: Wed, 23 Jul 2025 23:48:11 -0400
Subject: [PATCH] Issue 6772 - dsconf - Replicas with the "consumer" role allow
 for viewing and modification of their changelog. (#6773)

dsconf currently allows users to set and retrieve changelogs in consumer replicas, which do not have officially supported changelogs. This can lead to undefined behavior and confusion.
This commit prints a warning message if the user tries to interact with a changelog on a consumer replica.

Resolves: https://github.com/389ds/389-ds-base/issues/6772

Reviewed by: @droideck
---
 src/lib389/lib389/cli_conf/replication.py | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
index 6f77f34ca..a18bf83ca 100644
--- a/src/lib389/lib389/cli_conf/replication.py
+++ b/src/lib389/lib389/cli_conf/replication.py
@@ -686,6 +686,9 @@ def set_per_backend_cl(inst, basedn, log, args):
     replace_list = []
     did_something = False
 
+    if (is_replica_role_consumer(inst, suffix)):
+        log.info("Warning: Changelogs are not supported for consumer replicas. You may run into undefined behavior.")
+
     if args.encrypt:
         cl.replace('nsslapd-encryptionalgorithm', 'AES')
         del args.encrypt
@@ -715,6 +718,10 @@ def set_per_backend_cl(inst, basedn, log, args):
 # that means there is a changelog config entry per backend (aka suffix)
 def get_per_backend_cl(inst, basedn, log, args):
     suffix = args.suffix
+
+    if (is_replica_role_consumer(inst, suffix)):
+        log.info("Warning: Changelogs are not supported for consumer replicas. You may run into undefined behavior.")
+
     cl = Changelog(inst, suffix)
     if args and args.json:
         log.info(cl.get_all_attrs_json())
@@ -822,6 +829,22 @@ def del_repl_manager(inst, basedn, log, args):
 
     log.info("Successfully deleted replication manager: " + manager_dn)
 
+def is_replica_role_consumer(inst, suffix):
+    """Helper function for get_per_backend_cl and set_per_backend_cl.
+    Makes sure the instance in question is not a consumer, which is a role that
+    does not support changelogs.
+    """
+    replicas = Replicas(inst)
+    try:
+        replica = replicas.get(suffix)
+        role = replica.get_role()
+    except ldap.NO_SUCH_OBJECT:
+        raise ValueError(f"Backend \"{suffix}\" is not enabled for replication")
+
+    if role == ReplicaRole.CONSUMER:
+        return True
+    else:
+        return False
 
 #
 # Agreements
--
2.49.0

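Outside dsconf, the same pre-check can be performed with the lib389 classes this patch already relies on. A sketch, assuming inst is a connected DirSrv object and that ReplicaRole is importable from lib389._constants:

    import ldap
    from lib389._constants import ReplicaRole
    from lib389.replica import Replicas

    def warn_if_consumer(inst, suffix, log):
        """Print the same warning dsconf now emits before touching a changelog."""
        try:
            replica = Replicas(inst).get(suffix)
        except ldap.NO_SUCH_OBJECT:
            raise ValueError(f'Backend "{suffix}" is not enabled for replication')
        if replica.get_role() == ReplicaRole.CONSUMER:
            log.info("Warning: Changelogs are not supported for consumer replicas. "
                     "You may run into undefined behavior.")
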
@ -0,0 +1,143 @@
From 4cb50f83397e6a5e14a9b75ed15f24189ee2792b Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 21 Jul 2025 18:07:21 -0400
Subject: [PATCH] Issue 6893 - Log user that is updated during password modify
 extended operation

Description:

When a user's password is updated via an extended operation (password modify
plugin) we only log the bind DN and not which user was updated. While "internal
operation" logging will display the user, it should be logged at the default
logging level.

Add access logging using "EXT_INFO" where we display the bind dn, target
dn, and message.

Relates: https://github.com/389ds/389-ds-base/issues/6893

Reviewed by: spichugi & tbordaz (Thanks!!)
---
 ldap/servers/slapd/passwd_extop.c | 56 +++++++++++++++----------------
 1 file changed, 28 insertions(+), 28 deletions(-)

diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c
index 4bb60afd6..0296d64fb 100644
--- a/ldap/servers/slapd/passwd_extop.c
+++ b/ldap/servers/slapd/passwd_extop.c
@@ -465,12 +465,13 @@ passwd_modify_extop(Slapi_PBlock *pb)
     BerElement *response_ber = NULL;
     Slapi_Entry *targetEntry = NULL;
     Connection *conn = NULL;
+    Operation *pb_op = NULL;
     LDAPControl **req_controls = NULL;
     LDAPControl **resp_controls = NULL;
     passwdPolicy *pwpolicy = NULL;
     Slapi_DN *target_sdn = NULL;
     Slapi_Entry *referrals = NULL;
-    /* Slapi_DN sdn; */
+    Slapi_Backend *be = NULL;
 
     slapi_log_err(SLAPI_LOG_TRACE, "passwd_modify_extop", "=>\n");
 
@@ -647,7 +648,7 @@ parse_req_done:
     }
     dn = slapi_sdn_get_ndn(target_sdn);
     if (dn == NULL || *dn == '\0') {
-        /* Refuse the operation because they're bound anonymously */
+        /* Invalid DN - refuse the operation */
         errMesg = "Invalid dn.";
         rc = LDAP_INVALID_DN_SYNTAX;
         goto free_and_return;
@@ -724,14 +725,19 @@ parse_req_done:
         ber_free(response_ber, 1);
     }
 
-    slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET, (void *)dn);
+    slapi_pblock_get(pb, SLAPI_OPERATION, &pb_op);
+    if (pb_op == NULL) {
+        slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "pb_op is NULL\n");
+        goto free_and_return;
+    }
 
+    slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET, (void *)dn);
     /* Now we have the DN, look for the entry */
     ret = passwd_modify_getEntry(dn, &targetEntry);
     /* If we can't find the entry, then that's an error */
     if (ret) {
         /* Couldn't find the entry, fail */
-        errMesg = "No such Entry exists.";
+        errMesg = "No such entry exists.";
         rc = LDAP_NO_SUCH_OBJECT;
         goto free_and_return;
     }
@@ -742,30 +748,18 @@ parse_req_done:
        leak any useful information to the client such as current password
        wrong, etc.
      */
-    Operation *pb_op = NULL;
-    slapi_pblock_get(pb, SLAPI_OPERATION, &pb_op);
-    if (pb_op == NULL) {
-        slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "pb_op is NULL\n");
-        goto free_and_return;
-    }
-
     operation_set_target_spec(pb_op, slapi_entry_get_sdn(targetEntry));
     slapi_pblock_set(pb, SLAPI_REQUESTOR_ISROOT, &pb_op->o_isroot);
 
-    /* In order to perform the access control check , we need to select a backend (even though
-     * we don't actually need it otherwise).
-     */
-    {
-        Slapi_Backend *be = NULL;
-
-        be = slapi_mapping_tree_find_backend_for_sdn(slapi_entry_get_sdn(targetEntry));
-        if (NULL == be) {
-            errMesg = "Failed to find backend for target entry";
-            rc = LDAP_OPERATIONS_ERROR;
-            goto free_and_return;
-        }
-        slapi_pblock_set(pb, SLAPI_BACKEND, be);
+    /* In order to perform the access control check, we need to select a backend (even though
+     * we don't actually need it otherwise). */
+    be = slapi_mapping_tree_find_backend_for_sdn(slapi_entry_get_sdn(targetEntry));
+    if (NULL == be) {
+        errMesg = "Failed to find backend for target entry";
+        rc = LDAP_NO_SUCH_OBJECT;
+        goto free_and_return;
     }
+    slapi_pblock_set(pb, SLAPI_BACKEND, be);
 
     /* Check if the pwpolicy control is present */
     slapi_pblock_get(pb, SLAPI_PWPOLICY, &need_pwpolicy_ctrl);
@@ -797,10 +791,7 @@ parse_req_done:
     /* Check if password policy allows users to change their passwords. We need to do
      * this here since the normal modify code doesn't perform this check for
      * internal operations. */
-
-    Connection *pb_conn;
-    slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn);
-    if (!pb_op->o_isroot && !pb_conn->c_needpw && !pwpolicy->pw_change) {
+    if (!pb_op->o_isroot && !conn->c_needpw && !pwpolicy->pw_change) {
         if (NULL == bindSDN) {
             bindSDN = slapi_sdn_new_normdn_byref(bindDN);
         }
@@ -848,6 +839,15 @@ free_and_return:
     slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop",
                   "%s\n", errMesg ? errMesg : "success");
 
+    if (dn) {
+        /* Log the target ndn (if we have a target ndn) */
+        slapi_log_access(LDAP_DEBUG_STATS,
+                         "conn=%" PRIu64 " op=%d EXT_INFO name=\"passwd_modify_plugin\" bind_dn=\"%s\" target_dn=\"%s\" msg=\"%s\" rc=%d\n",
+                         conn ? conn->c_connid : -1, pb_op ? pb_op->o_opid : -1,
+                         bindDN ? bindDN : "", dn,
+                         errMesg ? errMesg : "success", rc);
+    }
+
     if ((rc == LDAP_REFERRAL) && (referrals)) {
         send_referrals_from_entry(pb, referrals);
     } else {
--
2.49.0

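The new EXT_INFO access line can be exercised with a standard RFC 3062 password modify request. A hedged python-ldap sketch, in which the URL, bind DN, and target DN are placeholders:

    import ldap

    conn = ldap.initialize("ldap://localhost:389")
    conn.simple_bind_s("cn=Directory Manager", "password")
    # Password modify extended operation against another entry; with this
    # patch the server also records an EXT_INFO access line that names both
    # the bind DN and the target DN.
    conn.passwd_s("uid=demo_user,ou=people,dc=example,dc=com", None, "NewSecret123")
    conn.unbind_s()
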
@ -0,0 +1,98 @@
From ffc3a81ed5852b7f1fbaed79b9b776af23d65b7c Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 23 Jul 2025 19:35:32 -0400
Subject: [PATCH] Issue 6895 - Crash if repl keep alive entry can not be
 created

Description:

Heap use after free when logging that the replication keep-alive entry cannot
be created. slapi_add_internal_pb() frees the slapi entry, then
we try to get the dn from the entry and get a use-after-free crash.

Relates: https://github.com/389ds/389-ds-base/issues/6895

Reviewed by: spichugi (Thanks!)
---
 ldap/servers/plugins/chainingdb/cb_config.c | 3 +--
 ldap/servers/plugins/posix-winsync/posix-winsync.c | 1 -
 ldap/servers/plugins/replication/repl5_init.c | 3 ---
 ldap/servers/plugins/replication/repl5_replica.c | 8 ++++----
 4 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/ldap/servers/plugins/chainingdb/cb_config.c b/ldap/servers/plugins/chainingdb/cb_config.c
index 40a7088d7..24fa1bcb3 100644
--- a/ldap/servers/plugins/chainingdb/cb_config.c
+++ b/ldap/servers/plugins/chainingdb/cb_config.c
@@ -44,8 +44,7 @@ cb_config_add_dse_entries(cb_backend *cb, char **entries, char *string1, char *s
         slapi_pblock_get(util_pb, SLAPI_PLUGIN_INTOP_RESULT, &res);
         if (LDAP_SUCCESS != res && LDAP_ALREADY_EXISTS != res) {
             slapi_log_err(SLAPI_LOG_ERR, CB_PLUGIN_SUBSYSTEM,
-                          "cb_config_add_dse_entries - Unable to add config entry (%s) to the DSE: %s\n",
-                          slapi_entry_get_dn(e),
+                          "cb_config_add_dse_entries - Unable to add config entry to the DSE: %s\n",
                           ldap_err2string(res));
             rc = res;
             slapi_pblock_destroy(util_pb);
diff --git a/ldap/servers/plugins/posix-winsync/posix-winsync.c b/ldap/servers/plugins/posix-winsync/posix-winsync.c
index 51a55b643..3a002bb70 100644
--- a/ldap/servers/plugins/posix-winsync/posix-winsync.c
+++ b/ldap/servers/plugins/posix-winsync/posix-winsync.c
@@ -1626,7 +1626,6 @@ posix_winsync_end_update_cb(void *cbdata __attribute__((unused)),
                       "posix_winsync_end_update_cb: "
                       "add task entry\n");
         }
-        /* slapi_entry_free(e_task); */
         slapi_pblock_destroy(pb);
         pb = NULL;
         posix_winsync_config_reset_MOFTaskCreated();
diff --git a/ldap/servers/plugins/replication/repl5_init.c b/ldap/servers/plugins/replication/repl5_init.c
index 8bc0b5372..5047fb8dc 100644
--- a/ldap/servers/plugins/replication/repl5_init.c
+++ b/ldap/servers/plugins/replication/repl5_init.c
@@ -682,7 +682,6 @@ create_repl_schema_policy(void)
                       repl_schema_top,
                       ldap_err2string(return_value));
         rc = -1;
-        slapi_entry_free(e); /* The entry was not consumed */
         goto done;
     }
     slapi_pblock_destroy(pb);
@@ -703,7 +702,6 @@ create_repl_schema_policy(void)
                       repl_schema_supplier,
                       ldap_err2string(return_value));
         rc = -1;
-        slapi_entry_free(e); /* The entry was not consumed */
         goto done;
     }
     slapi_pblock_destroy(pb);
@@ -724,7 +722,6 @@ create_repl_schema_policy(void)
                       repl_schema_consumer,
                       ldap_err2string(return_value));
         rc = -1;
-        slapi_entry_free(e); /* The entry was not consumed */
         goto done;
     }
     slapi_pblock_destroy(pb);
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index 59062b46b..a97c807e9 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -465,10 +465,10 @@ replica_subentry_create(const char *repl_root, ReplicaId rid)
     if (return_value != LDAP_SUCCESS &&
         return_value != LDAP_ALREADY_EXISTS &&
         return_value != LDAP_REFERRAL /* CONSUMER */) {
-        slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_subentry_create - Unable to "
-                      "create replication keep alive entry %s: error %d - %s\n",
-                      slapi_entry_get_dn_const(e),
-                      return_value, ldap_err2string(return_value));
+        slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_subentry_create - "
+                      "Unable to create replication keep alive entry 'cn=%s %d,%s': error %d - %s\n",
+                      KEEP_ALIVE_ENTRY, rid, repl_root,
+                      return_value, ldap_err2string(return_value));
         rc = -1;
         goto done;
     }
--
2.49.0

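The fix follows a general rule for consuming APIs: anything needed for later logging must be captured before the object is handed over, because the callee may free it. A language-neutral sketch in Python, with hypothetical names:

    def add_keep_alive_entry(entry, add_internal, log):
        # Capture identifying data *before* the call that consumes the entry;
        # after add_internal() returns, `entry` must not be touched again.
        dn = entry["dn"]
        result = add_internal(entry)  # may invalidate `entry`
        if result not in ("success", "already_exists"):
            log.error("Unable to create keep alive entry '%s': %s", dn, result)
        return result
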
@ -0,0 +1,352 @@
From 191634746fdcb7e26a154cd00a22324e02a10110 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Mon, 28 Jul 2025 10:50:26 -0700
Subject: [PATCH] Issue 6250 - Add test for entryUSN overflow on failed add
 operations (#6821)

Description: Add a comprehensive test to reproduce the entryUSN
overflow issue where failed attempts to add existing entries followed by
modify operations cause entryUSN values to underflow/overflow instead of
incrementing properly.

Related: https://github.com/389ds/389-ds-base/issues/6250

Reviewed by: @tbordaz (Thanks!)
---
 .../suites/plugins/entryusn_overflow_test.py | 323 ++++++++++++++++++
 1 file changed, 323 insertions(+)
 create mode 100644 dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py

diff --git a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
new file mode 100644
index 000000000..a23d734ca
--- /dev/null
+++ b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
@@ -0,0 +1,323 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2025 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import os
+import ldap
+import logging
+import pytest
+import time
+import random
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.config import Config
+from lib389.plugins import USNPlugin
+from lib389.idm.user import UserAccounts
+from lib389.topologies import topology_st
+from lib389.rootdse import RootDSE
+
+pytestmark = pytest.mark.tier2
+
+log = logging.getLogger(__name__)
+
+# Test constants
+DEMO_USER_BASE_DN = "uid=demo_user,ou=people," + DEFAULT_SUFFIX
+TEST_USER_PREFIX = "Demo User"
+MAX_USN_64BIT = 18446744073709551615  # 2^64 - 1
+ITERATIONS = 10
+ADD_EXISTING_ENTRY_MAX_ATTEMPTS = 5
+
+
+@pytest.fixture(scope="module")
+def setup_usn_test(topology_st, request):
+    """Setup USN plugin and test data for entryUSN overflow testing"""
+
+    inst = topology_st.standalone
+
+    log.info("Enable the USN plugin...")
+    plugin = USNPlugin(inst)
+    plugin.enable()
+    plugin.enable_global_mode()
+
+    inst.restart()
+
+    # Create initial test users
+    users = UserAccounts(inst, DEFAULT_SUFFIX)
+    created_users = []
+
+    log.info("Creating initial test users...")
+    for i in range(3):
+        user_props = {
+            'uid': f'{TEST_USER_PREFIX}-{i}',
+            'cn': f'{TEST_USER_PREFIX}-{i}',
+            'sn': f'User{i}',
+            'uidNumber': str(1000 + i),
+            'gidNumber': str(1000 + i),
+            'homeDirectory': f'/home/{TEST_USER_PREFIX}-{i}',
+            'userPassword': 'password123'
+        }
+        try:
+            user = users.create(properties=user_props)
+            created_users.append(user)
+            log.info(f"Created user: {user.dn}")
+        except ldap.ALREADY_EXISTS:
+            log.info(f"User {user_props['uid']} already exists, skipping creation")
+            user = users.get(user_props['uid'])
+            created_users.append(user)
+
+    def fin():
+        log.info("Cleaning up test users...")
+        for user in created_users:
+            try:
+                user.delete()
+            except ldap.NO_SUCH_OBJECT:
+                pass
+
+    request.addfinalizer(fin)
+
+    return created_users
+
+
+def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test):
+    """Test that reproduces entryUSN overflow when adding existing entries
+
+    :id: a5a8c33d-82f3-4113-be2b-027de51791c8
+    :setup: Standalone instance with USN plugin enabled and test users
+    :steps:
+        1. Record initial entryUSN values for existing users
+        2. Attempt to add existing entries multiple times (should fail)
+        3. Perform modify operations on the entries
+        4. Check that entryUSN values increment correctly without overflow
+        5. Verify lastusn values are consistent
+    :expectedresults:
+        1. Initial entryUSN values are recorded successfully
+        2. Add operations fail with ALREADY_EXISTS error
+        3. Modify operations succeed
+        4. EntryUSN values increment properly without underflow/overflow
+        5. LastUSN values are consistent and increasing
+    """
+
+    inst = topology_st.standalone
+    users = setup_usn_test
+
+    # Enable detailed logging for debugging
+    config = Config(inst)
+    config.replace('nsslapd-accesslog-level', '260')  # Internal op logging
+    config.replace('nsslapd-errorlog-level', '65536')
+    config.replace('nsslapd-plugin-logging', 'on')
+
+    root_dse = RootDSE(inst)
+
+    log.info("Starting entryUSN overflow reproduction test")
+
+    # Record initial state
+    initial_usn_values = {}
+    for user in users:
+        initial_usn = user.get_attr_val_int('entryusn')
+        initial_usn_values[user.dn] = initial_usn
+        log.info(f"Initial entryUSN for {user.get_attr_val_utf8('cn')}: {initial_usn}")
+
+    initial_lastusn = root_dse.get_attr_val_int("lastusn")
+    log.info(f"Initial lastUSN: {initial_lastusn}")
+
+    # Perform test iterations
+    for iteration in range(1, ITERATIONS + 1):
+        log.info(f"\n--- Iteration {iteration} ---")
+
+        # Step 1: Try to add existing entries multiple times
+        selected_user = random.choice(users)
+        cn_value = selected_user.get_attr_val_utf8('cn')
+        attempts = random.randint(1, ADD_EXISTING_ENTRY_MAX_ATTEMPTS)
+
+        log.info(f"Attempting to add existing entry '{cn_value}' {attempts} times")
+
+        # Get user attributes for recreation attempt
+        user_attrs = {
+            'uid': selected_user.get_attr_val_utf8('uid'),
+            'cn': selected_user.get_attr_val_utf8('cn'),
+            'sn': selected_user.get_attr_val_utf8('sn'),
+            'uidNumber': selected_user.get_attr_val_utf8('uidNumber'),
+            'gidNumber': selected_user.get_attr_val_utf8('gidNumber'),
+            'homeDirectory': selected_user.get_attr_val_utf8('homeDirectory'),
+            'userPassword': 'password123'
+        }
+
+        users_collection = UserAccounts(inst, DEFAULT_SUFFIX)
+
+        # Try to add the existing user multiple times
+        for attempt in range(attempts):
+            try:
+                users_collection.create(properties=user_attrs)
+                log.error(f"ERROR: Add operation should have failed but succeeded on attempt {attempt + 1}")
+                assert False, "Add operation should have failed with ALREADY_EXISTS"
+            except ldap.ALREADY_EXISTS:
+                log.info(f"Attempt {attempt + 1}: Got expected ALREADY_EXISTS error")
+            except Exception as e:
+                log.error(f"Unexpected error on attempt {attempt + 1}: {e}")
+                raise
+
+        # Step 2: Perform modify operation
+        target_user = random.choice(users)
+        cn_value = target_user.get_attr_val_utf8('cn')
+        old_usn = target_user.get_attr_val_int('entryusn')
+
+        # Modify the user entry
+        new_description = f"Modified in iteration {iteration} - {time.time()}"
+        target_user.replace('description', new_description)
+
+        # Get new USN value
+        new_usn = target_user.get_attr_val_int('entryusn')
+
+        log.info(f"Modified entry '{cn_value}': old USN = {old_usn}, new USN = {new_usn}")
+
+        # Step 3: Validate USN values
+        # Check for overflow/underflow conditions
+        assert new_usn > 0, f"EntryUSN should be positive, got {new_usn}"
+        assert new_usn < MAX_USN_64BIT, f"EntryUSN overflow detected: {new_usn} >= {MAX_USN_64BIT}"
+
+        # Check that USN didn't wrap around (underflow detection)
+        usn_diff = new_usn - old_usn
+        assert usn_diff < 1000, f"USN increment too large, possible overflow: {usn_diff}"
+
+        # Verify lastUSN is also reasonable
+        current_lastusn = root_dse.get_attr_val_int("lastusn")
+        assert current_lastusn >= new_usn, f"LastUSN ({current_lastusn}) should be >= entryUSN ({new_usn})"
+        assert current_lastusn < MAX_USN_64BIT, f"LastUSN overflow detected: {current_lastusn}"
+
+        log.info(f"USN validation passed for iteration {iteration}")
+
+        # Add a new entry occasionally to increase USN diversity
+        if iteration % 3 == 0:
+            new_user_props = {
+                'uid': f'{TEST_USER_PREFIX}-new-{iteration}',
+                'cn': f'{TEST_USER_PREFIX}-new-{iteration}',
+                'sn': f'NewUser{iteration}',
+                'uidNumber': str(2000 + iteration),
+                'gidNumber': str(2000 + iteration),
+                'homeDirectory': f'/home/{TEST_USER_PREFIX}-new-{iteration}',
+                'userPassword': 'newpassword123'
+            }
+            try:
+                new_user = users_collection.create(properties=new_user_props)
+                new_user_usn = new_user.get_attr_val_int('entryusn')
+                log.info(f"Created new entry '{new_user.get_attr_val_utf8('cn')}' with USN: {new_user_usn}")
+                users.append(new_user)  # Add to cleanup list
+            except Exception as e:
+                log.warning(f"Failed to create new user in iteration {iteration}: {e}")
+
+    # Final validation: Check all USN values are reasonable
+    log.info("\nFinal USN validation")
+    final_lastusn = root_dse.get_attr_val_int("lastusn")
+
+    for user in users:
+        try:
+            final_usn = user.get_attr_val_int('entryusn')
+            cn_value = user.get_attr_val_utf8('cn')
+            log.info(f"Final entryUSN for '{cn_value}': {final_usn}")
+
+            # Ensure no overflow occurred
+            assert final_usn > 0, f"Final entryUSN should be positive for {cn_value}: {final_usn}"
+            assert final_usn < MAX_USN_64BIT, f"EntryUSN overflow for {cn_value}: {final_usn}"
+
+        except ldap.NO_SUCH_OBJECT:
+            log.info(f"User {user.dn} was deleted during test")
+
+    log.info(f"Final lastUSN: {final_lastusn}")
+    assert final_lastusn > initial_lastusn, "LastUSN should have increased during test"
+    assert final_lastusn < MAX_USN_64BIT, f"LastUSN overflow detected: {final_lastusn}"
+
+    log.info("EntryUSN overflow test completed successfully")
+
+
+def test_entryusn_consistency_after_failed_adds(topology_st, setup_usn_test):
+    """Test that entryUSN remains consistent after failed add operations
+
+    :id: e380ccad-527b-427e-a331-df5c41badbed
+    :setup: Standalone instance with USN plugin enabled and test users
+    :steps:
+        1. Record entryUSN values before failed add attempts
+        2. Attempt to add existing entries (should fail)
+        3. Verify entryUSN values haven't changed due to failed operations
+        4. Perform successful modify operations
+        5. Verify entryUSN increments correctly
+    :expectedresults:
+        1. Initial entryUSN values recorded
+        2. Add operations fail as expected
+        3. EntryUSN values unchanged after failed adds
+        4. Modify operations succeed
+        5. EntryUSN values increment correctly without overflow
+    """
+
+    inst = topology_st.standalone
+    users = setup_usn_test
+
+    log.info("Testing entryUSN consistency after failed adds")
+
+    # Record USN values before any operations
+    pre_operation_usns = {}
+    for user in users:
+        usn = user.get_attr_val_int('entryusn')
+        pre_operation_usns[user.dn] = usn
+        log.info(f"Pre-operation entryUSN for {user.get_attr_val_utf8('cn')}: {usn}")
+
+    # Attempt to add existing entries - these should fail
+    users_collection = UserAccounts(inst, DEFAULT_SUFFIX)
+
+    for user in users:
+        cn_value = user.get_attr_val_utf8('cn')
+        log.info(f"Attempting to add existing user: {cn_value}")
+
+        user_attrs = {
+            'uid': user.get_attr_val_utf8('uid'),
+            'cn': cn_value,
+            'sn': user.get_attr_val_utf8('sn'),
+            'uidNumber': user.get_attr_val_utf8('uidNumber'),
+            'gidNumber': user.get_attr_val_utf8('gidNumber'),
+            'homeDirectory': user.get_attr_val_utf8('homeDirectory'),
+            'userPassword': 'password123'
+        }
+
+        try:
+            users_collection.create(properties=user_attrs)
+            assert False, f"Add operation should have failed for existing user {cn_value}"
+        except ldap.ALREADY_EXISTS:
+            log.info(f"Got expected ALREADY_EXISTS for {cn_value}")
+
+    # Verify USN values haven't changed after failed adds
+    log.info("Verifying entryUSN values after failed add operations...")
+    for user in users:
+        current_usn = user.get_attr_val_int('entryusn')
+        expected_usn = pre_operation_usns[user.dn]
+        cn_value = user.get_attr_val_utf8('cn')
+
+        assert current_usn == expected_usn, \
+            f"EntryUSN changed after failed add for {cn_value}: was {expected_usn}, now {current_usn}"
+        log.info(f"EntryUSN unchanged for {cn_value}: {current_usn}")
+
+    # Now perform successful modify operations
+    log.info("Performing successful modify operations...")
+    for i, user in enumerate(users):
+        cn_value = user.get_attr_val_utf8('cn')
+        old_usn = user.get_attr_val_int('entryusn')
+
+        # Modify the user
+        user.replace('description', f'Consistency test modification {i + 1}')
+
+        new_usn = user.get_attr_val_int('entryusn')
+        log.info(f"Modified {cn_value}: USN {old_usn} -> {new_usn}")
+
+        # Verify proper increment
+        assert (new_usn - old_usn) == 1, f"EntryUSN should increment by 1 for {cn_value}: {old_usn} -> {new_usn}"
+        assert new_usn < MAX_USN_64BIT, f"EntryUSN overflow for {cn_value}: {new_usn}"
+
+    log.info("EntryUSN consistency test completed successfully")
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
\ No newline at end of file
--
2.49.0

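The constant 18446744073709551615 used by the test is 2^64 - 1: if a failed add leaves the server decrementing an unsigned 64-bit counter past zero, the value wraps to an enormous number instead of going negative, which is exactly what the assertions hunt for. A small model of that arithmetic:

    MAX_USN_64BIT = 2**64 - 1

    def wrap_u64(value):
        """Model unsigned 64-bit arithmetic as the server's C code performs it."""
        return value % (2**64)

    assert wrap_u64(0 - 1) == MAX_USN_64BIT  # underflow surfaces as a huge USN
    assert wrap_u64(5 - 1) == 4              # a normal decrement is unaffected
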
@ -0,0 +1,172 @@
From 37a56f75afac2805e1ba958eebd496e77b7079e7 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Mon, 28 Jul 2025 15:35:50 -0700
Subject: [PATCH] Issue 6594 - Add test for numSubordinates replication
 consistency with tombstones (#6862)

Description: Add a comprehensive test to verify that numSubordinates and
tombstoneNumSubordinates attributes are correctly replicated between
instances when tombstone entries are present.

Fixes: https://github.com/389ds/389-ds-base/issues/6594

Reviewed by: @progier389 (Thanks!)
---
 .../numsubordinates_replication_test.py | 144 ++++++++++++++++++
 1 file changed, 144 insertions(+)
 create mode 100644 dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py

diff --git a/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py b/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py
new file mode 100644
index 000000000..9ba10657d
--- /dev/null
+++ b/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py
@@ -0,0 +1,144 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2025 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import os
+import logging
+import pytest
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.replica import ReplicationManager
+from lib389.idm.organizationalunit import OrganizationalUnits
+from lib389.idm.user import UserAccounts
+from lib389.topologies import topology_i2 as topo_i2
+
+
+pytestmark = pytest.mark.tier1
+
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+if DEBUGGING:
+    logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+    logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+
+def test_numsubordinates_tombstone_replication_mismatch(topo_i2):
+    """Test that numSubordinates values match between replicas after tombstone creation
+
+    :id: c43ecc7a-d706-42e8-9179-1ff7d0e7163a
+    :setup: Two standalone instances
+    :steps:
+        1. Create a container (organizational unit) on the first instance
+        2. Create a user object in that container
+        3. Delete the user object (this creates a tombstone)
+        4. Set up replication between the two instances
+        5. Wait for replication to complete
+        6. Check numSubordinates on both instances
+        7. Check tombstoneNumSubordinates on both instances
+        8. Verify that numSubordinates values match on both instances
+    :expectedresults:
+        1. Container should be created successfully
+        2. User object should be created successfully
+        3. User object should be deleted successfully
+        4. Replication should be set up successfully
+        5. Replication should complete successfully
+        6. numSubordinates should be accessible on both instances
+        7. tombstoneNumSubordinates should be accessible on both instances
+        8. numSubordinates values should match on both instances
+    """
+
+    instance1 = topo_i2.ins["standalone1"]
+    instance2 = topo_i2.ins["standalone2"]
+
+    log.info("Create a container (organizational unit) on the first instance")
+    ous1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX)
+    container = ous1.create(properties={
+        'ou': 'test_container',
+        'description': 'Test container for numSubordinates replication test'
+    })
+    container_rdn = container.rdn
+    log.info(f"Created container: {container_rdn}")
+
+    log.info("Create a user object in that container")
+    users1 = UserAccounts(instance1, DEFAULT_SUFFIX, rdn=f"ou={container_rdn}")
+    test_user = users1.create_test_user(uid=1001)
+    log.info(f"Created user: {test_user.dn}")
+
+    log.info("Checking initial numSubordinates on container")
+    container_obj1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX).get(container_rdn)
+    initial_numsubordinates = container_obj1.get_attr_val_int('numSubordinates')
+    log.info(f"Initial numSubordinates: {initial_numsubordinates}")
+    assert initial_numsubordinates == 1
+
+    log.info("Delete the user object (this creates a tombstone)")
+    test_user.delete()
+
+    log.info("Checking numSubordinates after deletion")
+    after_delete_numsubordinates = container_obj1.get_attr_val_int('numSubordinates')
+    log.info(f"numSubordinates after deletion: {after_delete_numsubordinates}")
+
+    log.info("Checking tombstoneNumSubordinates after deletion")
+    try:
+        tombstone_numsubordinates = container_obj1.get_attr_val_int('tombstoneNumSubordinates')
+        log.info(f"tombstoneNumSubordinates: {tombstone_numsubordinates}")
+    except Exception as e:
+        log.info(f"tombstoneNumSubordinates not found or error: {e}")
+        tombstone_numsubordinates = 0
+
+    log.info("Set up replication between the two instances")
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    repl.create_first_supplier(instance1)
+    repl.join_supplier(instance1, instance2)
+
+    log.info("Wait for replication to complete")
+    repl.wait_for_replication(instance1, instance2)
+
+    log.info("Check numSubordinates on both instances")
+    container_obj1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX).get(container_rdn)
+    numsubordinates_instance1 = container_obj1.get_attr_val_int('numSubordinates')
+    log.info(f"numSubordinates on instance1: {numsubordinates_instance1}")
+
+    container_obj2 = OrganizationalUnits(instance2, DEFAULT_SUFFIX).get(container_rdn)
+    numsubordinates_instance2 = container_obj2.get_attr_val_int('numSubordinates')
+    log.info(f"numSubordinates on instance2: {numsubordinates_instance2}")
+
+    log.info("Check tombstoneNumSubordinates on both instances")
+    try:
+        tombstone_numsubordinates_instance1 = container_obj1.get_attr_val_int('tombstoneNumSubordinates')
+        log.info(f"tombstoneNumSubordinates on instance1: {tombstone_numsubordinates_instance1}")
+    except Exception as e:
+        log.info(f"tombstoneNumSubordinates not found on instance1: {e}")
+        tombstone_numsubordinates_instance1 = 0
+
+    try:
+        tombstone_numsubordinates_instance2 = container_obj2.get_attr_val_int('tombstoneNumSubordinates')
+        log.info(f"tombstoneNumSubordinates on instance2: {tombstone_numsubordinates_instance2}")
+    except Exception as e:
+        log.info(f"tombstoneNumSubordinates not found on instance2: {e}")
+        tombstone_numsubordinates_instance2 = 0
+
+    log.info("Verify that numSubordinates values match on both instances")
+    log.info(f"Comparison: instance1 numSubordinates={numsubordinates_instance1}, "
+             f"instance2 numSubordinates={numsubordinates_instance2}")
+    log.info(f"Comparison: instance1 tombstoneNumSubordinates={tombstone_numsubordinates_instance1}, "
+             f"instance2 tombstoneNumSubordinates={tombstone_numsubordinates_instance2}")
+
+    assert numsubordinates_instance1 == numsubordinates_instance2, (
+        f"numSubordinates mismatch: instance1 has {numsubordinates_instance1}, "
+        f"instance2 has {numsubordinates_instance2}. "
+    )
+    assert tombstone_numsubordinates_instance1 == tombstone_numsubordinates_instance2, (
+        f"tombstoneNumSubordinates mismatch: instance1 has {tombstone_numsubordinates_instance1}, "
+        f"instance2 has {tombstone_numsubordinates_instance2}. "
+    )
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
\ No newline at end of file
--
2.49.0

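numSubordinates and tombstoneNumSubordinates are operational attributes, so plain searches do not return them unless they are requested by name, which is what the test does via get_attr_val_int(). A standalone python-ldap sketch of the same lookup, with placeholder connection parameters:

    import ldap

    def read_subordinate_counts(uri, binddn, password, container_dn):
        """Fetch the operational attributes the test compares across replicas."""
        conn = ldap.initialize(uri)
        conn.simple_bind_s(binddn, password)
        dn, attrs = conn.search_s(container_dn, ldap.SCOPE_BASE, "(objectClass=*)",
                                  ["numSubordinates", "tombstoneNumSubordinates"])[0]
        conn.unbind_s()
        return {name: [v.decode() for v in values] for name, values in attrs.items()}
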
@ -0,0 +1,814 @@
|
||||
From e05653cbff500c47b89e43e4a1c85b7cb30321ff Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Mon, 28 Jul 2025 15:41:29 -0700
|
||||
Subject: [PATCH] Issue 6884 - Mask password hashes in audit logs (#6885)
|
||||
|
||||
Description: Fix the audit log functionality to mask password hash values for
|
||||
userPassword, nsslapd-rootpw, nsmultiplexorcredentials, nsds5ReplicaCredentials,
|
||||
and nsds5ReplicaBootstrapCredentials attributes in ADD and MODIFY operations.
|
||||
Update auditlog.c to detect password attributes and replace their values with
|
||||
asterisks (**********************) in both LDIF and JSON audit log formats.
|
||||
Add a comprehensive test suite audit_password_masking_test.py to verify
|
||||
password masking works correctly across all log formats and operation types.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6884
|
||||
|
||||
Reviewed by: @mreynolds389, @vashirov (Thanks!!)
|
||||
---
|
||||
.../logging/audit_password_masking_test.py | 501 ++++++++++++++++++
|
||||
ldap/servers/slapd/auditlog.c | 170 +++++-
|
||||
ldap/servers/slapd/slapi-private.h | 1 +
|
||||
src/lib389/lib389/chaining.py | 3 +-
|
||||
4 files changed, 652 insertions(+), 23 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
new file mode 100644
|
||||
index 000000000..3b6a54849
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
@@ -0,0 +1,501 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import logging
|
||||
+import pytest
|
||||
+import os
|
||||
+import re
|
||||
+import time
|
||||
+import ldap
|
||||
+from lib389._constants import DEFAULT_SUFFIX, DN_DM, PW_DM
|
||||
+from lib389.topologies import topology_m2 as topo
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.dirsrv_log import DirsrvAuditJSONLog
|
||||
+from lib389.plugins import ChainingBackendPlugin
|
||||
+from lib389.chaining import ChainingLinks
|
||||
+from lib389.agreement import Agreements
|
||||
+from lib389.replica import ReplicationManager, Replicas
|
||||
+from lib389.idm.directorymanager import DirectoryManager
|
||||
+
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+MASKED_PASSWORD = "**********************"
|
||||
+TEST_PASSWORD = "MySecret123"
|
||||
+TEST_PASSWORD_2 = "NewPassword789"
|
||||
+TEST_PASSWORD_3 = "NewPassword101"
|
||||
+
|
||||
+
|
||||
+def setup_audit_logging(inst, log_format='default', display_attrs=None):
|
||||
+ """Configure audit logging settings"""
|
||||
+ inst.config.replace('nsslapd-auditlog-logbuffering', 'off')
|
||||
+ inst.config.replace('nsslapd-auditlog-logging-enabled', 'on')
|
||||
+ inst.config.replace('nsslapd-auditlog-log-format', log_format)
|
||||
+
|
||||
+ if display_attrs is not None:
|
||||
+ inst.config.replace('nsslapd-auditlog-display-attrs', display_attrs)
|
||||
+
|
||||
+ inst.deleteAuditLogs()
|
||||
+
|
||||
+
|
||||
+def check_password_masked(inst, log_format, expected_password, actual_password):
|
||||
+ """Helper function to check password masking in audit logs"""
|
||||
+
|
||||
+ time.sleep(1) # Allow log to flush
|
||||
+
|
||||
+ # List of all password/credential attributes that should be masked
|
||||
+ password_attributes = [
|
||||
+ 'userPassword',
|
||||
+ 'nsslapd-rootpw',
|
||||
+ 'nsmultiplexorcredentials',
|
||||
+ 'nsDS5ReplicaCredentials',
|
||||
+ 'nsDS5ReplicaBootstrapCredentials'
|
||||
+ ]
|
||||
+
|
||||
+ # Get password schemes to check for hash leakage
|
||||
+ user_password_scheme = inst.config.get_attr_val_utf8('passwordStorageScheme')
|
||||
+ root_password_scheme = inst.config.get_attr_val_utf8('nsslapd-rootpwstoragescheme')
|
||||
+
|
||||
+ if log_format == 'json':
|
||||
+ # Check JSON format logs
|
||||
+ audit_log = DirsrvAuditJSONLog(inst)
|
||||
+ log_lines = audit_log.readlines()
|
||||
+
|
||||
+ found_masked = False
|
||||
+ found_actual = False
|
||||
+ found_hashed = False
|
||||
+
|
||||
+ for line in log_lines:
|
||||
+ # Check if any password attribute is present in the line
|
||||
+ for attr in password_attributes:
|
||||
+ if attr in line:
|
||||
+ if expected_password in line:
|
||||
+ found_masked = True
|
||||
+ if actual_password in line:
|
||||
+ found_actual = True
|
||||
+ # Check for password scheme indicators (hashed passwords)
|
||||
+ if user_password_scheme and f'{{{user_password_scheme}}}' in line:
|
||||
+ found_hashed = True
|
||||
+ if root_password_scheme and f'{{{root_password_scheme}}}' in line:
|
||||
+ found_hashed = True
|
||||
+ break # Found a password attribute, no need to check others for this line
|
||||
+
|
||||
+ else:
|
||||
+ # Check LDIF format logs
|
||||
+ found_masked = False
|
||||
+ found_actual = False
|
||||
+ found_hashed = False
|
||||
+
|
||||
+ # Check each password attribute for masked password
|
||||
+ for attr in password_attributes:
|
||||
+ if inst.ds_audit_log.match(f"{attr}: {re.escape(expected_password)}"):
|
||||
+ found_masked = True
|
||||
+ if inst.ds_audit_log.match(f"{attr}: {actual_password}"):
|
||||
+ found_actual = True
|
||||
+
|
||||
+ # Check for hashed passwords in LDIF format
|
||||
+ if user_password_scheme:
|
||||
+ if inst.ds_audit_log.match(f"userPassword: {{{user_password_scheme}}}"):
|
||||
+ found_hashed = True
|
||||
+ if root_password_scheme:
|
||||
+ if inst.ds_audit_log.match(f"nsslapd-rootpw: {{{root_password_scheme}}}"):
|
||||
+ found_hashed = True
|
||||
+
|
||||
+ # Delete audit logs to avoid interference with other tests
|
||||
+ # We need to reset the root password to default as deleteAuditLogs()
|
||||
+ # opens a new connection with the default password
|
||||
+ dm = DirectoryManager(inst)
|
||||
+ dm.change_password(PW_DM)
|
||||
+ inst.deleteAuditLogs()
|
||||
+
|
||||
+ return found_masked, found_actual, found_hashed
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "userPassword"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "userPassword")
|
||||
+])
|
||||
+def test_password_masking_add_operation(topo, log_format, display_attrs):
|
||||
+ """Test password masking in ADD operations
|
||||
+
|
||||
+ :id: 4358bd75-bcc7-401c-b492-d3209b10412d
|
||||
+ :parametrized: yes
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Configure audit logging format
|
||||
+ 2. Add user with password
|
||||
+ 3. Check that password is masked in audit log
|
||||
+ 4. Verify actual password does not appear in log
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Password should be masked with asterisks
|
||||
+ 4. Actual password should not be found in log
|
||||
+ """
|
||||
+ inst = topo.ms['supplier1']
|
||||
+ setup_audit_logging(inst, log_format, display_attrs)
|
||||
+
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ user = None
|
||||
+
|
||||
+ try:
|
||||
+ user = users.create(properties={
|
||||
+ 'uid': 'test_add_pwd_mask',
|
||||
+ 'cn': 'Test Add User',
|
||||
+ 'sn': 'User',
|
||||
+ 'uidNumber': '1000',
|
||||
+ 'gidNumber': '1000',
|
||||
+ 'homeDirectory': '/home/test_add',
|
||||
+ 'userPassword': TEST_PASSWORD
|
||||
+ })
|
||||
+
|
||||
+ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
|
||||
+
|
||||
+ assert found_masked, f"Masked password not found in {log_format} ADD operation"
|
||||
+ assert not found_actual, f"Actual password found in {log_format} ADD log (should be masked)"
|
||||
+ assert not found_hashed, f"Hashed password found in {log_format} ADD log (should be masked)"
|
||||
+
|
||||
+ finally:
|
||||
+ if user is not None:
|
||||
+ try:
|
||||
+ user.delete()
|
||||
+ except:
|
||||
+ pass
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "userPassword"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "userPassword")
|
||||
+])
|
||||
+def test_password_masking_modify_operation(topo, log_format, display_attrs):
|
||||
+ """Test password masking in MODIFY operations
|
||||
+
|
||||
+ :id: e6963aa9-7609-419c-aae2-1d517aa434bd
|
||||
+ :parametrized: yes
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Configure audit logging format
|
||||
+ 2. Add user without password
|
||||
+ 3. Add password via MODIFY operation
|
||||
+ 4. Check that password is masked in audit log
|
||||
+ 5. Modify password to new value
|
||||
+ 6. Check that new password is also masked
|
||||
+ 7. Verify actual passwords do not appear in log
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Password should be masked with asterisks
|
||||
+ 5. Success
|
||||
+ 6. New password should be masked with asterisks
|
||||
+ 7. No actual password values should be found in log
|
||||
+ """
|
||||
+ inst = topo.ms['supplier1']
|
||||
+ setup_audit_logging(inst, log_format, display_attrs)
|
||||
+
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ user = None
|
||||
+
|
||||
+ try:
|
||||
+ user = users.create(properties={
|
||||
+ 'uid': 'test_modify_pwd_mask',
|
||||
+ 'cn': 'Test Modify User',
|
||||
+ 'sn': 'User',
|
||||
+ 'uidNumber': '2000',
|
||||
+ 'gidNumber': '2000',
|
||||
+ 'homeDirectory': '/home/test_modify'
|
||||
+ })
|
||||
+
|
||||
+ user.replace('userPassword', TEST_PASSWORD)
|
||||
+
|
||||
+ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
|
||||
+ assert found_masked, f"Masked password not found in {log_format} MODIFY operation (first password)"
|
||||
+ assert not found_actual, f"Actual password found in {log_format} MODIFY log (should be masked)"
|
||||
+ assert not found_hashed, f"Hashed password found in {log_format} MODIFY log (should be masked)"
|
||||
+
|
||||
+ user.replace('userPassword', TEST_PASSWORD_2)
|
||||
+
|
||||
+ found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
|
||||
+ assert found_masked_2, f"Masked password not found in {log_format} MODIFY operation (second password)"
|
||||
+ assert not found_actual_2, f"Second actual password found in {log_format} MODIFY log (should be masked)"
|
||||
+ assert not found_hashed_2, f"Second hashed password found in {log_format} MODIFY log (should be masked)"
|
||||
+
|
||||
+ finally:
|
||||
+ if user is not None:
|
||||
+ try:
|
||||
+ user.delete()
|
||||
+ except:
|
||||
+ pass
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "nsslapd-rootpw"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "nsslapd-rootpw")
|
||||
+])
|
||||
+def test_password_masking_rootpw_modify_operation(topo, log_format, display_attrs):
|
||||
+ """Test password masking for nsslapd-rootpw MODIFY operations
|
||||
+
|
||||
+ :id: ec8c9fd4-56ba-4663-ab65-58efb3b445e4
|
||||
+ :parametrized: yes
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Configure audit logging format
|
||||
+ 2. Modify nsslapd-rootpw in configuration
|
||||
+ 3. Check that root password is masked in audit log
|
||||
+ 4. Modify root password to new value
|
||||
+ 5. Check that new root password is also masked
|
||||
+ 6. Verify actual root passwords do not appear in log
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Root password should be masked with asterisks
|
||||
+ 4. Success
|
||||
+ 5. New root password should be masked with asterisks
|
||||
+ 6. No actual root password values should be found in log
|
||||
+ """
|
||||
+ inst = topo.ms['supplier1']
|
||||
+ setup_audit_logging(inst, log_format, display_attrs)
|
||||
+ dm = DirectoryManager(inst)
|
||||
+
|
||||
+ try:
|
||||
+ dm.change_password(TEST_PASSWORD)
|
||||
+ dm.rebind(TEST_PASSWORD)
|
||||
+
|
||||
+ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
|
||||
+ assert found_masked, f"Masked root password not found in {log_format} MODIFY operation (first root password)"
|
||||
+ assert not found_actual, f"Actual root password found in {log_format} MODIFY log (should be masked)"
|
||||
+ assert not found_hashed, f"Hashed root password found in {log_format} MODIFY log (should be masked)"
|
||||
+
|
||||
+ dm.change_password(TEST_PASSWORD_2)
|
||||
+ dm.rebind(TEST_PASSWORD_2)
|
||||
+
|
||||
+ found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
|
||||
+ assert found_masked_2, f"Masked root password not found in {log_format} MODIFY operation (second root password)"
|
||||
+ assert not found_actual_2, f"Second actual root password found in {log_format} MODIFY log (should be masked)"
|
||||
+ assert not found_hashed_2, f"Second hashed root password found in {log_format} MODIFY log (should be masked)"
|
||||
+
|
||||
+ finally:
|
||||
+ dm.change_password(PW_DM)
|
||||
+ dm.rebind(PW_DM)
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "nsmultiplexorcredentials"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "nsmultiplexorcredentials")
|
||||
+])
|
||||
+def test_password_masking_multiplexor_credentials(topo, log_format, display_attrs):
|
||||
+ """Test password masking for nsmultiplexorcredentials in chaining/multiplexor configurations
|
||||
+
|
||||
+ :id: 161a9498-b248-4926-90be-a696a36ed36e
|
||||
+ :parametrized: yes
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Configure audit logging format
|
||||
+ 2. Create a chaining backend configuration entry with nsmultiplexorcredentials
|
||||
+ 3. Check that multiplexor credentials are masked in audit log
|
||||
+ 4. Modify the credentials
|
||||
+ 5. Check that updated credentials are also masked
|
||||
+ 6. Verify actual credentials do not appear in log
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Multiplexor credentials should be masked with asterisks
|
||||
+ 4. Success
|
||||
+ 5. Updated credentials should be masked with asterisks
|
||||
+ 6. No actual credential values should be found in log
|
||||
+ """
|
||||
+ inst = topo.ms['supplier1']
|
||||
+ setup_audit_logging(inst, log_format, display_attrs)
|
||||
+
|
||||
+ # Enable chaining plugin and create chaining link
|
||||
+ chain_plugin = ChainingBackendPlugin(inst)
|
||||
+ chain_plugin.enable()
|
||||
+
|
||||
+ chains = ChainingLinks(inst)
|
||||
+ chain = None
|
||||
+
|
||||
+ try:
|
||||
+ # Create chaining link with multiplexor credentials
|
||||
+ chain = chains.create(properties={
|
||||
+ 'cn': 'testchain',
|
||||
+ 'nsfarmserverurl': 'ldap://localhost:389/',
|
||||
+ 'nsslapd-suffix': 'dc=example,dc=com',
|
||||
+ 'nsmultiplexorbinddn': 'cn=manager',
|
||||
+ 'nsmultiplexorcredentials': TEST_PASSWORD,
|
||||
+ 'nsCheckLocalACI': 'on',
|
||||
+ 'nsConnectionLife': '30',
|
||||
+ })
|
||||
+
|
||||
+ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
|
||||
+ assert found_masked, f"Masked multiplexor credentials not found in {log_format} ADD operation"
|
||||
+ assert not found_actual, f"Actual multiplexor credentials found in {log_format} ADD log (should be masked)"
|
||||
+ assert not found_hashed, f"Hashed multiplexor credentials found in {log_format} ADD log (should be masked)"
|
||||
+
|
||||
+ # Modify the credentials
|
||||
+ chain.replace('nsmultiplexorcredentials', TEST_PASSWORD_2)
|
||||
+
|
||||
+ found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
|
||||
+ assert found_masked_2, f"Masked multiplexor credentials not found in {log_format} MODIFY operation"
|
||||
+ assert not found_actual_2, f"Actual multiplexor credentials found in {log_format} MODIFY log (should be masked)"
|
||||
+        assert not found_hashed_2, f"Hashed multiplexor credentials found in {log_format} MODIFY log (should be masked)"
+
+    finally:
+        chain_plugin.disable()
+        if chain is not None:
+            inst.delete_branch_s(chain.dn, ldap.SCOPE_ONELEVEL)
+            chain.delete()
+
+
+@pytest.mark.parametrize("log_format,display_attrs", [
+    ("default", None),
+    ("default", "*"),
+    ("default", "nsDS5ReplicaCredentials"),
+    ("json", None),
+    ("json", "*"),
+    ("json", "nsDS5ReplicaCredentials")
+])
+def test_password_masking_replica_credentials(topo, log_format, display_attrs):
+    """Test password masking for nsDS5ReplicaCredentials in replication agreements
+
+    :id: 7bf9e612-1b7c-49af-9fc0-de4c7df84b2a
+    :parametrized: yes
+    :setup: Standalone Instance
+    :steps:
+        1. Configure audit logging format
+        2. Create a replication agreement entry with nsDS5ReplicaCredentials
+        3. Check that replica credentials are masked in audit log
+        4. Modify the credentials
+        5. Check that updated credentials are also masked
+        6. Verify actual credentials do not appear in log
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Replica credentials should be masked with asterisks
+        4. Success
+        5. Updated credentials should be masked with asterisks
+        6. No actual credential values should be found in log
+    """
+    inst = topo.ms['supplier2']
+    setup_audit_logging(inst, log_format, display_attrs)
+    agmt = None
+
+    try:
+        replicas = Replicas(inst)
+        replica = replicas.get(DEFAULT_SUFFIX)
+        agmts = replica.get_agreements()
+        agmt = agmts.create(properties={
+            'cn': 'testagmt',
+            'nsDS5ReplicaHost': 'localhost',
+            'nsDS5ReplicaPort': '389',
+            'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config',
+            'nsDS5ReplicaCredentials': TEST_PASSWORD,
+            'nsDS5ReplicaRoot': DEFAULT_SUFFIX
+        })
+
+        found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
+        assert found_masked, f"Masked replica credentials not found in {log_format} ADD operation"
+        assert not found_actual, f"Actual replica credentials found in {log_format} ADD log (should be masked)"
+        assert not found_hashed, f"Hashed replica credentials found in {log_format} ADD log (should be masked)"
+
+        # Modify the credentials
+        agmt.replace('nsDS5ReplicaCredentials', TEST_PASSWORD_2)
+
+        found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
+        assert found_masked_2, f"Masked replica credentials not found in {log_format} MODIFY operation"
+        assert not found_actual_2, f"Actual replica credentials found in {log_format} MODIFY log (should be masked)"
+        assert not found_hashed_2, f"Hashed replica credentials found in {log_format} MODIFY log (should be masked)"
+
+    finally:
+        if agmt is not None:
+            agmt.delete()
+
+
+@pytest.mark.parametrize("log_format,display_attrs", [
+    ("default", None),
+    ("default", "*"),
+    ("default", "nsDS5ReplicaBootstrapCredentials"),
+    ("json", None),
+    ("json", "*"),
+    ("json", "nsDS5ReplicaBootstrapCredentials")
+])
+def test_password_masking_bootstrap_credentials(topo, log_format, display_attrs):
+    """Test password masking for nsDS5ReplicaCredentials and nsDS5ReplicaBootstrapCredentials in replication agreements
+
+    :id: 248bd418-ffa4-4733-963d-2314c60b7c5b
+    :parametrized: yes
+    :setup: Standalone Instance
+    :steps:
+        1. Configure audit logging format
+        2. Create a replication agreement entry with both nsDS5ReplicaCredentials and nsDS5ReplicaBootstrapCredentials
+        3. Check that both credentials are masked in audit log
+        4. Modify both credentials
+        5. Check that both updated credentials are also masked
+        6. Verify actual credentials do not appear in log
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Both credentials should be masked with asterisks
+        4. Success
+        5. Both updated credentials should be masked with asterisks
+        6. No actual credential values should be found in log
+    """
+    inst = topo.ms['supplier2']
+    setup_audit_logging(inst, log_format, display_attrs)
+    agmt = None
+
+    try:
+        replicas = Replicas(inst)
+        replica = replicas.get(DEFAULT_SUFFIX)
+        agmts = replica.get_agreements()
+        agmt = agmts.create(properties={
+            'cn': 'testbootstrapagmt',
+            'nsDS5ReplicaHost': 'localhost',
+            'nsDS5ReplicaPort': '389',
+            'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config',
+            'nsDS5ReplicaCredentials': TEST_PASSWORD,
+            'nsDS5replicabootstrapbinddn': 'cn=bootstrap manager,cn=config',
+            'nsDS5ReplicaBootstrapCredentials': TEST_PASSWORD_2,
+            'nsDS5ReplicaRoot': DEFAULT_SUFFIX
+        })
+
+        found_masked_bootstrap, found_actual_bootstrap, found_hashed_bootstrap = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
+        assert found_masked_bootstrap, f"Masked bootstrap credentials not found in {log_format} ADD operation"
+        assert not found_actual_bootstrap, f"Actual bootstrap credentials found in {log_format} ADD log (should be masked)"
+        assert not found_hashed_bootstrap, f"Hashed bootstrap credentials found in {log_format} ADD log (should be masked)"
+
+        agmt.replace('nsDS5ReplicaBootstrapCredentials', TEST_PASSWORD_3)
+
+        found_masked_bootstrap_2, found_actual_bootstrap_2, found_hashed_bootstrap_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_3)
+        assert found_masked_bootstrap_2, f"Masked bootstrap credentials not found in {log_format} MODIFY operation"
+        assert not found_actual_bootstrap_2, f"Actual bootstrap credentials found in {log_format} MODIFY log (should be masked)"
+        assert not found_hashed_bootstrap_2, f"Hashed bootstrap credentials found in {log_format} MODIFY log (should be masked)"
+
+    finally:
+        if agmt is not None:
+            agmt.delete()
+
+
+
+if __name__ == '__main__':
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main(["-s", CURRENT_FILE])
\ No newline at end of file
diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 3945b0533..3a34959f6 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -39,6 +39,89 @@ static void write_audit_file(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,

static const char *modrdn_changes[4];

+/* Helper function to check if an attribute is a password that needs masking */
+static int
+is_password_attribute(const char *attr_name)
+{
+    return (strcasecmp(attr_name, SLAPI_USERPWD_ATTR) == 0 ||
+            strcasecmp(attr_name, CONFIG_ROOTPW_ATTRIBUTE) == 0 ||
+            strcasecmp(attr_name, SLAPI_MB_CREDENTIALS) == 0 ||
+            strcasecmp(attr_name, SLAPI_REP_CREDENTIALS) == 0 ||
+            strcasecmp(attr_name, SLAPI_REP_BOOTSTRAP_CREDENTIALS) == 0);
+}
+
+/* Helper function to create a masked string representation of an entry */
+static char *
+create_masked_entry_string(Slapi_Entry *original_entry, int *len)
+{
+    Slapi_Attr *attr = NULL;
+    char *entry_str = NULL;
+    char *current_pos = NULL;
+    char *line_start = NULL;
+    char *next_line = NULL;
+    char *colon_pos = NULL;
+    int has_password_attrs = 0;
+
+    if (original_entry == NULL) {
+        return NULL;
+    }
+
+    /* Single pass through attributes to check for password attributes */
+    for (slapi_entry_first_attr(original_entry, &attr); attr != NULL;
+         slapi_entry_next_attr(original_entry, attr, &attr)) {
+
+        char *attr_name = NULL;
+        slapi_attr_get_type(attr, &attr_name);
+
+        if (is_password_attribute(attr_name)) {
+            has_password_attrs = 1;
+            break;
+        }
+    }
+
+    /* If no password attributes, return original string - no masking needed */
+    entry_str = slapi_entry2str(original_entry, len);
+    if (!has_password_attrs) {
+        return entry_str;
+    }
+
+    /* Process the string in-place, replacing password values */
+    current_pos = entry_str;
+    while ((line_start = current_pos) != NULL && *line_start != '\0') {
+        /* Find the end of current line */
+        next_line = strchr(line_start, '\n');
+        if (next_line != NULL) {
+            *next_line = '\0'; /* Temporarily terminate line */
+            current_pos = next_line + 1;
+        } else {
+            current_pos = NULL; /* Last line */
+        }
+
+        /* Find the colon that separates attribute name from value */
+        colon_pos = strchr(line_start, ':');
+        if (colon_pos != NULL) {
+            char saved_colon = *colon_pos;
+            *colon_pos = '\0'; /* Temporarily null-terminate attribute name */
+
+            /* Check if this is a password attribute that needs masking */
+            if (is_password_attribute(line_start)) {
+                strcpy(colon_pos + 1, " **********************");
+            }
+
+            *colon_pos = saved_colon; /* Restore colon */
+        }
+
+        /* Restore newline if it was there */
+        if (next_line != NULL) {
+            *next_line = '\n';
+        }
+    }
+
+    /* Update length since we may have shortened the string */
+    *len = strlen(entry_str);
+    return entry_str; /* Return the modified original string */
+}
+
void
write_audit_log_entry(Slapi_PBlock *pb)
{
@@ -279,10 +362,31 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
        {
            slapi_entry_attr_find(entry, req_attr, &entry_attr);
            if (entry_attr) {
-                if (use_json) {
-                    log_entry_attr_json(entry_attr, req_attr, id_list);
+                if (strcmp(req_attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) {
+                    /* Do not write the unhashed clear-text password */
+                    continue;
+                }
+
+                /* Check if this is a password attribute that needs masking */
+                if (is_password_attribute(req_attr)) {
+                    /* userpassword/rootdn password - mask the value */
+                    if (use_json) {
+                        json_object *secret_obj = json_object_new_object();
+                        json_object_object_add(secret_obj, req_attr,
+                                               json_object_new_string("**********************"));
+                        json_object_array_add(id_list, secret_obj);
+                    } else {
+                        addlenstr(l, "#");
+                        addlenstr(l, req_attr);
+                        addlenstr(l, ": **********************\n");
+                    }
                } else {
-                    log_entry_attr(entry_attr, req_attr, l);
+                    /* Regular attribute - log normally */
+                    if (use_json) {
+                        log_entry_attr_json(entry_attr, req_attr, id_list);
+                    } else {
+                        log_entry_attr(entry_attr, req_attr, l);
+                    }
                }
            }
        }
@@ -297,9 +401,7 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
            continue;
        }

-        if (strcasecmp(attr, SLAPI_USERPWD_ATTR) == 0 ||
-            strcasecmp(attr, CONFIG_ROOTPW_ATTRIBUTE) == 0)
-        {
+        if (is_password_attribute(attr)) {
            /* userpassword/rootdn password - mask the value */
            if (use_json) {
                json_object *secret_obj = json_object_new_object();
@@ -309,7 +411,7 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
            } else {
                addlenstr(l, "#");
                addlenstr(l, attr);
-                addlenstr(l, ": ****************************\n");
+                addlenstr(l, ": **********************\n");
            }
            continue;
        }
@@ -478,6 +580,9 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
            }
        }

+        /* Check if this is a password attribute that needs masking */
+        int is_password_attr = is_password_attribute(mods[j]->mod_type);
+
        mod = json_object_new_object();
        switch (operationtype) {
        case LDAP_MOD_ADD:
@@ -502,7 +607,12 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
            json_object *val_list = NULL;
            val_list = json_object_new_array();
            for (size_t i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) {
-                json_object_array_add(val_list, json_object_new_string(mods[j]->mod_bvalues[i]->bv_val));
+                if (is_password_attr) {
+                    /* Mask password values */
+                    json_object_array_add(val_list, json_object_new_string("**********************"));
+                } else {
+                    json_object_array_add(val_list, json_object_new_string(mods[j]->mod_bvalues[i]->bv_val));
+                }
            }
            json_object_object_add(mod, "values", val_list);
        }
@@ -514,8 +624,11 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,

    case SLAPI_OPERATION_ADD:
        int len;
+
        e = change;
-        tmp = slapi_entry2str(e, &len);
+
+        /* Create a masked string representation for password attributes */
+        tmp = create_masked_entry_string(e, &len);
        tmpsave = tmp;
        while ((tmp = strchr(tmp, '\n')) != NULL) {
            tmp++;
@@ -662,6 +775,10 @@ write_audit_file(
            break;
        }
    }
+
+    /* Check if this is a password attribute that needs masking */
+    int is_password_attr = is_password_attribute(mods[j]->mod_type);
+
    switch (operationtype) {
    case LDAP_MOD_ADD:
        addlenstr(l, "add: ");
@@ -686,18 +803,27 @@ write_audit_file(
        break;
    }
    if (operationtype != LDAP_MOD_IGNORE) {
-        for (i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) {
-            char *buf, *bufp;
-            len = strlen(mods[j]->mod_type);
-            len = LDIF_SIZE_NEEDED(len, mods[j]->mod_bvalues[i]->bv_len) + 1;
-            buf = slapi_ch_malloc(len);
-            bufp = buf;
-            slapi_ldif_put_type_and_value_with_options(&bufp, mods[j]->mod_type,
-                                                       mods[j]->mod_bvalues[i]->bv_val,
-                                                       mods[j]->mod_bvalues[i]->bv_len, 0);
-            *bufp = '\0';
-            addlenstr(l, buf);
-            slapi_ch_free((void **)&buf);
+        if (is_password_attr) {
+            /* Add masked password */
+            for (i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) {
+                addlenstr(l, mods[j]->mod_type);
+                addlenstr(l, ": **********************\n");
+            }
+        } else {
+            /* Add actual values for non-password attributes */
+            for (i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) {
+                char *buf, *bufp;
+                len = strlen(mods[j]->mod_type);
+                len = LDIF_SIZE_NEEDED(len, mods[j]->mod_bvalues[i]->bv_len) + 1;
+                buf = slapi_ch_malloc(len);
+                bufp = buf;
+                slapi_ldif_put_type_and_value_with_options(&bufp, mods[j]->mod_type,
+                                                           mods[j]->mod_bvalues[i]->bv_val,
+                                                           mods[j]->mod_bvalues[i]->bv_len, 0);
+                *bufp = '\0';
+                addlenstr(l, buf);
+                slapi_ch_free((void **)&buf);
+            }
        }
    }
    addlenstr(l, "-\n");
@@ -708,7 +834,7 @@ write_audit_file(
        e = change;
        addlenstr(l, attr_changetype);
        addlenstr(l, ": add\n");
-        tmp = slapi_entry2str(e, &len);
+        tmp = create_masked_entry_string(e, &len);
        tmpsave = tmp;
        while ((tmp = strchr(tmp, '\n')) != NULL) {
            tmp++;
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 7a3eb3fdf..fb88488b1 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -848,6 +848,7 @@ void task_cleanup(void);
/* for reversible encyrption */
#define SLAPI_MB_CREDENTIALS "nsmultiplexorcredentials"
#define SLAPI_REP_CREDENTIALS "nsds5ReplicaCredentials"
+#define SLAPI_REP_BOOTSTRAP_CREDENTIALS "nsds5ReplicaBootstrapCredentials"
int pw_rever_encode(Slapi_Value **vals, char *attr_name);
int pw_rever_decode(char *cipher, char **plain, const char *attr_name);

diff --git a/src/lib389/lib389/chaining.py b/src/lib389/lib389/chaining.py
index 533b83ebf..33ae78c8b 100644
--- a/src/lib389/lib389/chaining.py
+++ b/src/lib389/lib389/chaining.py
@@ -134,7 +134,7 @@ class ChainingLink(DSLdapObject):
        """

        # Create chaining entry
-        super(ChainingLink, self).create(rdn, properties, basedn)
+        link = super(ChainingLink, self).create(rdn, properties, basedn)

        # Create mapping tree entry
        dn_comps = ldap.explode_dn(properties['nsslapd-suffix'][0])
@@ -149,6 +149,7 @@ class ChainingLink(DSLdapObject):
            self._mts.ensure_state(properties=mt_properties)
        except ldap.ALREADY_EXISTS:
            pass
+        return link


class ChainingLinks(DSLdapObjects):
--
2.49.0

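The tests in this patch all funnel through the suite's check_password_masked() helper. As a rough, self-contained illustration of what that kind of verification involves — a hypothetical stand-in, not the suite's actual helper — a scan of the audit log text could look like:

```python
import re

MASK = "**********************"

def log_masks_password(log_text, attr, secret):
    """Check that a credential attribute only ever appears masked:
    the clear-text secret must be absent, and every 'attr: value'
    line must carry the literal mask."""
    if secret in log_text:
        return False
    pattern = rf"^#?{re.escape(attr)}:\s*(.*)$"
    for match in re.finditer(pattern, log_text, re.IGNORECASE | re.MULTILINE):
        if match.group(1).strip() != MASK:
            return False
    return True

sample = "#nsDS5ReplicaCredentials: **********************\n"
assert log_masks_password(sample, "nsDS5ReplicaCredentials", "Secret123")
```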
File diff suppressed because it is too large
@ -0,0 +1,63 @@
From 574a5295e13cf01c34226d676104057468198616 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 4 Oct 2024 08:55:11 -0700
Subject: [PATCH] Issue 6339 - Address Coverity scan issues in memberof and
 bdb_layer (#6353)

Description: Add null check for memberof attribute in memberof.c
Fix memory leak by freeing 'cookie' in memberof.c
Add null check for database environment in bdb_layer.c
Fix race condition by adding mutex lock/unlock in bdb_layer.c

Fixes: https://github.com/389ds/389-ds-base/issues/6339

Reviewed by: @progier389, @tbordaz (Thanks!)
---
 ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
index b04cd68e2..4f069197e 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
@@ -6987,6 +6987,7 @@ bdb_public_private_open(backend *be, const char *db_filename, int rw, dbi_env_t
    bdb_config *conf = (bdb_config *)li->li_dblayer_config;
    bdb_db_env **ppEnv = (bdb_db_env**)&priv->dblayer_env;
    char dbhome[MAXPATHLEN];
+    bdb_db_env *pEnv = NULL;
    DB_ENV *bdb_env = NULL;
    DB *bdb_db = NULL;
    struct stat st = {0};
@@ -7036,7 +7037,13 @@ bdb_public_private_open(backend *be, const char *db_filename, int rw, dbi_env_t
        conf->bdb_tx_max = 50;
        rc = bdb_start(li, DBLAYER_NORMAL_MODE);
        if (rc == 0) {
-            bdb_env = ((struct bdb_db_env*)(priv->dblayer_env))->bdb_DB_ENV;
+            pEnv = (bdb_db_env *)priv->dblayer_env;
+            if (pEnv == NULL) {
+                fprintf(stderr, "bdb_public_private_open: dbenv is not available (0x%p) for database %s\n",
+                        (void *)pEnv, db_filename ? db_filename : "unknown");
+                return EINVAL;
+            }
+            bdb_env = pEnv->bdb_DB_ENV;
        }
    } else {
        /* Setup minimal environment */
@@ -7080,8 +7087,12 @@ bdb_public_private_close(struct ldbminfo *li, dbi_env_t **env, dbi_db_t **db)
    if (priv) {
        /* Detect if db is fully set up in read write mode */
        bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
-        if (pEnv && pEnv->bdb_thread_count>0) {
-            rw = 1;
+        if (pEnv) {
+            pthread_mutex_lock(&pEnv->bdb_thread_count_lock);
+            if (pEnv->bdb_thread_count > 0) {
+                rw = 1;
+            }
+            pthread_mutex_unlock(&pEnv->bdb_thread_count_lock);
        }
    }
    if (rw == 0) {
--
2.49.0

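The bdb_public_private_close() hunk above is the standard fix for a racy read: take the lock that guards bdb_thread_count before inspecting it. The same guarded-read shape, sketched in Python with invented names purely for illustration:

```python
import threading

class DbEnv:
    """Stand-in for bdb_db_env: a counter plus the lock that guards it."""
    def __init__(self):
        self.thread_count = 0
        self.thread_count_lock = threading.Lock()

def is_read_write(env):
    # Read the shared counter only while holding its lock, mirroring
    # the pthread_mutex_lock/unlock pair added by the patch.
    if env is None:
        return False
    with env.thread_count_lock:
        return env.thread_count > 0

print(is_read_write(DbEnv()))  # False until a database thread registers
```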
@ -0,0 +1,31 @@
From 972ddeed2029975d5d89e165db1db554f2e8bc28 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Tue, 29 Jul 2025 08:00:00 +0200
Subject: [PATCH] Issue 6468 - CLI - Fix default error log level

Description:
Default error log level is 16384

Relates: https://github.com/389ds/389-ds-base/issues/6468

Reviewed by: @droideck (Thanks!)
---
 src/lib389/lib389/cli_conf/logging.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib389/lib389/cli_conf/logging.py b/src/lib389/lib389/cli_conf/logging.py
index d1e32822c..c48c75faa 100644
--- a/src/lib389/lib389/cli_conf/logging.py
+++ b/src/lib389/lib389/cli_conf/logging.py
@@ -44,7 +44,7 @@ ERROR_LEVELS = {
                + "methods used for a SASL bind"
    },
    "default": {
-        "level": 6384,
+        "level": 16384,
        "desc": "Default logging level"
    },
    "filter": {
--
2.49.0

222
SOURCES/0022-Issues-6913-6886-6250-Adjust-xfail-marks-6914.patch
Normal file
@ -0,0 +1,222 @@
From f28deac93c552a9c4dc9dd9c18f449fcd5cc7731 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 1 Aug 2025 09:28:39 -0700
Subject: [PATCH] Issues 6913, 6886, 6250 - Adjust xfail marks (#6914)

Description: Some of the ACI invalid syntax issues were fixed,
so we need to remove xfail marks.
Disk space issue should have a 'skipif' mark.
Display all attrs (nsslapd-auditlog-display-attrs: *) fails because of a bug.
EntryUSN inconsistency and overflow bugs were exposed with the tests.

Related: https://github.com/389ds/389-ds-base/issues/6913
Related: https://github.com/389ds/389-ds-base/issues/6886
Related: https://github.com/389ds/389-ds-base/issues/6250

Reviewed by: @vashirov (Thanks!)
---
 dirsrvtests/tests/suites/acl/syntax_test.py | 13 ++++++++--
 .../tests/suites/import/regression_test.py | 18 +++++++-------
 .../logging/audit_password_masking_test.py | 24 +++++++++----------
 .../suites/plugins/entryusn_overflow_test.py | 2 ++
 4 files changed, 34 insertions(+), 23 deletions(-)

diff --git a/dirsrvtests/tests/suites/acl/syntax_test.py b/dirsrvtests/tests/suites/acl/syntax_test.py
index 4edc7fa4b..ed9919ba3 100644
--- a/dirsrvtests/tests/suites/acl/syntax_test.py
+++ b/dirsrvtests/tests/suites/acl/syntax_test.py
@@ -190,10 +190,9 @@ FAILED = [('test_targattrfilters_18',
           f'(all)userdn="ldap:///anyone";)'), ]


-@pytest.mark.xfail(reason='https://bugzilla.redhat.com/show_bug.cgi?id=1691473')
@pytest.mark.parametrize("real_value", [a[1] for a in FAILED],
                         ids=[a[0] for a in FAILED])
-def test_aci_invalid_syntax_fail(topo, real_value):
+def test_aci_invalid_syntax_fail(topo, real_value, request):
    """Try to set wrong ACI syntax.

    :id: 83c40784-fff5-49c8-9535-7064c9c19e7e
@@ -206,6 +205,16 @@ def test_aci_invalid_syntax_fail(topo, real_value):
        1. It should pass
        2. It should not pass
    """
+    # Mark specific test cases as xfail
+    xfail_cases = [
+        'test_targattrfilters_18',
+        'test_targattrfilters_20',
+        'test_bind_rule_set_with_more_than_three'
+    ]
+
+    if request.node.callspec.id in xfail_cases:
+        pytest.xfail("DS6913 - This test case is expected to fail")
+
    domain = Domain(topo.standalone, DEFAULT_SUFFIX)
    with pytest.raises(ldap.INVALID_SYNTAX):
        domain.add("aci", real_value)
diff --git a/dirsrvtests/tests/suites/import/regression_test.py b/dirsrvtests/tests/suites/import/regression_test.py
index 2f850a19a..18611de35 100644
--- a/dirsrvtests/tests/suites/import/regression_test.py
+++ b/dirsrvtests/tests/suites/import/regression_test.py
@@ -323,7 +323,7 @@ ou: myDups00001

@pytest.mark.bz1749595
@pytest.mark.tier2
-@pytest.mark.xfail(not _check_disk_space(), reason="not enough disk space for lmdb map")
+@pytest.mark.skipif(not _check_disk_space(), reason="not enough disk space for lmdb map")
@pytest.mark.xfail(ds_is_older("1.3.10.1"), reason="bz1749595 not fixed on versions older than 1.3.10.1")
def test_large_ldif2db_ancestorid_index_creation(topo, _set_mdb_map_size):
    """Import with ldif2db a large file - check that the ancestorid index creation phase has a correct performance
@@ -399,39 +399,39 @@ def test_large_ldif2db_ancestorid_index_creation(topo, _set_mdb_map_size):
    log.info('Starting the server')
    topo.standalone.start()

-    # With lmdb there is no more any special phase for ancestorid 
+    # With lmdb there is no more any special phase for ancestorid
    # because ancestorsid get updated on the fly while processing the
    # entryrdn (by up the parents chain to compute the parentid
-    # 
+    #
    # But there is still a numSubordinates generation phase
    if get_default_db_lib() == "mdb":
        log.info('parse the errors logs to check lines with "Generating numSubordinates complete." are present')
        end_numsubordinates = str(topo.standalone.ds_error_log.match(r'.*Generating numSubordinates complete.*'))[1:-1]
        assert len(end_numsubordinates) > 0
-
+
    else:
        log.info('parse the errors logs to check lines with "Starting sort of ancestorid" are present')
        start_sort_str = str(topo.standalone.ds_error_log.match(r'.*Starting sort of ancestorid non-leaf IDs*'))[1:-1]
        assert len(start_sort_str) > 0
-
+
        log.info('parse the errors logs to check lines with "Finished sort of ancestorid" are present')
        end_sort_str = str(topo.standalone.ds_error_log.match(r'.*Finished sort of ancestorid non-leaf IDs*'))[1:-1]
        assert len(end_sort_str) > 0
-
+
        log.info('parse the error logs for the line with "Gathering ancestorid non-leaf IDs"')
        start_ancestorid_indexing_op_str = str(topo.standalone.ds_error_log.match(r'.*Gathering ancestorid non-leaf IDs*'))[1:-1]
        assert len(start_ancestorid_indexing_op_str) > 0
-
+
        log.info('parse the error logs for the line with "Created ancestorid index"')
        end_ancestorid_indexing_op_str = str(topo.standalone.ds_error_log.match(r'.*Created ancestorid index*'))[1:-1]
        assert len(end_ancestorid_indexing_op_str) > 0
-
+
        log.info('get the ancestorid non-leaf IDs indexing start and end time from the collected strings')
        # Collected lines look like : '[15/May/2020:05:30:27.245967313 -0400] - INFO - bdb_get_nonleaf_ids - import userRoot: Gathering ancestorid non-leaf IDs...'
        # We are getting the sec.nanosec part of the date, '27.245967313' in the above example
        start_time = (start_ancestorid_indexing_op_str.split()[0]).split(':')[3]
        end_time = (end_ancestorid_indexing_op_str.split()[0]).split(':')[3]
-
+
        log.info('Calculate the elapsed time for the ancestorid non-leaf IDs index creation')
        etime = (Decimal(end_time) - Decimal(start_time))
        # The time for the ancestorid index creation should be less than 10s for an offline import of an ldif file with 100000 entries / 5 entries per node
diff --git a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
index 3b6a54849..69a36cb5d 100644
--- a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
+++ b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
@@ -117,10 +117,10 @@ def check_password_masked(inst, log_format, expected_password, actual_password):

@pytest.mark.parametrize("log_format,display_attrs", [
    ("default", None),
-    ("default", "*"),
+    pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
    ("default", "userPassword"),
    ("json", None),
-    ("json", "*"),
+    pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
    ("json", "userPassword")
])
def test_password_masking_add_operation(topo, log_format, display_attrs):
@@ -173,10 +173,10 @@ def test_password_masking_add_operation(topo, log_format, display_attrs):

@pytest.mark.parametrize("log_format,display_attrs", [
    ("default", None),
-    ("default", "*"),
+    pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
    ("default", "userPassword"),
    ("json", None),
-    ("json", "*"),
+    pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
    ("json", "userPassword")
])
def test_password_masking_modify_operation(topo, log_format, display_attrs):
@@ -242,10 +242,10 @@ def test_password_masking_modify_operation(topo, log_format, display_attrs):

@pytest.mark.parametrize("log_format,display_attrs", [
    ("default", None),
-    ("default", "*"),
+    pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
    ("default", "nsslapd-rootpw"),
    ("json", None),
-    ("json", "*"),
+    pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
    ("json", "nsslapd-rootpw")
])
def test_password_masking_rootpw_modify_operation(topo, log_format, display_attrs):
@@ -297,10 +297,10 @@ def test_password_masking_rootpw_modify_operation(topo, log_format, display_attr

@pytest.mark.parametrize("log_format,display_attrs", [
    ("default", None),
-    ("default", "*"),
+    pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
    ("default", "nsmultiplexorcredentials"),
    ("json", None),
-    ("json", "*"),
+    pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
    ("json", "nsmultiplexorcredentials")
])
def test_password_masking_multiplexor_credentials(topo, log_format, display_attrs):
@@ -368,10 +368,10 @@ def test_password_masking_multiplexor_credentials(topo, log_format, display_attr

@pytest.mark.parametrize("log_format,display_attrs", [
    ("default", None),
-    ("default", "*"),
+    pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
    ("default", "nsDS5ReplicaCredentials"),
    ("json", None),
-    ("json", "*"),
+    pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
    ("json", "nsDS5ReplicaCredentials")
])
def test_password_masking_replica_credentials(topo, log_format, display_attrs):
@@ -432,10 +432,10 @@ def test_password_masking_replica_credentials(topo, log_format, display_attrs):

@pytest.mark.parametrize("log_format,display_attrs", [
    ("default", None),
-    ("default", "*"),
+    pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
    ("default", "nsDS5ReplicaBootstrapCredentials"),
    ("json", None),
-    ("json", "*"),
+    pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
    ("json", "nsDS5ReplicaBootstrapCredentials")
])
def test_password_masking_bootstrap_credentials(topo, log_format, display_attrs):
diff --git a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
index a23d734ca..8c3a537ab 100644
--- a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
+++ b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
@@ -81,6 +81,7 @@ def setup_usn_test(topology_st, request):
    return created_users


+@pytest.mark.xfail(reason="DS6250")
def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test):
    """Test that reproduces entryUSN overflow when adding existing entries

@@ -232,6 +233,7 @@ def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test):
    log.info("EntryUSN overflow test completed successfully")


+@pytest.mark.xfail(reason="DS6250")
def test_entryusn_consistency_after_failed_adds(topology_st, setup_usn_test):
    """Test that entryUSN remains consistent after failed add operations

--
2.49.0

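The recurring edit in this patch swaps a plain parameter tuple for pytest.param() so that only the "*" case carries an expected-failure mark while its siblings keep running normally. A minimal self-contained example of the idiom (generic test, not from the suite):

```python
import pytest

@pytest.mark.parametrize("log_format,display_attrs", [
    ("default", None),
    # Only this case is expected to fail until DS6886 is fixed
    pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
    ("json", None),
])
def test_demo(log_format, display_attrs):
    assert log_format in ("default", "json")
```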
@ -0,0 +1,32 @@
From 58a9e1083865e75bba3cf9867a3df109031d7810 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Mon, 28 Jul 2025 13:18:26 +0200
Subject: [PATCH] Issue 6181 - RFE - Allow system to manage uid/gid at startup

Description:
Expand CapabilityBoundingSet to include CAP_FOWNER

Relates: https://github.com/389ds/389-ds-base/issues/6181
Relates: https://github.com/389ds/389-ds-base/issues/6906

Reviewed by: @progier389 (Thanks!)
---
 wrappers/systemd.template.service.in | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in
index fa05c9f60..6db1f6f8f 100644
--- a/wrappers/systemd.template.service.in
+++ b/wrappers/systemd.template.service.in
@@ -25,7 +25,7 @@ MemoryAccounting=yes

# Allow non-root instances to bind to low ports.
AmbientCapabilities=CAP_NET_BIND_SERVICE
-CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETUID CAP_SETGID CAP_DAC_OVERRIDE CAP_CHOWN
+CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETUID CAP_SETGID CAP_DAC_OVERRIDE CAP_CHOWN CAP_FOWNER

PrivateTmp=on
# https://en.opensuse.org/openSUSE:Security_Features#Systemd_hardening_effort
--
2.49.0

@ -0,0 +1,92 @@
From e03af0aa7e041fc2ca20caf3bcb5810e968043dc Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Tue, 13 May 2025 13:53:05 +0200
Subject: [PATCH] Issue 6778 - Memory leak in
 roles_cache_create_object_from_entry

Bug Description:
`this_role` has internal allocations (`dn`, `rolescopedn`, etc.)
that are not freed.

Fix Description:
Use `roles_cache_role_object_free` to free `this_role` and all its
internal structures.

Fixes: https://github.com/389ds/389-ds-base/issues/6778

Reviewed by: @mreynolds389 (Thanks!)
---
 ldap/servers/plugins/roles/roles_cache.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c
index bbed11802..60d7182e2 100644
--- a/ldap/servers/plugins/roles/roles_cache.c
+++ b/ldap/servers/plugins/roles/roles_cache.c
@@ -1098,7 +1098,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
    /* We determine the role type by reading the objectclass */
    if (roles_cache_is_role_entry(role_entry) == 0) {
        /* Bad type */
-        slapi_ch_free((void **)&this_role);
+        roles_cache_role_object_free((caddr_t)this_role);
        return SLAPI_ROLE_DEFINITION_ERROR;
    }

@@ -1108,7 +1108,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
        this_role->type = type;
    } else {
        /* Bad type */
-        slapi_ch_free((void **)&this_role);
+        roles_cache_role_object_free((caddr_t)this_role);
        return SLAPI_ROLE_DEFINITION_ERROR;
    }

@@ -1166,7 +1166,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
    filter_attr_value = (char *)slapi_entry_attr_get_charptr(role_entry, ROLE_FILTER_ATTR_NAME);
    if (filter_attr_value == NULL) {
        /* Means probably no attribute or no value there */
-        slapi_ch_free((void **)&this_role);
+        roles_cache_role_object_free((caddr_t)this_role);
        return SLAPI_ROLE_ERROR_NO_FILTER_SPECIFIED;
    }

@@ -1205,7 +1205,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
                      (char *)slapi_sdn_get_ndn(this_role->dn),
                      ROLE_FILTER_ATTR_NAME, filter_attr_value,
                      ROLE_FILTER_ATTR_NAME);
-        slapi_ch_free((void **)&this_role);
+        roles_cache_role_object_free((caddr_t)this_role);
        slapi_ch_free_string(&filter_attr_value);
        return SLAPI_ROLE_ERROR_FILTER_BAD;
    }
@@ -1217,7 +1217,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
    filter = slapi_str2filter(filter_attr_value);
    if (filter == NULL) {
        /* An error has occured */
-        slapi_ch_free((void **)&this_role);
+        roles_cache_role_object_free((caddr_t)this_role);
        slapi_ch_free_string(&filter_attr_value);
        return SLAPI_ROLE_ERROR_FILTER_BAD;
    }
@@ -1228,7 +1228,8 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
                      (char *)slapi_sdn_get_ndn(this_role->dn),
                      filter_attr_value,
                      ROLE_FILTER_ATTR_NAME);
-        slapi_ch_free((void **)&this_role);
+        roles_cache_role_object_free((caddr_t)this_role);
+        slapi_filter_free(filter, 1);
        slapi_ch_free_string(&filter_attr_value);
        return SLAPI_ROLE_ERROR_FILTER_BAD;
    }
@@ -1285,7 +1286,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
    if (rc == 0) {
        *result = this_role;
    } else {
-        slapi_ch_free((void **)&this_role);
+        roles_cache_role_object_free((caddr_t)this_role);
    }

    slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM,
--
2.49.0

@ -0,0 +1,262 @@
From c8c9d8814bd328d9772b6a248aa142b72430cba1 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Wed, 16 Jul 2025 11:22:30 +0200
Subject: [PATCH] Issue 6778 - Memory leak in
 roles_cache_create_object_from_entry part 2

Bug Description:
Every time a role with scope DN is processed, we leak rolescopeDN.

Fix Description:
* Initialize all pointer variables to NULL
* Add additional NULL checks
* Free rolescopeDN
* Move test_rewriter_with_invalid_filter before the DB contains 90k entries
* Use task.wait() for import task completion instead of parsing logs,
  increase the timeout

Fixes: https://github.com/389ds/389-ds-base/issues/6778

Reviewed by: @progier389 (Thanks!)
---
 dirsrvtests/tests/suites/roles/basic_test.py | 164 +++++++++----------
 ldap/servers/plugins/roles/roles_cache.c | 10 +-
 2 files changed, 82 insertions(+), 92 deletions(-)

diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py
index d92d6f0c3..ec208bae9 100644
--- a/dirsrvtests/tests/suites/roles/basic_test.py
+++ b/dirsrvtests/tests/suites/roles/basic_test.py
@@ -510,6 +510,76 @@ def test_vattr_on_managed_role(topo, request):

    request.addfinalizer(fin)

+def test_rewriter_with_invalid_filter(topo, request):
+    """Test that server does not crash when having
+    invalid filter in filtered role
+
+    :id: 5013b0b2-0af6-11f0-8684-482ae39447e5
+    :setup: standalone server
+    :steps:
+        1. Setup filtered role with good filter
+        2. Setup nsrole rewriter
+        3. Restart the server
+        4. Search for entries
+        5. Setup filtered role with bad filter
+        6. Search for entries
+    :expectedresults:
+        1. Operation should succeed
+        2. Operation should succeed
+        3. Operation should succeed
+        4. Operation should succeed
+        5. Operation should succeed
+        6. Operation should succeed
+    """
+    inst = topo.standalone
+    entries = []
+
+    def fin():
+        inst.start()
+        for entry in entries:
+            entry.delete()
+    request.addfinalizer(fin)
+
+    # Setup filtered role
+    roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
+    filter_ko = '(&((objectClass=top)(objectClass=nsPerson))'
+    filter_ok = '(&(objectClass=top)(objectClass=nsPerson))'
+    role_properties = {
+        'cn': 'TestFilteredRole',
+        'nsRoleFilter': filter_ok,
+        'description': 'Test good filter',
+    }
+    role = roles.create(properties=role_properties)
+    entries.append(role)
+
+    # Setup nsrole rewriter
+    rewriters = Rewriters(inst)
+    rewriter_properties = {
+        "cn": "nsrole",
+        "nsslapd-libpath": 'libroles-plugin',
+        "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter',
+    }
+    rewriter = rewriters.ensure_state(properties=rewriter_properties)
+    entries.append(rewriter)
+
+    # Restart thge instance
+    inst.restart()
+
+    # Search for entries
+    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
+
+    # Set bad filter
+    role_properties = {
+        'cn': 'TestFilteredRole',
+        'nsRoleFilter': filter_ko,
+        'description': 'Test bad filter',
+    }
+    role.ensure_state(properties=role_properties)
+
+    # Search for entries
+    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
+
+
def test_managed_and_filtered_role_rewrite(topo, request):
    """Test that filter components containing 'nsrole=xxx'
    are reworked if xxx is either a filtered role or a managed
@@ -581,17 +651,11 @@ def test_managed_and_filtered_role_rewrite(topo, request):
    PARENT="ou=people,%s" % DEFAULT_SUFFIX
    dbgen_users(topo.standalone, 90000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT)

-    # online import
+    # Online import
    import_task = ImportTask(topo.standalone)
    import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
-    # Check for up to 200sec that the completion
-    for i in range(1, 20):
-        if len(topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9000.*')) > 0:
-            break
-        time.sleep(10)
-    import_complete = topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9000.*')
-    assert (len(import_complete) == 1)
-
+    import_task.wait(timeout=400)
+    assert import_task.get_exit_code() == 0
    # Restart server
    topo.standalone.restart()

@@ -715,17 +779,11 @@ def test_not_such_entry_role_rewrite(topo, request):
    PARENT="ou=people,%s" % DEFAULT_SUFFIX
    dbgen_users(topo.standalone, 91000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT)

-    # online import
+    # Online import
    import_task = ImportTask(topo.standalone)
    import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
-    # Check for up to 200sec that the completion
-    for i in range(1, 20):
-        if len(topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9100.*')) > 0:
-            break
-        time.sleep(10)
-    import_complete = topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9100.*')
-    assert (len(import_complete) == 1)
-
+    import_task.wait(timeout=400)
+    assert import_task.get_exit_code() == 0
    # Restart server
    topo.standalone.restart()

@@ -769,76 +827,6 @@ def test_not_such_entry_role_rewrite(topo, request):
    request.addfinalizer(fin)


-def test_rewriter_with_invalid_filter(topo, request):
-    """Test that server does not crash when having
-    invalid filter in filtered role
-
-    :id: 5013b0b2-0af6-11f0-8684-482ae39447e5
-    :setup: standalone server
-    :steps:
-        1. Setup filtered role with good filter
-        2. Setup nsrole rewriter
-        3. Restart the server
-        4. Search for entries
-        5. Setup filtered role with bad filter
-        6. Search for entries
-    :expectedresults:
-        1. Operation should succeed
-        2. Operation should succeed
-        3. Operation should succeed
-        4. Operation should succeed
-        5. Operation should succeed
-        6. Operation should succeed
-    """
-    inst = topo.standalone
-    entries = []
-
-    def fin():
-        inst.start()
-        for entry in entries:
-            entry.delete()
-    request.addfinalizer(fin)
-
-    # Setup filtered role
-    roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
-    filter_ko = '(&((objectClass=top)(objectClass=nsPerson))'
-    filter_ok = '(&(objectClass=top)(objectClass=nsPerson))'
-    role_properties = {
-        'cn': 'TestFilteredRole',
-        'nsRoleFilter': filter_ok,
-        'description': 'Test good filter',
-    }
-    role = roles.create(properties=role_properties)
-    entries.append(role)
-
-    # Setup nsrole rewriter
-    rewriters = Rewriters(inst)
-    rewriter_properties = {
-        "cn": "nsrole",
-        "nsslapd-libpath": 'libroles-plugin',
-        "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter',
-    }
-    rewriter = rewriters.ensure_state(properties=rewriter_properties)
-    entries.append(rewriter)
-
-    # Restart thge instance
-    inst.restart()
-
-    # Search for entries
-    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
-
-    # Set bad filter
-    role_properties = {
-        'cn': 'TestFilteredRole',
-        'nsRoleFilter': filter_ko,
-        'description': 'Test bad filter',
-    }
-    role.ensure_state(properties=role_properties)
-
-    # Search for entries
-    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
-
-
if __name__ == "__main__":
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s -v %s" % CURRENT_FILE)
diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c
index 60d7182e2..60f5a919a 100644
--- a/ldap/servers/plugins/roles/roles_cache.c
+++ b/ldap/servers/plugins/roles/roles_cache.c
@@ -1117,16 +1117,17 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu

    rolescopeDN = slapi_entry_attr_get_charptr(role_entry, ROLE_SCOPE_DN);
    if (rolescopeDN) {
-        Slapi_DN *rolescopeSDN;
-        Slapi_DN *top_rolescopeSDN, *top_this_roleSDN;
+        Slapi_DN *rolescopeSDN = NULL;
+        Slapi_DN *top_rolescopeSDN = NULL;
+        Slapi_DN *top_this_roleSDN = NULL;

        /* Before accepting to use this scope, first check if it belongs to the same suffix */
        rolescopeSDN = slapi_sdn_new_dn_byref(rolescopeDN);
-        if ((strlen((char *)slapi_sdn_get_ndn(rolescopeSDN)) > 0) &&
+        if (rolescopeSDN && (strlen((char *)slapi_sdn_get_ndn(rolescopeSDN)) > 0) &&
            (slapi_dn_syntax_check(NULL, (char *)slapi_sdn_get_ndn(rolescopeSDN), 1) == 0)) {
            top_rolescopeSDN = roles_cache_get_top_suffix(rolescopeSDN);
            top_this_roleSDN = roles_cache_get_top_suffix(this_role->dn);
-            if (slapi_sdn_compare(top_rolescopeSDN, top_this_roleSDN) == 0) {
+            if (top_rolescopeSDN && top_this_roleSDN && slapi_sdn_compare(top_rolescopeSDN, top_this_roleSDN) == 0) {
                /* rolescopeDN belongs to the same suffix as the role, we can use this scope */
                this_role->rolescopedn = rolescopeSDN;
            } else {
@@ -1148,6 +1149,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
                rolescopeDN);
            slapi_sdn_free(&rolescopeSDN);
        }
+        slapi_ch_free_string(&rolescopeDN);
    }

    /* Depending upon role type, pull out the remaining information we need */
--
2.49.0

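The basic_test.py hunks above drop the error-log polling loop in favor of the task entry itself. Assuming the lib389 ImportTask interface exactly as used in the hunk, the adopted pattern boils down to:

```python
from lib389.tasks import ImportTask

def run_online_import(inst, ldif_path, suffix):
    """Submit an online import and block on the task entry instead of
    polling the error log (the pattern adopted by this patch)."""
    task = ImportTask(inst)
    task.import_suffix_from_ldif(ldiffile=ldif_path, suffix=suffix)
    task.wait(timeout=400)
    assert task.get_exit_code() == 0
```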
@ -0,0 +1,65 @@
From f83a1996e3438e471cec086d53fb94be0c8666aa Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Mon, 7 Jul 2025 23:11:17 +0200
Subject: [PATCH] Issue 6850 - AddressSanitizer: memory leak in mdb_init

Bug Description:
`dbmdb_componentid` can be allocated multiple times. To avoid a memory
leak, allocate it only once and free it at cleanup.

Fixes: https://github.com/389ds/389-ds-base/issues/6850

Reviewed by: @mreynolds389, @tbordaz (Thanks!)
---
 ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c | 4 +++-
 ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c | 2 +-
 ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c | 5 +++++
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
index 1f7b71442..bebc83b76 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
@@ -146,7 +146,9 @@ dbmdb_compute_limits(struct ldbminfo *li)
int mdb_init(struct ldbminfo *li, config_info *config_array)
{
    dbmdb_ctx_t *conf = (dbmdb_ctx_t *)slapi_ch_calloc(1, sizeof(dbmdb_ctx_t));
-    dbmdb_componentid = generate_componentid(NULL, "db-mdb");
+    if (dbmdb_componentid == NULL) {
+        dbmdb_componentid = generate_componentid(NULL, "db-mdb");
+    }

    li->li_dblayer_config = conf;
    strncpy(conf->home, li->li_directory, MAXPATHLEN-1);
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
index 3ecc47170..c6e9f8b01 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
@@ -19,7 +19,7 @@
#include <prclist.h>
#include <glob.h>

-Slapi_ComponentId *dbmdb_componentid;
+Slapi_ComponentId *dbmdb_componentid = NULL;

#define BULKOP_MAX_RECORDS 100 /* Max records handled by a single bulk operations */

diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c
index 2d07db9b5..ae10ac7cf 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c
@@ -49,6 +49,11 @@ dbmdb_cleanup(struct ldbminfo *li)
    }
    slapi_ch_free((void **)&(li->li_dblayer_config));

+    if (dbmdb_componentid != NULL) {
+        release_componentid(dbmdb_componentid);
+        dbmdb_componentid = NULL;
+    }
+
    return 0;
}

--
2.49.0

@ -0,0 +1,58 @@
From e98acc1bfe2194fcdd0e420777eb65a20d55a64b Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Mon, 7 Jul 2025 22:01:09 +0200
Subject: [PATCH] Issue 6848 - AddressSanitizer: leak in do_search

Bug Description:
When there's a BER decoding error and the function goes to
`free_and_return`, the `attrs` variable is not being freed because it's
only freed if `!psearch || rc != 0 || err != 0`, but `err` is still 0 at
that point.

If we reach `free_and_return` from the `ber_scanf` error path, `attrs`
was never set in the pblock with `slapi_pblock_set()`, so the
`slapi_pblock_get()` call will not retrieve the potentially partially
allocated `attrs` from the BER decoding.

Fixes: https://github.com/389ds/389-ds-base/issues/6848

Reviewed by: @tbordaz, @droideck (Thanks!)
---
 ldap/servers/slapd/search.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/slapd/search.c b/ldap/servers/slapd/search.c
index e9b2c3670..f9d03c090 100644
--- a/ldap/servers/slapd/search.c
+++ b/ldap/servers/slapd/search.c
@@ -235,6 +235,7 @@ do_search(Slapi_PBlock *pb)
            log_search_access(pb, base, scope, fstr, "decoding error");
            send_ldap_result(pb, LDAP_PROTOCOL_ERROR, NULL, NULL, 0,
                             NULL);
+            err = 1; /* Make sure we free everything */
            goto free_and_return;
        }

@@ -420,8 +421,17 @@ free_and_return:
    if (!psearch || rc != 0 || err != 0) {
        slapi_ch_free_string(&fstr);
        slapi_filter_free(filter, 1);
-        slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &attrs);
-        charray_free(attrs); /* passing NULL is fine */
+
+        /* Get attrs from pblock if it was set there, otherwise use local attrs */
+        char **pblock_attrs = NULL;
+        slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &pblock_attrs);
+        if (pblock_attrs != NULL) {
+            charray_free(pblock_attrs); /* Free attrs from pblock */
+            slapi_pblock_set(pb, SLAPI_SEARCH_ATTRS, NULL);
+        } else if (attrs != NULL) {
+            /* Free attrs that were allocated but never put in pblock */
+            charray_free(attrs);
+        }
        charray_free(gerattrs); /* passing NULL is fine */
        /*
         * Fix for defect 526719 / 553356 : Persistent search op failed.
--
2.49.0

@ -0,0 +1,58 @@
From 120bc2666b682a27ffd6ace5cc238b33fab32c21 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Fri, 11 Jul 2025 12:32:38 +0200
Subject: [PATCH] Issue 6865 - AddressSanitizer: leak in
 agmt_update_init_status

Bug Description:
We allocate an array of `LDAPMod *` pointers, but never free it:

```
=================================================================
==2748356==ERROR: LeakSanitizer: detected memory leaks

Direct leak of 24 byte(s) in 1 object(s) allocated from:
    #0 0x7f05e8cb4a07 in __interceptor_malloc (/lib64/libasan.so.6+0xb4a07)
    #1 0x7f05e85c0138 in slapi_ch_malloc (/usr/lib64/dirsrv/libslapd.so.0+0x1c0138)
    #2 0x7f05e109e481 in agmt_update_init_status ldap/servers/plugins/replication/repl5_agmt.c:2583
    #3 0x7f05e10a0aa5 in agmtlist_shutdown ldap/servers/plugins/replication/repl5_agmtlist.c:789
    #4 0x7f05e10ab6bc in multisupplier_stop ldap/servers/plugins/replication/repl5_init.c:844
    #5 0x7f05e10ab6bc in multisupplier_stop ldap/servers/plugins/replication/repl5_init.c:837
    #6 0x7f05e862507d in plugin_call_func ldap/servers/slapd/plugin.c:2001
    #7 0x7f05e8625be1 in plugin_call_one ldap/servers/slapd/plugin.c:1950
    #8 0x7f05e8625be1 in plugin_dependency_closeall ldap/servers/slapd/plugin.c:1844
    #9 0x55e1a7ff9815 in slapd_daemon ldap/servers/slapd/daemon.c:1275
    #10 0x55e1a7fd36ef in main (/usr/sbin/ns-slapd+0x3e6ef)
    #11 0x7f05e80295cf in __libc_start_call_main (/lib64/libc.so.6+0x295cf)
    #12 0x7f05e802967f in __libc_start_main_alias_2 (/lib64/libc.so.6+0x2967f)
    #13 0x55e1a7fd74a4 in _start (/usr/sbin/ns-slapd+0x424a4)

SUMMARY: AddressSanitizer: 24 byte(s) leaked in 1 allocation(s).
```

Fix Description:
Ensure `mods` is freed in the cleanup code.

Fixes: https://github.com/389ds/389-ds-base/issues/6865
Relates: https://github.com/389ds/389-ds-base/issues/6470

Reviewed by: @mreynolds389 (Thanks!)
---
 ldap/servers/plugins/replication/repl5_agmt.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
index 6ffb074d4..c6cfcda07 100644
--- a/ldap/servers/plugins/replication/repl5_agmt.c
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
@@ -2653,6 +2653,7 @@ agmt_update_init_status(Repl_Agmt *ra)
    } else {
        PR_Unlock(ra->lock);
    }
+    slapi_ch_free((void **)&mods);
    slapi_mod_done(&smod_start_time);
    slapi_mod_done(&smod_end_time);
    slapi_mod_done(&smod_status);
--
2.49.0

@ -0,0 +1,97 @@
From 5cc13c70dfe22d95686bec9214c53f1b4114cd90 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Fri, 1 Aug 2025 13:27:02 +0100
Subject: [PATCH] Issue 6768 - ns-slapd crashes when a referral is added
 (#6780)

Bug description: When a paged result search is successfully run on a referred
suffix, we retrieve the search result set from the pblock and try to release
it. In this case the search result set is NULL, which triggers a SEGV during
the release.

Fix description: If the search result code is LDAP_REFERRAL, skip deletion of
the search result set. Added test case.

Fixes: https://github.com/389ds/389-ds-base/issues/6768

Reviewed by: @tbordaz, @progier389 (Thank you)
---
 .../paged_results/paged_results_test.py | 46 +++++++++++++++++++
 ldap/servers/slapd/opshared.c           |  4 +-
 2 files changed, 49 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
index fca48db0f..1bb94b53a 100644
--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py
+++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
@@ -1271,6 +1271,52 @@ def test_search_stress_abandon(create_40k_users, create_user):
     paged_search(conn, create_40k_users.suffix, [req_ctrl], search_flt, searchreq_attrlist, abandon_rate=abandon_rate)


+def test_search_referral(topology_st):
+    """Test that a paged search on a referred suffix doesn't crash the server.
+
+    :id: c788bdbf-965b-4f12-ac24-d4d695e2cce2
+
+    :setup: Standalone instance
+
+    :steps:
+        1. Configure a default referral.
+        2. Create a paged result search control.
+        3. Run a paged result search on the referral suffix (it doesn't exist on the instance, triggering a referral).
+        4. Check the server is still running.
+        5. Remove the referral.
+
+    :expectedresults:
+        1. Referral successfully set.
+        2. Control created.
+        3. Search returns ldap.REFERRAL (10).
+        4. Server still running.
+        5. Referral removed.
+    """
+
+    page_size = 5
+    SEARCH_SUFFIX = "dc=referme,dc=com"
+    REFERRAL = "ldap://localhost.localdomain:389/o%3dnetscaperoot"
+
+    log.info('Configuring referral')
+    topology_st.standalone.config.set('nsslapd-referral', REFERRAL)
+    referral = topology_st.standalone.config.get_attr_val_utf8('nsslapd-referral')
+    assert (referral == REFERRAL)
+
+    log.info('Create paged result search control')
+    req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
+
+    log.info('Perform a paged result search on referred suffix, no chase')
+    with pytest.raises(ldap.REFERRAL):
+        topology_st.standalone.search_ext_s(SEARCH_SUFFIX, ldap.SCOPE_SUBTREE, serverctrls=[req_ctrl])
+
+    log.info('Confirm instance is still running')
+    assert (topology_st.standalone.status())
+
+    log.info('Remove referral')
+    topology_st.standalone.config.remove_all('nsslapd-referral')
+    referral = topology_st.standalone.config.get_attr_val_utf8('nsslapd-referral')
+    assert (referral is None)
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 14a7dcdfb..03ed60981 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -879,7 +879,9 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
             /* Free the results if not "no_such_object" */
             void *sr = NULL;
             slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
-            be->be_search_results_release(&sr);
+            if (be->be_search_results_release != NULL) {
+                be->be_search_results_release(&sr);
+            }
         }
         pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
         rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1);
--
2.49.0
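For context on patch 0029: the crash is reachable from any client that issues a paged search against a suffix the server only holds a referral for, since the referral path leaves the paged-result search set NULL while cleanup still called be->be_search_results_release(). Outside the lib389 test above, the scenario can be reproduced with plain python-ldap; this is a minimal sketch, assuming a local instance with the referral already configured, and using illustrative bind credentials rather than values from the patch.

import ldap
from ldap.controls import SimplePagedResultsControl

# Bind without chasing referrals, as the test does (URL and credentials
# are illustrative assumptions, not values from the patch).
conn = ldap.initialize("ldap://localhost:389")
conn.set_option(ldap.OPT_REFERRALS, 0)
conn.simple_bind_s("cn=Directory Manager", "password")

# Paged search on a suffix this instance only knows as a referral.
page_ctrl = SimplePagedResultsControl(True, size=5, cookie='')
try:
    conn.search_ext_s("dc=referme,dc=com", ldap.SCOPE_SUBTREE,
                      serverctrls=[page_ctrl])
except ldap.REFERRAL:
    # Expected result. Before the opshared.c guard above, ns-slapd could
    # SEGV at this point while releasing the NULL paged-result search set.
    pass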

1018
SOURCES/Cargo-2.7.0-1.lock
Normal file
File diff suppressed because it is too large
@ -46,9 +46,9 @@ ExcludeArch: i686

Summary: 389 Directory Server (base)
Name: 389-ds-base
Version: 2.6.1
Release: 6%{?dist}
License: GPL-3.0-or-later WITH GPL-3.0-389-ds-base-exception AND (0BSD OR Apache-2.0 OR MIT) AND (Apache-2.0 OR Apache-2.0 WITH LLVM-exception OR MIT) AND (Apache-2.0 OR BSD-2-Clause OR MIT) AND (Apache-2.0 OR BSL-1.0) AND (Apache-2.0 OR MIT OR Zlib) AND (Apache-2.0 OR MIT) AND (CC-BY-4.0 AND MIT) AND (MIT OR Apache-2.0) AND Unicode-3.0 AND (MIT OR CC0-1.0) AND (MIT OR Unlicense) AND 0BSD AND Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MIT AND ISC AND MPL-2.0 AND PSF-2.0
Version: 2.7.0
Release: 5%{?dist}
License: GPL-3.0-or-later WITH GPL-3.0-389-ds-base-exception AND (0BSD OR Apache-2.0 OR MIT) AND (Apache-2.0 OR Apache-2.0 WITH LLVM-exception OR MIT) AND (Apache-2.0 OR BSL-1.0) AND (Apache-2.0 OR LGPL-2.1-or-later OR MIT) AND (Apache-2.0 OR MIT OR Zlib) AND (Apache-2.0 OR MIT) AND (MIT OR Apache-2.0) AND Unicode-3.0 AND (MIT OR Unlicense) AND Apache-2.0 AND MIT AND MPL-2.0 AND Zlib
URL: https://www.port389.org
Conflicts: selinux-policy-base < 3.9.8
Conflicts: freeipa-server < 4.0.3
@ -59,287 +59,102 @@ Provides: ldif2ldbm >= 0

##### Bundled cargo crates list - START #####
Provides: bundled(crate(addr2line)) = 0.24.2
Provides: bundled(crate(adler2)) = 2.0.0
Provides: bundled(crate(ahash)) = 0.7.8
Provides: bundled(crate(adler2)) = 2.0.1
Provides: bundled(crate(allocator-api2)) = 0.2.21
Provides: bundled(crate(atty)) = 0.2.14
Provides: bundled(crate(autocfg)) = 1.4.0
Provides: bundled(crate(backtrace)) = 0.3.74
Provides: bundled(crate(autocfg)) = 1.5.0
Provides: bundled(crate(backtrace)) = 0.3.75
Provides: bundled(crate(base64)) = 0.13.1
Provides: bundled(crate(bitflags)) = 2.8.0
Provides: bundled(crate(bitflags)) = 2.9.1
Provides: bundled(crate(byteorder)) = 1.5.0
Provides: bundled(crate(cbindgen)) = 0.26.0
Provides: bundled(crate(cc)) = 1.2.10
Provides: bundled(crate(cfg-if)) = 1.0.0
Provides: bundled(crate(cc)) = 1.2.31
Provides: bundled(crate(cfg-if)) = 1.0.1
Provides: bundled(crate(clap)) = 3.2.25
Provides: bundled(crate(clap_lex)) = 0.2.4
Provides: bundled(crate(concread)) = 0.2.21
Provides: bundled(crate(crossbeam)) = 0.8.4
Provides: bundled(crate(crossbeam-channel)) = 0.5.14
Provides: bundled(crate(crossbeam-deque)) = 0.8.6
Provides: bundled(crate(concread)) = 0.5.7
Provides: bundled(crate(crossbeam-epoch)) = 0.9.18
Provides: bundled(crate(crossbeam-queue)) = 0.3.12
Provides: bundled(crate(crossbeam-utils)) = 0.8.21
Provides: bundled(crate(errno)) = 0.3.10
Provides: bundled(crate(equivalent)) = 1.0.2
Provides: bundled(crate(errno)) = 0.3.13
Provides: bundled(crate(fastrand)) = 2.3.0
Provides: bundled(crate(fernet)) = 0.1.4
Provides: bundled(crate(foldhash)) = 0.1.5
Provides: bundled(crate(foreign-types)) = 0.3.2
Provides: bundled(crate(foreign-types-shared)) = 0.1.1
Provides: bundled(crate(getrandom)) = 0.2.15
Provides: bundled(crate(getrandom)) = 0.3.3
Provides: bundled(crate(gimli)) = 0.31.1
Provides: bundled(crate(hashbrown)) = 0.12.3
Provides: bundled(crate(hashbrown)) = 0.15.4
Provides: bundled(crate(heck)) = 0.4.1
Provides: bundled(crate(hermit-abi)) = 0.1.19
Provides: bundled(crate(indexmap)) = 1.9.3
Provides: bundled(crate(instant)) = 0.1.13
Provides: bundled(crate(itoa)) = 1.0.14
Provides: bundled(crate(jobserver)) = 0.1.32
Provides: bundled(crate(libc)) = 0.2.169
Provides: bundled(crate(linux-raw-sys)) = 0.4.15
Provides: bundled(crate(lock_api)) = 0.4.12
Provides: bundled(crate(log)) = 0.4.25
Provides: bundled(crate(lru)) = 0.7.8
Provides: bundled(crate(memchr)) = 2.7.4
Provides: bundled(crate(miniz_oxide)) = 0.8.3
Provides: bundled(crate(io-uring)) = 0.7.9
Provides: bundled(crate(itoa)) = 1.0.15
Provides: bundled(crate(jobserver)) = 0.1.33
Provides: bundled(crate(libc)) = 0.2.174
Provides: bundled(crate(linux-raw-sys)) = 0.9.4
Provides: bundled(crate(log)) = 0.4.27
Provides: bundled(crate(lru)) = 0.13.0
Provides: bundled(crate(memchr)) = 2.7.5
Provides: bundled(crate(miniz_oxide)) = 0.8.9
Provides: bundled(crate(mio)) = 1.0.4
Provides: bundled(crate(object)) = 0.36.7
Provides: bundled(crate(once_cell)) = 1.20.2
Provides: bundled(crate(openssl)) = 0.10.68
Provides: bundled(crate(once_cell)) = 1.21.3
Provides: bundled(crate(openssl)) = 0.10.73
Provides: bundled(crate(openssl-macros)) = 0.1.1
Provides: bundled(crate(openssl-sys)) = 0.9.104
Provides: bundled(crate(openssl-sys)) = 0.9.109
Provides: bundled(crate(os_str_bytes)) = 6.6.1
Provides: bundled(crate(parking_lot)) = 0.11.2
Provides: bundled(crate(parking_lot_core)) = 0.8.6
Provides: bundled(crate(paste)) = 0.1.18
Provides: bundled(crate(paste-impl)) = 0.1.18
Provides: bundled(crate(pin-project-lite)) = 0.2.16
Provides: bundled(crate(pkg-config)) = 0.3.31
Provides: bundled(crate(ppv-lite86)) = 0.2.20
Provides: bundled(crate(pkg-config)) = 0.3.32
Provides: bundled(crate(proc-macro-hack)) = 0.5.20+deprecated
Provides: bundled(crate(proc-macro2)) = 1.0.93
Provides: bundled(crate(quote)) = 1.0.38
Provides: bundled(crate(rand)) = 0.8.5
Provides: bundled(crate(rand_chacha)) = 0.3.1
Provides: bundled(crate(rand_core)) = 0.6.4
Provides: bundled(crate(redox_syscall)) = 0.2.16
Provides: bundled(crate(rustc-demangle)) = 0.1.24
Provides: bundled(crate(rustix)) = 0.38.44
Provides: bundled(crate(ryu)) = 1.0.18
Provides: bundled(crate(scopeguard)) = 1.2.0
Provides: bundled(crate(serde)) = 1.0.217
Provides: bundled(crate(serde_derive)) = 1.0.217
Provides: bundled(crate(serde_json)) = 1.0.137
Provides: bundled(crate(proc-macro2)) = 1.0.95
Provides: bundled(crate(quote)) = 1.0.40
Provides: bundled(crate(r-efi)) = 5.3.0
Provides: bundled(crate(rustc-demangle)) = 0.1.26
Provides: bundled(crate(rustix)) = 1.0.8
Provides: bundled(crate(ryu)) = 1.0.20
Provides: bundled(crate(serde)) = 1.0.219
Provides: bundled(crate(serde_derive)) = 1.0.219
Provides: bundled(crate(serde_json)) = 1.0.142
Provides: bundled(crate(shlex)) = 1.3.0
Provides: bundled(crate(smallvec)) = 1.13.2
Provides: bundled(crate(slab)) = 0.4.10
Provides: bundled(crate(smallvec)) = 1.15.1
Provides: bundled(crate(sptr)) = 0.3.2
Provides: bundled(crate(strsim)) = 0.10.0
Provides: bundled(crate(syn)) = 2.0.96
Provides: bundled(crate(tempfile)) = 3.15.0
Provides: bundled(crate(syn)) = 2.0.104
Provides: bundled(crate(tempfile)) = 3.20.0
Provides: bundled(crate(termcolor)) = 1.4.1
Provides: bundled(crate(textwrap)) = 0.16.1
Provides: bundled(crate(tokio)) = 1.43.0
Provides: bundled(crate(tokio-macros)) = 2.5.0
Provides: bundled(crate(textwrap)) = 0.16.2
Provides: bundled(crate(tokio)) = 1.47.1
Provides: bundled(crate(toml)) = 0.5.11
Provides: bundled(crate(unicode-ident)) = 1.0.15
Provides: bundled(crate(tracing)) = 0.1.41
Provides: bundled(crate(tracing-attributes)) = 0.1.30
Provides: bundled(crate(tracing-core)) = 0.1.34
Provides: bundled(crate(unicode-ident)) = 1.0.18
Provides: bundled(crate(uuid)) = 0.8.2
Provides: bundled(crate(vcpkg)) = 0.2.15
Provides: bundled(crate(version_check)) = 0.9.5
Provides: bundled(crate(wasi)) = 0.11.0+wasi_snapshot_preview1
Provides: bundled(crate(wasi)) = 0.14.2+wasi_0.2.4
Provides: bundled(crate(winapi)) = 0.3.9
Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0
Provides: bundled(crate(winapi-util)) = 0.1.9
Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0
Provides: bundled(crate(windows-sys)) = 0.59.0
Provides: bundled(crate(windows-targets)) = 0.52.6
Provides: bundled(crate(windows_aarch64_gnullvm)) = 0.52.6
Provides: bundled(crate(windows_aarch64_msvc)) = 0.52.6
Provides: bundled(crate(windows_i686_gnu)) = 0.52.6
Provides: bundled(crate(windows_i686_gnullvm)) = 0.52.6
Provides: bundled(crate(windows_i686_msvc)) = 0.52.6
Provides: bundled(crate(windows_x86_64_gnu)) = 0.52.6
Provides: bundled(crate(windows_x86_64_gnullvm)) = 0.52.6
Provides: bundled(crate(windows_x86_64_msvc)) = 0.52.6
Provides: bundled(crate(zerocopy)) = 0.7.35
Provides: bundled(crate(zerocopy-derive)) = 0.7.35
Provides: bundled(crate(windows-link)) = 0.1.3
Provides: bundled(crate(windows-sys)) = 0.60.2
Provides: bundled(crate(windows-targets)) = 0.53.3
Provides: bundled(crate(windows_aarch64_gnullvm)) = 0.53.0
Provides: bundled(crate(windows_aarch64_msvc)) = 0.53.0
Provides: bundled(crate(windows_i686_gnu)) = 0.53.0
Provides: bundled(crate(windows_i686_gnullvm)) = 0.53.0
Provides: bundled(crate(windows_i686_msvc)) = 0.53.0
Provides: bundled(crate(windows_x86_64_gnu)) = 0.53.0
Provides: bundled(crate(windows_x86_64_gnullvm)) = 0.53.0
Provides: bundled(crate(windows_x86_64_msvc)) = 0.53.0
Provides: bundled(crate(wit-bindgen-rt)) = 0.39.0
Provides: bundled(crate(zeroize)) = 1.8.1
Provides: bundled(crate(zeroize_derive)) = 1.4.2
Provides: bundled(npm(@aashutoshrathi/word-wrap)) = 1.2.6
Provides: bundled(npm(@eslint-community/eslint-utils)) = 4.4.0
Provides: bundled(npm(@eslint-community/regexpp)) = 4.5.1
Provides: bundled(npm(@eslint/eslintrc)) = 2.0.3
Provides: bundled(npm(@eslint/js)) = 8.42.0
Provides: bundled(npm(@fortawesome/fontawesome-common-types)) = 0.2.36
Provides: bundled(npm(@fortawesome/fontawesome-svg-core)) = 1.2.36
Provides: bundled(npm(@fortawesome/free-solid-svg-icons)) = 5.15.4
Provides: bundled(npm(@fortawesome/react-fontawesome)) = 0.1.19
Provides: bundled(npm(@humanwhocodes/config-array)) = 0.11.10
Provides: bundled(npm(@humanwhocodes/module-importer)) = 1.0.1
Provides: bundled(npm(@humanwhocodes/object-schema)) = 1.2.1
Provides: bundled(npm(@nodelib/fs.scandir)) = 2.1.5
Provides: bundled(npm(@nodelib/fs.stat)) = 2.0.5
Provides: bundled(npm(@nodelib/fs.walk)) = 1.2.8
Provides: bundled(npm(@patternfly/patternfly)) = 4.224.2
Provides: bundled(npm(@patternfly/react-charts)) = 6.94.19
Provides: bundled(npm(@patternfly/react-core)) = 4.276.8
Provides: bundled(npm(@patternfly/react-icons)) = 4.93.6
Provides: bundled(npm(@patternfly/react-styles)) = 4.92.6
Provides: bundled(npm(@patternfly/react-table)) = 4.113.0
Provides: bundled(npm(@patternfly/react-tokens)) = 4.94.6
Provides: bundled(npm(@types/d3-array)) = 3.0.5
Provides: bundled(npm(@types/d3-color)) = 3.1.0
Provides: bundled(npm(@types/d3-ease)) = 3.0.0
Provides: bundled(npm(@types/d3-interpolate)) = 3.0.1
Provides: bundled(npm(@types/d3-path)) = 3.0.0
Provides: bundled(npm(@types/d3-scale)) = 4.0.3
Provides: bundled(npm(@types/d3-shape)) = 3.1.1
Provides: bundled(npm(@types/d3-time)) = 3.0.0
Provides: bundled(npm(@types/d3-timer)) = 3.0.0
Provides: bundled(npm(acorn)) = 8.8.2
Provides: bundled(npm(acorn-jsx)) = 5.3.2
Provides: bundled(npm(ajv)) = 6.12.6
Provides: bundled(npm(ansi-regex)) = 5.0.1
Provides: bundled(npm(ansi-styles)) = 4.3.0
Provides: bundled(npm(argparse)) = 2.0.1
Provides: bundled(npm(attr-accept)) = 1.1.3
Provides: bundled(npm(balanced-match)) = 1.0.2
Provides: bundled(npm(brace-expansion)) = 1.1.11
Provides: bundled(npm(callsites)) = 3.1.0
Provides: bundled(npm(chalk)) = 4.1.2
Provides: bundled(npm(color-convert)) = 2.0.1
Provides: bundled(npm(color-name)) = 1.1.4
Provides: bundled(npm(concat-map)) = 0.0.1
Provides: bundled(npm(core-js)) = 2.6.12
Provides: bundled(npm(cross-spawn)) = 7.0.6
Provides: bundled(npm(d3-array)) = 3.2.4
Provides: bundled(npm(d3-color)) = 3.1.0
Provides: bundled(npm(d3-ease)) = 3.0.1
Provides: bundled(npm(d3-format)) = 3.1.0
Provides: bundled(npm(d3-interpolate)) = 3.0.1
Provides: bundled(npm(d3-path)) = 3.1.0
Provides: bundled(npm(d3-scale)) = 4.0.2
Provides: bundled(npm(d3-shape)) = 3.2.0
Provides: bundled(npm(d3-time)) = 3.1.0
Provides: bundled(npm(d3-time-format)) = 4.1.0
Provides: bundled(npm(d3-timer)) = 3.0.1
Provides: bundled(npm(debug)) = 4.3.4
Provides: bundled(npm(deep-is)) = 0.1.4
Provides: bundled(npm(delaunator)) = 4.0.1
Provides: bundled(npm(delaunay-find)) = 0.0.6
Provides: bundled(npm(doctrine)) = 3.0.0
Provides: bundled(npm(encoding)) = 0.1.13
Provides: bundled(npm(escape-string-regexp)) = 4.0.0
Provides: bundled(npm(eslint)) = 8.42.0
Provides: bundled(npm(eslint-plugin-react-hooks)) = 4.6.0
Provides: bundled(npm(eslint-scope)) = 7.2.0
Provides: bundled(npm(eslint-visitor-keys)) = 3.4.1
Provides: bundled(npm(espree)) = 9.5.2
Provides: bundled(npm(esquery)) = 1.5.0
Provides: bundled(npm(esrecurse)) = 4.3.0
Provides: bundled(npm(estraverse)) = 5.3.0
Provides: bundled(npm(esutils)) = 2.0.3
Provides: bundled(npm(fast-deep-equal)) = 3.1.3
Provides: bundled(npm(fast-json-stable-stringify)) = 2.1.0
Provides: bundled(npm(fast-levenshtein)) = 2.0.6
Provides: bundled(npm(fastq)) = 1.15.0
Provides: bundled(npm(file-entry-cache)) = 6.0.1
Provides: bundled(npm(file-selector)) = 0.1.19
Provides: bundled(npm(find-up)) = 5.0.0
Provides: bundled(npm(flat-cache)) = 3.0.4
Provides: bundled(npm(flatted)) = 3.2.7
Provides: bundled(npm(focus-trap)) = 6.9.2
Provides: bundled(npm(fs.realpath)) = 1.0.0
Provides: bundled(npm(gettext-parser)) = 2.0.0
Provides: bundled(npm(glob)) = 7.2.3
Provides: bundled(npm(glob-parent)) = 6.0.2
Provides: bundled(npm(globals)) = 13.20.0
Provides: bundled(npm(graphemer)) = 1.4.0
Provides: bundled(npm(has-flag)) = 4.0.0
Provides: bundled(npm(hoist-non-react-statics)) = 3.3.2
Provides: bundled(npm(iconv-lite)) = 0.6.3
Provides: bundled(npm(ignore)) = 5.2.4
Provides: bundled(npm(import-fresh)) = 3.3.0
Provides: bundled(npm(imurmurhash)) = 0.1.4
Provides: bundled(npm(inflight)) = 1.0.6
Provides: bundled(npm(inherits)) = 2.0.4
Provides: bundled(npm(internmap)) = 2.0.3
Provides: bundled(npm(is-extglob)) = 2.1.1
Provides: bundled(npm(is-glob)) = 4.0.3
Provides: bundled(npm(is-path-inside)) = 3.0.3
Provides: bundled(npm(isexe)) = 2.0.0
Provides: bundled(npm(js-tokens)) = 4.0.0
Provides: bundled(npm(js-yaml)) = 4.1.0
Provides: bundled(npm(json-schema-traverse)) = 0.4.1
Provides: bundled(npm(json-stable-stringify-without-jsonify)) = 1.0.1
Provides: bundled(npm(json-stringify-safe)) = 5.0.1
Provides: bundled(npm(levn)) = 0.4.1
Provides: bundled(npm(locate-path)) = 6.0.0
Provides: bundled(npm(lodash)) = 4.17.21
Provides: bundled(npm(lodash.merge)) = 4.6.2
Provides: bundled(npm(loose-envify)) = 1.4.0
Provides: bundled(npm(minimatch)) = 3.1.2
Provides: bundled(npm(ms)) = 2.1.2
Provides: bundled(npm(natural-compare)) = 1.4.0
Provides: bundled(npm(object-assign)) = 4.1.1
Provides: bundled(npm(once)) = 1.4.0
Provides: bundled(npm(optionator)) = 0.9.3
Provides: bundled(npm(p-limit)) = 3.1.0
Provides: bundled(npm(p-locate)) = 5.0.0
Provides: bundled(npm(parent-module)) = 1.0.1
Provides: bundled(npm(path-exists)) = 4.0.0
Provides: bundled(npm(path-is-absolute)) = 1.0.1
Provides: bundled(npm(path-key)) = 3.1.1
Provides: bundled(npm(popper.js)) = 1.16.1
Provides: bundled(npm(prelude-ls)) = 1.2.1
Provides: bundled(npm(prop-types)) = 15.8.1
Provides: bundled(npm(prop-types-extra)) = 1.1.1
Provides: bundled(npm(punycode)) = 2.3.0
Provides: bundled(npm(queue-microtask)) = 1.2.3
Provides: bundled(npm(react)) = 17.0.2
Provides: bundled(npm(react-dom)) = 17.0.2
Provides: bundled(npm(react-dropzone)) = 9.0.0
Provides: bundled(npm(react-fast-compare)) = 3.2.2
Provides: bundled(npm(react-is)) = 16.13.1
Provides: bundled(npm(resolve-from)) = 4.0.0
Provides: bundled(npm(reusify)) = 1.0.4
Provides: bundled(npm(rimraf)) = 3.0.2
Provides: bundled(npm(run-parallel)) = 1.2.0
Provides: bundled(npm(safe-buffer)) = 5.2.1
Provides: bundled(npm(safer-buffer)) = 2.1.2
Provides: bundled(npm(scheduler)) = 0.20.2
Provides: bundled(npm(shebang-command)) = 2.0.0
Provides: bundled(npm(shebang-regex)) = 3.0.0
Provides: bundled(npm(strip-ansi)) = 6.0.1
Provides: bundled(npm(strip-json-comments)) = 3.1.1
Provides: bundled(npm(supports-color)) = 7.2.0
Provides: bundled(npm(tabbable)) = 5.3.3
Provides: bundled(npm(text-table)) = 0.2.0
Provides: bundled(npm(tippy.js)) = 5.1.2
Provides: bundled(npm(tslib)) = 2.5.3
Provides: bundled(npm(type-check)) = 0.4.0
Provides: bundled(npm(type-fest)) = 0.20.2
Provides: bundled(npm(uri-js)) = 4.4.1
Provides: bundled(npm(victory-area)) = 36.6.10
Provides: bundled(npm(victory-axis)) = 36.6.10
Provides: bundled(npm(victory-bar)) = 36.6.10
Provides: bundled(npm(victory-brush-container)) = 36.6.10
Provides: bundled(npm(victory-chart)) = 36.6.10
Provides: bundled(npm(victory-core)) = 36.6.10
Provides: bundled(npm(victory-create-container)) = 36.6.10
Provides: bundled(npm(victory-cursor-container)) = 36.6.10
Provides: bundled(npm(victory-group)) = 36.6.10
Provides: bundled(npm(victory-legend)) = 36.6.10
Provides: bundled(npm(victory-line)) = 36.6.10
Provides: bundled(npm(victory-pie)) = 36.6.10
Provides: bundled(npm(victory-polar-axis)) = 36.6.10
Provides: bundled(npm(victory-scatter)) = 36.6.10
Provides: bundled(npm(victory-selection-container)) = 36.6.10
Provides: bundled(npm(victory-shared-events)) = 36.6.10
Provides: bundled(npm(victory-stack)) = 36.6.10
Provides: bundled(npm(victory-tooltip)) = 36.6.10
Provides: bundled(npm(victory-vendor)) = 36.6.10
Provides: bundled(npm(victory-voronoi-container)) = 36.6.10
Provides: bundled(npm(victory-zoom-container)) = 36.6.10
Provides: bundled(npm(warning)) = 4.0.3
Provides: bundled(npm(which)) = 2.0.2
Provides: bundled(npm(wrappy)) = 1.0.2
Provides: bundled(npm(yocto-queue)) = 0.1.0
##### Bundled cargo crates list - END #####

BuildRequires: nspr-devel >= 4.32
@ -407,6 +222,7 @@ BuildRequires: python%{python3_pkgversion}-argparse-manpage
BuildRequires: python%{python3_pkgversion}-libselinux
BuildRequires: python%{python3_pkgversion}-policycoreutils
BuildRequires: python%{python3_pkgversion}-cryptography
BuildRequires: python%{python3_pkgversion}-psutil

# For cockpit
%if %{use_cockpit}
@ -470,16 +286,38 @@ Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download
%endif
Source4: 389-ds-base.sysusers

Patch: 0001-Issue-6468-Fix-building-for-older-versions-of-Python.patch
Patch: 0002-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch
Patch: 0003-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch
Patch: 0004-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch
Patch: 0005-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch
Patch: 0006-Issue-6258-Mitigate-race-condition-in-paged_results_.patch
Patch: 0007-Issue-6229-After-an-initial-failure-subsequent-onlin.patch
Patch: 0008-Issue-6554-During-import-of-entries-without-nsUnique.patch
Patch: 0009-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch
Patch: 0010-Issue-6090-dbscan-use-bdb-by-default.patch
Source5: vendor-%{version}-1.tar.gz
Source6: Cargo-%{version}-1.lock

Patch: 0001-Issue-6377-syntax-error-in-setup.py-6378.patch
Patch: 0002-Issue-6838-lib389-replica.py-is-using-nonexistent-da.patch
Patch: 0003-Issue-6680-instance-read-only-mode-is-broken-6681.patch
Patch: 0004-Issue-6825-RootDN-Access-Control-Plugin-with-wildcar.patch
Patch: 0005-Issue-6119-Synchronise-accept_thread-with-slapd_daem.patch
Patch: 0006-Issue-6782-Improve-paged-result-locking.patch
Patch: 0007-Issue-6822-Backend-creation-cleanup-and-Database-UI-.patch
Patch: 0008-Issue-6857-uiduniq-allow-specifying-match-rules-in-t.patch
Patch: 0009-Issue-6756-CLI-UI-Properly-handle-disabled-NDN-cache.patch
Patch: 0010-Issue-6859-str2filter-is-not-fully-applying-matching.patch
Patch: 0011-Issue-6872-compressed-log-rotation-creates-files-wit.patch
Patch: 0012-Issue-6878-Prevent-repeated-disconnect-logs-during-s.patch
Patch: 0013-Issue-6772-dsconf-Replicas-with-the-consumer-role-al.patch
Patch: 0014-Issue-6893-Log-user-that-is-updated-during-password-.patch
Patch: 0015-Issue-6895-Crash-if-repl-keep-alive-entry-can-not-be.patch
Patch: 0016-Issue-6250-Add-test-for-entryUSN-overflow-on-failed-.patch
Patch: 0017-Issue-6594-Add-test-for-numSubordinates-replication-.patch
Patch: 0018-Issue-6884-Mask-password-hashes-in-audit-logs-6885.patch
Patch: 0019-Issue-6897-Fix-disk-monitoring-test-failures-and-imp.patch
Patch: 0020-Issue-6339-Address-Coverity-scan-issues-in-memberof-.patch
Patch: 0021-Issue-6468-CLI-Fix-default-error-log-level.patch
Patch: 0022-Issues-6913-6886-6250-Adjust-xfail-marks-6914.patch
Patch: 0023-Issue-6181-RFE-Allow-system-to-manage-uid-gid-at-sta.patch
Patch: 0024-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch
Patch: 0025-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch
Patch: 0026-Issue-6850-AddressSanitizer-memory-leak-in-mdb_init.patch
Patch: 0027-Issue-6848-AddressSanitizer-leak-in-do_search.patch
Patch: 0028-Issue-6865-AddressSanitizer-leak-in-agmt_update_init.patch
Patch: 0029-Issue-6768-ns-slapd-crashes-when-a-referral-is-added.patch

%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
@ -560,6 +398,7 @@ Requires: python%{python3_pkgversion}-argcomplete
Requires: python%{python3_pkgversion}-libselinux
Requires: python%{python3_pkgversion}-setuptools
Requires: python%{python3_pkgversion}-cryptography
Requires: python%{python3_pkgversion}-psutil
%{?python_provide:%python_provide python%{python3_pkgversion}-lib389}

%description -n python%{python3_pkgversion}-lib389
@ -582,6 +421,10 @@ A cockpit UI Plugin for configuring and administering the 389 Directory Server
%prep

%autosetup -p1 -n %{name}-%{version}
rm -rf vendor
tar xzf %{SOURCE5}
cp %{SOURCE6} src/Cargo.lock

%if %{bundle_jemalloc}
%setup -q -n %{name}-%{version} -T -D -b 3
%endif
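The %prep step above unpacks the vendored crate sources (Source5) and pins the build to the shipped lock file (Source6). The bundled-provides block earlier in the spec mirrors that lock file; the sketch below shows how such a list can be regenerated from it. This is a hypothetical helper written for illustration, not the packagers' actual generator.

#!/usr/bin/env python3
# Sketch: emit "Provides: bundled(crate(...))" lines from a Cargo.lock
# such as SOURCES/Cargo-2.7.0-1.lock. Hypothetical helper, not shipped
# with the package; Cargo.lock is plain TOML with [[package]] entries.
# A real generator would also skip the workspace's own crates.
import sys
import tomllib  # stdlib since Python 3.11; Cargo.lock is valid TOML

with open(sys.argv[1] if len(sys.argv) > 1 else "Cargo.lock", "rb") as f:
    lock = tomllib.load(f)

# Each [[package]] entry carries the crate name and pinned version.
for pkg in sorted(lock.get("package", []), key=lambda p: p["name"]):
    print(f"Provides: bundled(crate({pkg['name']})) = {pkg['version']}")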
@ -643,7 +486,7 @@ pushd ../%{jemalloc_name}-%{jemalloc_ver}
    --libdir=%{_libdir}/%{pkgname}/lib \
    --bindir=%{_libdir}/%{pkgname}/bin \
    --enable-prof
make %{?_smp_mflags}
%make_build
popd
%endif

@ -678,8 +521,7 @@ sed -i "1s/\"1\"/\"8\"/" %{_builddir}/%{name}-%{version}/src/lib389/man/dscreat
# Generate symbolic info for debuggers
export XCFLAGS=$RPM_OPT_FLAGS

#make %{?_smp_mflags}
make
%make_build

%install

@ -922,6 +764,37 @@ exit 0
%endif

%changelog
* Tue Aug 05 2025 Viktor Ashirov <vashirov@redhat.com> - 2.7.0-5
- Resolves: RHEL-89762 - dsidm Error: float() argument must be a string or a number, not 'NoneType' [rhel-9]
- Resolves: RHEL-92041 - Memory leak in roles_cache_create_object_from_entry
- Resolves: RHEL-95444 - ns-slapd[xxxx]: segfault at 10d7d0d0 ip 00007ff734050cdb sp 00007ff6de9f1430 error 6 in libslapd.so.0.1.0[7ff733ec0000+1b3000] [rhel-9]
- Resolves: RHEL-104821 - ipa-restore fails to restore SELinux contexts, causes ns-slapd AVC denials on /dev/shm after restore.
- Resolves: RHEL-107005 - Failure to get Server monitoring data when NDN cache is disabled. [rhel-9]
- Resolves: RHEL-107581 - segfault - error 4 in libpthread-2.28.so [rhel-9]
- Resolves: RHEL-107585 - ns-slapd crashed when we add nsslapd-referral [rhel-9]
- Resolves: RHEL-107586 - CWE-284 dirsrv log rotation creates files with world readable permission [rhel-9]
- Resolves: RHEL-107587 - CWE-532 Created user password hash available to see in audit log [rhel-9]
- Resolves: RHEL-107588 - CWE-778 Log doesn't show what user gets password changed by administrator [rhel-9]

* Mon Jul 21 2025 Viktor Ashirov <vashirov@redhat.com> - 2.7.0-4
- Resolves: RHEL-61347 - Directory Server is unavailable after a restart with nsslapd-readonly=on and consumes 100% CPU

* Tue Jul 01 2025 Viktor Ashirov <vashirov@redhat.com> - 2.7.0-3
- Resolves: RHEL-77983 - Defects found by OpenScanHub
- Resolves: RHEL-79673 - Improve the "result" field of ipa-healthcheck if replicas are busy
- Resolves: RHEL-80496 - Can't rename users member of automember rule [rhel-9]
- Resolves: RHEL-81141 - Healthcheck tool should warn admin about creating a substring index on membership attribute [rhel-9]
- Resolves: RHEL-89736 - dsconf backend replication monitor fails if replica id starts with 0 [rhel-9]
- Resolves: RHEL-89745 - ns-slapd crash in dbmdb_import_prepare_worker_entry() [rhel-9]
- Resolves: RHEL-89753 - Nested group does not receive memberOf attribute [rhel-9]
- Resolves: RHEL-89769 - Crash in __strlen_sse2 when using the nsRole filter rewriter. [rhel-9]
- Resolves: RHEL-89782 - RHDS12.2 NSMMReplicationPlugin - release_replica Unable to parse the response [rhel-9]
- Resolves: RHEL-95768 - Improve error message when bulk import connection is closed [rhel-9]
- Resolves: RHEL-101189 - lib389/replica.py is using nonexistent datetime.UTC in python3.9

* Mon Jun 30 2025 Viktor Ashirov <vashirov@redhat.com> - 2.7.0-1
- Resolves: RHEL-80163 - Rebase 389-ds-base to 2.7.x

* Fri Mar 14 2025 Viktor Ashirov <vashirov@redhat.com> - 2.6.1-6
- Resolves: RHEL-82271 - ipa-restore is failing with "Failed to start Directory Service"
