Compare commits

No commits in common. "c8-stream-1.4" and "c9-beta" have entirely different histories.

c8-stream-1.4 ... c9-beta
@@ -1,3 +1,3 @@
-bd9aab32d9cbf9231058d585479813f3420dc872 SOURCES/389-ds-base-1.4.3.39.tar.bz2
+e9ce5b0affef3f7a319958610c5382152f1b559f SOURCES/389-ds-base-2.7.0.tar.bz2
 1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2
-8d3275209f2f8e1a69053340930ad1fb037d61fb SOURCES/vendor-1.4.3.39-3.tar.gz
+b183c1ebee9c1d81d4b394df6de6521a8b333cbc SOURCES/vendor-2.7.0-1.tar.gz

.gitignore (vendored, 4 changes)
@@ -1,3 +1,3 @@
-SOURCES/389-ds-base-1.4.3.39.tar.bz2
+SOURCES/389-ds-base-2.7.0.tar.bz2
 SOURCES/jemalloc-5.3.0.tar.bz2
-SOURCES/vendor-1.4.3.39-3.tar.gz
+SOURCES/vendor-2.7.0-1.tar.gz

SOURCES/0001-Issue-6377-syntax-error-in-setup.py-6378.patch (new normal file, 40 lines)
@@ -0,0 +1,40 @@
From 5903fac2334f984d18aea663735fb260d6b100ed Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 22 Oct 2024 17:26:46 +0200
Subject: [PATCH] Issue 6377 - syntax error in setup.py (#6378)

A syntax error due to badly nested quotes in dblib.py causes trouble in
setup.py and dsconf dblib b2b2mdb/mdb2dbd.

Fix it by using double quotes for the f-expression and single quotes for the
embedded strings.

Issue: #6377

Reviewed by: @tbordaz, @droideck (Thanks!)
---
 src/lib389/lib389/cli_ctl/dblib.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/lib389/lib389/cli_ctl/dblib.py b/src/lib389/lib389/cli_ctl/dblib.py
index ff81f0e19..3f6e7b456 100644
--- a/src/lib389/lib389/cli_ctl/dblib.py
+++ b/src/lib389/lib389/cli_ctl/dblib.py
@@ -183,7 +183,7 @@ def export_changelog(be, dblib):
         return False
     try:
         cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname']
-        _log.info(f'Exporting changelog {cl5dbname} to {be['cl5name']}')
+        _log.info(f"Exporting changelog {cl5dbname} to {be['cl5name']}")
         run_dbscan(['-D', dblib, '-f', cl5dbname, '-X', be['cl5name']])
         return True
     except subprocess.CalledProcessError as e:
@@ -194,7 +194,7 @@ def import_changelog(be, dblib):
     # import backend changelog
     try:
         cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname']
-        _log.info(f'Importing changelog {cl5dbname} from {be['cl5name']}')
+        _log.info(f"Importing changelog {cl5dbname} from {be['cl5name']}")
        run_dbscan(['-D', dblib, '-f', cl5dbname, '--import', be['cl5name'], '--do-it'])
         return True
     except subprocess.CalledProcessError as e:
--
2.49.0
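The two changed lines above hinge on a Python quoting rule: before Python 3.12, an f-string cannot reuse its own quote character inside a replacement field, so f'...{be['cl5name']}...' is a syntax error. A minimal sketch of the failure and the fix (illustrative values only, not the actual dblib.py code):

be = {'cl5name': '/tmp/changelog.ldif'}   # hypothetical values for illustration
cl5dbname = 'replication_changelog.db'

# SyntaxError on Python < 3.12: the inner 'cl5name' quotes end the outer f-string.
#   msg = f'Exporting changelog {cl5dbname} to {be['cl5name']}'

# The patch's fix: double quotes outside, single quotes inside.
msg = f"Exporting changelog {cl5dbname} to {be['cl5name']}"
print(msg)  # Exporting changelog replication_changelog.db to /tmp/changelog.ldif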
@@ -1,119 +0,0 @@
From dddb14210b402f317e566b6387c76a8e659bf7fa Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 14 Feb 2023 13:34:10 +0100
Subject: [PATCH 1/2] issue 5647 - covscan: memory leak in audit log when
 adding entries (#5650)

covscan reported an issue about the "vals" variable in auditlog.c:231, and indeed a charray_free is missing.

Issue: 5647

Reviewed by: @mreynolds389, @droideck
---
 ldap/servers/slapd/auditlog.c | 71 +++++++++++++++++++----------------
 1 file changed, 38 insertions(+), 33 deletions(-)

diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 68cbc674d..3128e0497 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -177,6 +177,40 @@ write_auditfail_log_entry(Slapi_PBlock *pb)
     slapi_ch_free_string(&audit_config);
 }
 
+/*
+ * Write the attribute values to the audit log as "comments"
+ *
+ * Slapi_Attr *entry - the attribute being logged.
+ * char *attrname - the attribute name.
+ * lenstr *l - the audit log buffer
+ *
+ * Resulting output in the log:
+ *
+ * #ATTR: VALUE
+ * #ATTR: VALUE
+ */
+static void
+log_entry_attr(Slapi_Attr *entry_attr, char *attrname, lenstr *l)
+{
+    Slapi_Value **vals = attr_get_present_values(entry_attr);
+    for (size_t i = 0; vals && vals[i]; i++) {
+        char log_val[256] = "";
+        const struct berval *bv = slapi_value_get_berval(vals[i]);
+        if (bv->bv_len >= 256) {
+            strncpy(log_val, bv->bv_val, 252);
+            strcpy(log_val + 252, "...");
+        } else {
+            strncpy(log_val, bv->bv_val, bv->bv_len);
+            log_val[bv->bv_len] = 0;
+        }
+        addlenstr(l, "#");
+        addlenstr(l, attrname);
+        addlenstr(l, ": ");
+        addlenstr(l, log_val);
+        addlenstr(l, "\n");
+    }
+}
+
 /*
  * Write "requested" attributes from the entry to the audit log as "comments"
  *
@@ -212,21 +246,9 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
         for (req_attr = ldap_utf8strtok_r(display_attrs, ", ", &last); req_attr;
              req_attr = ldap_utf8strtok_r(NULL, ", ", &last))
         {
-            char **vals = slapi_entry_attr_get_charray(entry, req_attr);
-            for (size_t i = 0; vals && vals[i]; i++) {
-                char log_val[256] = {0};
-
-                if (strlen(vals[i]) > 256) {
-                    strncpy(log_val, vals[i], 252);
-                    strcat(log_val, "...");
-                } else {
-                    strcpy(log_val, vals[i]);
-                }
-                addlenstr(l, "#");
-                addlenstr(l, req_attr);
-                addlenstr(l, ": ");
-                addlenstr(l, log_val);
-                addlenstr(l, "\n");
+            slapi_entry_attr_find(entry, req_attr, &entry_attr);
+            if (entry_attr) {
+                log_entry_attr(entry_attr, req_attr, l);
             }
         }
     } else {
@@ -234,7 +256,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
         for (; entry_attr; entry_attr = entry_attr->a_next) {
             Slapi_Value **vals = attr_get_present_values(entry_attr);
             char *attr = NULL;
-            const char *val = NULL;
 
             slapi_attr_get_type(entry_attr, &attr);
             if (strcmp(attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) {
@@ -251,23 +272,7 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
                 addlenstr(l, ": ****************************\n");
                 continue;
             }
-
-            for (size_t i = 0; vals && vals[i]; i++) {
-                char log_val[256] = {0};
-
-                val = slapi_value_get_string(vals[i]);
-                if (strlen(val) > 256) {
-                    strncpy(log_val, val, 252);
-                    strcat(log_val, "...");
-                } else {
-                    strcpy(log_val, val);
-                }
-                addlenstr(l, "#");
-                addlenstr(l, attr);
-                addlenstr(l, ": ");
-                addlenstr(l, log_val);
-                addlenstr(l, "\n");
-            }
+            log_entry_attr(entry_attr, attr, l);
         }
     }
     slapi_ch_free_string(&display_attrs);
--
2.43.0
@@ -1,27 +0,0 @@
From be7c2b82958e91ce08775bf6b5da3c311d3b00e5 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 20 Feb 2023 16:14:05 +0100
Subject: [PATCH 2/2] Issue 5647 - Fix unused variable warning from previous
 commit (#5670)

* issue 5647 - memory leak in audit log when adding entries

* Issue 5647 - Fix unused variable warning from previous commit
---
 ldap/servers/slapd/auditlog.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 3128e0497..0597ecc6f 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -254,7 +254,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
     } else {
         /* Return all attributes */
         for (; entry_attr; entry_attr = entry_attr->a_next) {
-            Slapi_Value **vals = attr_get_present_values(entry_attr);
             char *attr = NULL;
 
             slapi_attr_get_type(entry_attr, &attr);
--
2.43.0
@@ -0,0 +1,37 @@
From a91c2641646824e44ef3b31a7eea238e3f55e5c3 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Tue, 1 Jul 2025 12:44:04 +0200
Subject: [PATCH] Issue 6838 - lib389/replica.py is using nonexistent
 datetime.UTC in Python 3.9

Bug Description:
389-ds-base-2.x is supposed to be used with Python 3.9.
But lib389/replica.py is using `datetime.UTC`, which is an alias
for `datetime.timezone.utc` that was added only in Python 3.11.

Fix Description:
Use `datetime.timezone.utc` instead.

Fixes: https://github.com/389ds/389-ds-base/issues/6838

Reviewed by: @mreynolds389 (Thanks!)
---
 src/lib389/lib389/replica.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index 8791f7f4c..78d6eb4eb 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -917,7 +917,7 @@ class RUV(object):
             ValueError("Wrong CSN value was supplied")
 
         timestamp = int(csn[:8], 16)
-        time_str = datetime.datetime.fromtimestamp(timestamp, datetime.UTC).strftime('%Y-%m-%d %H:%M:%S')
+        time_str = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
         # We are parsing shorter CSN which contains only timestamp
         if len(csn) == 8:
             return time_str
--
2.49.0
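For context on the one-line fix above: datetime.UTC exists only on Python 3.11 and later, while datetime.timezone.utc is the same tzinfo object and is available on Python 3.9. A minimal sketch (the hex string is a made-up CSN timestamp prefix, not taken from the repository):

import datetime

timestamp = int("686363a2", 16)  # first 8 hex digits of a hypothetical CSN
# datetime.UTC would raise AttributeError on Python 3.9; timezone.utc always works.
time_str = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
print(time_str)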
@@ -1,147 +0,0 @@
From 692c4cec6cc5c0086cf58f83bcfa690c766c9887 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Fri, 2 Feb 2024 14:14:28 +0100
Subject: [PATCH] Issue 5407 - sync_repl crashes if enabled while dynamic
 plugin is enabled (#5411)

Bug description:
When dynamic plugins are enabled and a MOD operation enables the sync_repl
plugin, the sync_repl init function registers the postop callback, which is
then called for that same MOD even though the preop was never called.
The postop expects the preop to have run and the primary operation to be
set; when it is not set, the server crashes.

Fix description:
If the primary operation is not set, just return.

relates: #5407
---
 .../suites/syncrepl_plugin/basic_test.py | 68 +++++++++++++++++++
 ldap/servers/plugins/sync/sync_persist.c | 23 ++++++-
 2 files changed, 90 insertions(+), 1 deletion(-)
diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
index eb3770b78..cdf35eeaa 100644
|
||||
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
@@ -592,6 +592,74 @@ def test_sync_repl_cenotaph(topo_m2, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+def test_sync_repl_dynamic_plugin(topology, request):
|
||||
+ """Test sync_repl with dynamic plugin
|
||||
+
|
||||
+ :id: d4f84913-c18a-459f-8525-110f610ca9e6
|
||||
+ :setup: install a standalone instance
|
||||
+ :steps:
|
||||
+ 1. reset instance to standard (no retroCL, no sync_repl, no dynamic plugin)
|
||||
+ 2. Enable dynamic plugin
|
||||
+ 3. Enable retroCL/content_sync
|
||||
+ 4. Establish a sync_repl req
|
||||
+ :expectedresults:
|
||||
+ 1. Should succeeds
|
||||
+ 2. Should succeeds
|
||||
+ 3. Should succeeds
|
||||
+ 4. Should succeeds
|
||||
+ """
|
||||
+
|
||||
+ # Reset the instance in a default config
|
||||
+ # Disable content sync plugin
|
||||
+ topology.standalone.plugins.disable(name=PLUGIN_REPL_SYNC)
|
||||
+
|
||||
+ # Disable retro changelog
|
||||
+ topology.standalone.plugins.disable(name=PLUGIN_RETRO_CHANGELOG)
|
||||
+
|
||||
+ # Disable dynamic plugins
|
||||
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'off')])
|
||||
+ topology.standalone.restart()
|
||||
+
|
||||
+ # Now start the test
|
||||
+ # Enable dynamic plugins
|
||||
+ try:
|
||||
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')])
|
||||
+ except ldap.LDAPError as e:
|
||||
+ log.error('Failed to enable dynamic plugin! {}'.format(e.args[0]['desc']))
|
||||
+ assert False
|
||||
+
|
||||
+ # Enable retro changelog
|
||||
+ topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
|
||||
+
|
||||
+ # Enbale content sync plugin
|
||||
+ topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
|
||||
+
|
||||
+ # create a sync repl client and wait 5 seconds to be sure it is running
|
||||
+ sync_repl = Sync_persist(topology.standalone)
|
||||
+ sync_repl.start()
|
||||
+ time.sleep(5)
|
||||
+
|
||||
+ # create users
|
||||
+ users = UserAccounts(topology.standalone, DEFAULT_SUFFIX)
|
||||
+ users_set = []
|
||||
+ for i in range(10001, 10004):
|
||||
+ users_set.append(users.create_test_user(uid=i))
|
||||
+
|
||||
+ time.sleep(10)
|
||||
+ # delete users, that automember/memberof will generate nested updates
|
||||
+ for user in users_set:
|
||||
+ user.delete()
|
||||
+ # stop the server to get the sync_repl result set (exit from while loop).
|
||||
+ # Only way I found to acheive that.
|
||||
+ # and wait a bit to let sync_repl thread time to set its result before fetching it.
|
||||
+ topology.standalone.stop()
|
||||
+ sync_repl.get_result()
|
||||
+ sync_repl.join()
|
||||
+ log.info('test_sync_repl_dynamic_plugin: PASS\n')
|
||||
+
|
||||
+ # Success
|
||||
+ log.info('Test complete')
|
||||
+
|
||||
def test_sync_repl_invalid_cookie(topology, request):
|
||||
"""Test sync_repl with invalid cookie
|
||||
|
||||
diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
|
||||
index d2210b64c..283607361 100644
|
||||
--- a/ldap/servers/plugins/sync/sync_persist.c
|
||||
+++ b/ldap/servers/plugins/sync/sync_persist.c
|
||||
@@ -156,6 +156,17 @@ ignore_op_pl(Slapi_PBlock *pb)
|
||||
* This is the same for ident
|
||||
*/
|
||||
prim_op = get_thread_primary_op();
|
||||
+ if (prim_op == NULL) {
|
||||
+ /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
|
||||
+ * The only known case it happens is with dynamic plugin enabled and an
|
||||
+ * update that enable the sync_repl plugin. In such case sync_repl registers
|
||||
+ * the postop (sync_update_persist_op) that is called while the preop was not called
|
||||
+ */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
|
||||
+ "ignore_op_pl - Operation without primary op set (0x%lx)\n",
|
||||
+ (ulong) op);
|
||||
+ return;
|
||||
+ }
|
||||
ident = sync_persist_get_operation_extension(pb);
|
||||
|
||||
if (ident) {
|
||||
@@ -232,8 +243,18 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber
|
||||
|
||||
|
||||
prim_op = get_thread_primary_op();
|
||||
+ if (prim_op == NULL) {
|
||||
+ /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
|
||||
+ * The only known case it happens is with dynamic plugin enabled and an
|
||||
+ * update that enable the sync_repl plugin. In such case sync_repl registers
|
||||
+ * the postop (sync_update_persist_op) that is called while the preop was not called
|
||||
+ */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
|
||||
+ "sync_update_persist_op - Operation without primary op set (0x%lx)\n",
|
||||
+ (ulong) pb_op);
|
||||
+ return;
|
||||
+ }
|
||||
ident = sync_persist_get_operation_extension(pb);
|
||||
- PR_ASSERT(prim_op);
|
||||
|
||||
if ((ident == NULL) && operation_is_flag_set(pb_op, OP_FLAG_NOOP)) {
|
||||
/* This happens for URP (add cenotaph, fixup rename, tombstone resurrect)
|
||||
--
|
||||
2.43.0
|
||||
|
@@ -0,0 +1,351 @@
From 4eef34cec551582d1de23266bc6cde84a7e38b5d Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 24 Mar 2025 10:43:21 +0100
Subject: [PATCH] Issue 6680 - instance read-only mode is broken (#6681)

Read-only mode is broken because some plugins fail to start, as they are not
able to create or update entries in the dse backend.
The solution is to allow internal operations to write to the dse backend but
not modify dse.ldif, except for the special case of modifying the
nsslapd-readonly flag (so that read-only mode can still be set and unset).

Issue: #6680

Reviewed by: @droideck, @tbordaz (thanks!)
---
 .../tests/suites/config/regression_test.py | 60 ++++++++++
 ldap/servers/slapd/dse.c | 110 +++++++++++++++++-
 ldap/servers/slapd/mapping_tree.c | 90 ++++++++++++--
 3 files changed, 247 insertions(+), 13 deletions(-)
diff --git a/dirsrvtests/tests/suites/config/regression_test.py b/dirsrvtests/tests/suites/config/regression_test.py
|
||||
index 8dbba8cd2..6e313ac8a 100644
|
||||
--- a/dirsrvtests/tests/suites/config/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/config/regression_test.py
|
||||
@@ -28,6 +28,8 @@ CUSTOM_MEM = '9100100100'
|
||||
IDLETIMEOUT = 5
|
||||
DN_TEST_USER = f'uid={TEST_USER_PROPERTIES["uid"]},ou=People,{DEFAULT_SUFFIX}'
|
||||
|
||||
+RO_ATTR = 'nsslapd-readonly'
|
||||
+
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def idletimeout_topo(topo, request):
|
||||
@@ -190,3 +192,61 @@ def test_idletimeout(idletimeout_topo, dn, expected_result):
|
||||
except ldap.SERVER_DOWN:
|
||||
result = True
|
||||
assert expected_result == result
|
||||
+
|
||||
+
|
||||
+def test_instance_readonly_mode(topo):
|
||||
+ """Check that readonly mode is supported
|
||||
+
|
||||
+ :id: 34d2e28e-04d7-11f0-b0cf-482ae39447e5
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Set readonly mode
|
||||
+ 2. Stop the instance
|
||||
+ 3. Get dse.ldif modification time
|
||||
+ 4. Start the instance
|
||||
+ 5. Get dse.ldif modification time
|
||||
+ 6. Check that modification time has not changed
|
||||
+ 7. Check that readonly mode is set
|
||||
+ 8. Try to modify another config attribute
|
||||
+ 9. Unset readonly mode
|
||||
+ 10. Restart the instance
|
||||
+ 11. Check that modification time has not changed
|
||||
+ 12. Check that modification time has changed
|
||||
+ 13. Check that readonly mode is unset
|
||||
+ 14. Try to modify another config attribute
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ 6. Success
|
||||
+ 7. Success
|
||||
+ 8. Should get ldap.UNWILLING_TO_PERFORM exception
|
||||
+ 9. Success
|
||||
+ 10. Success
|
||||
+ 11. Success
|
||||
+ 12. Success
|
||||
+ 13. Success
|
||||
+ 14. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topo.standalone
|
||||
+ dse_path = f'{topo.standalone.get_config_dir()}/dse.ldif'
|
||||
+ inst.config.replace(RO_ATTR, 'on')
|
||||
+ inst.stop()
|
||||
+ dse_mtime = os.stat(dse_path).st_mtime
|
||||
+ inst.start()
|
||||
+ new_dse_mtime = os.stat(dse_path).st_mtime
|
||||
+ assert dse_mtime == new_dse_mtime
|
||||
+ assert inst.config.get_attr_val_utf8(RO_ATTR) == "on"
|
||||
+ attr = 'nsslapd-errorlog-maxlogsize'
|
||||
+ val = inst.config.get_attr_val_utf8(attr)
|
||||
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
|
||||
+ inst.config.replace(attr, val)
|
||||
+ inst.config.replace(RO_ATTR, 'off')
|
||||
+ inst.restart()
|
||||
+ new_dse_mtime = os.stat(dse_path).st_mtime
|
||||
+ assert dse_mtime != new_dse_mtime
|
||||
+ assert inst.config.get_attr_val_utf8(RO_ATTR) == "off"
|
||||
+ inst.config.replace(attr, val)
|
||||
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
|
||||
index e3157c1ce..0f266f0d7 100644
|
||||
--- a/ldap/servers/slapd/dse.c
|
||||
+++ b/ldap/servers/slapd/dse.c
|
||||
@@ -1031,6 +1031,114 @@ dse_check_for_readonly_error(Slapi_PBlock *pb, struct dse *pdse)
|
||||
return rc; /* no error */
|
||||
}
|
||||
|
||||
+/* Trivial wrapper around slapi_re_comp to handle errors */
|
||||
+static Slapi_Regex *
|
||||
+recomp(const char *regexp)
|
||||
+{
|
||||
+ char *error = "";
|
||||
+ Slapi_Regex *re = slapi_re_comp(regexp, &error);
|
||||
+ if (re == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "is_readonly_set_in_dse",
|
||||
+ "Failed to compile '%s' regular expression. Error is %s\n",
|
||||
+ regexp, error);
|
||||
+ }
|
||||
+ slapi_ch_free_string(&error);
|
||||
+ return re;
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * Check if "nsslapd-readonly: on" is in cn-config in dse.ldif file
|
||||
+ * ( If the flag is set in memory but on in the file, the file should
|
||||
+ * be written (to let dsconf able to modify the nsslapd-readonly flag)
|
||||
+ */
|
||||
+static bool
|
||||
+is_readonly_set_in_dse(const char *dsename)
|
||||
+{
|
||||
+ Slapi_Regex *re_config = recomp("^dn:\\s+cn=config\\s*$");
|
||||
+ Slapi_Regex *re_isro = recomp("^" CONFIG_READONLY_ATTRIBUTE ":\\s+on\\s*$");
|
||||
+ Slapi_Regex *re_eoe = recomp("^$");
|
||||
+ bool isconfigentry = false;
|
||||
+ bool isro = false;
|
||||
+ FILE *fdse = NULL;
|
||||
+ char line[128];
|
||||
+ char *error = NULL;
|
||||
+ const char *regexp = "";
|
||||
+
|
||||
+ if (!dsename) {
|
||||
+ goto done;
|
||||
+ }
|
||||
+ if (re_config == NULL || re_isro == NULL || re_eoe == NULL) {
|
||||
+ goto done;
|
||||
+ }
|
||||
+ fdse = fopen(dsename, "r");
|
||||
+ if (fdse == NULL) {
|
||||
+ /* No dse file, we need to write it */
|
||||
+ goto done;
|
||||
+ }
|
||||
+ while (fgets(line, (sizeof line), fdse)) {
|
||||
+ /* Convert the read line to lowercase */
|
||||
+ for (char *pt=line; *pt; pt++) {
|
||||
+ if (isalpha(*pt)) {
|
||||
+ *pt = tolower(*pt);
|
||||
+ }
|
||||
+ }
|
||||
+ if (slapi_re_exec_nt(re_config, line)) {
|
||||
+ isconfigentry = true;
|
||||
+ }
|
||||
+ if (slapi_re_exec_nt(re_eoe, line)) {
|
||||
+ if (isconfigentry) {
|
||||
+ /* End of config entry ==> readonly flag is not set */
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
+ if (isconfigentry && slapi_re_exec_nt(re_isro, line)) {
|
||||
+ /* Found readonly flag */
|
||||
+ isro = true;
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
+done:
|
||||
+ if (fdse) {
|
||||
+ (void) fclose(fdse);
|
||||
+ }
|
||||
+ slapi_re_free(re_config);
|
||||
+ slapi_re_free(re_isro);
|
||||
+ slapi_re_free(re_eoe);
|
||||
+ return isro;
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * Check if dse.ldif can be written
|
||||
+ * Beware that even in read-only mode dse.ldif file
|
||||
+ * should still be written to change the nsslapd-readonly value
|
||||
+ */
|
||||
+static bool
|
||||
+check_if_readonly(struct dse *pdse)
|
||||
+{
|
||||
+ static bool ro = false;
|
||||
+
|
||||
+ if (pdse->dse_filename == NULL) {
|
||||
+ return false;
|
||||
+ }
|
||||
+ if (!slapi_config_get_readonly()) {
|
||||
+ ro = false;
|
||||
+ return ro;
|
||||
+ }
|
||||
+ if (ro) {
|
||||
+ /* read-only mode and dse is up to date ==> Do not modify it. */
|
||||
+ return ro;
|
||||
+ }
|
||||
+ /* First attempt to write the dse.ldif since readonly mode is enabled.
|
||||
+ * Lets check if "nsslapd-readonly: on" is in cn=config entry
|
||||
+ * and allow to write the dse.ldif if it is the case
|
||||
+ */
|
||||
+ if (is_readonly_set_in_dse(pdse->dse_filename)) {
|
||||
+ /* read-only mode and dse is up to date ==> Do not modify it. */
|
||||
+ ro = true;
|
||||
+ }
|
||||
+ /* Read only mode but nsslapd-readonly value is not up to date. */
|
||||
+ return ro;
|
||||
+}
|
||||
|
||||
/*
|
||||
* Write the AVL tree of entries back to the LDIF file.
|
||||
@@ -1041,7 +1149,7 @@ dse_write_file_nolock(struct dse *pdse)
|
||||
FPWrapper fpw;
|
||||
int rc = 0;
|
||||
|
||||
- if (dont_ever_write_dse_files) {
|
||||
+ if (dont_ever_write_dse_files || check_if_readonly(pdse)) {
|
||||
return rc;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c
|
||||
index dd7b1af37..e51b3b948 100644
|
||||
--- a/ldap/servers/slapd/mapping_tree.c
|
||||
+++ b/ldap/servers/slapd/mapping_tree.c
|
||||
@@ -2058,6 +2058,82 @@ slapi_dn_write_needs_referral(Slapi_DN *target_sdn, Slapi_Entry **referral)
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
+
|
||||
+/*
|
||||
+ * This function dermines if an operation should be rejected
|
||||
+ * when readonly mode is enabled.
|
||||
+ * All operations are rejected except:
|
||||
+ * - if they target a private backend that is not the DSE backend
|
||||
+ * - if they are read operations (SEARCH, COMPARE, BIND, UNBIND)
|
||||
+ * - if they are tombstone fixup operation (i.e: tombstone purging)
|
||||
+ * - if they are internal operation that targets the DSE backend.
|
||||
+ * (change will then be done in memory but not written in dse.ldif)
|
||||
+ * - single modify modify operation on cn=config changing nsslapd-readonly
|
||||
+ * (to allow "dsconf instance config replace nsslapd-readonly=xxx",
|
||||
+ change will then be done both in memory and in dse.ldif)
|
||||
+ */
|
||||
+static bool
|
||||
+is_rejected_op(Slapi_Operation *op, Slapi_Backend *be)
|
||||
+{
|
||||
+ const char *betype = slapi_be_gettype(be);
|
||||
+ unsigned long be_op_type = operation_get_type(op);
|
||||
+ int isdse = (betype && strcmp(betype, "DSE") == 0);
|
||||
+
|
||||
+ /* Private backend operations are not rejected */
|
||||
+
|
||||
+ /* Read operations are not rejected */
|
||||
+ if ((be_op_type == SLAPI_OPERATION_SEARCH) ||
|
||||
+ (be_op_type == SLAPI_OPERATION_COMPARE) ||
|
||||
+ (be_op_type == SLAPI_OPERATION_BIND) ||
|
||||
+ (be_op_type == SLAPI_OPERATION_UNBIND)) {
|
||||
+ return false;
|
||||
+ }
|
||||
+
|
||||
+ /* Tombstone fixup are not rejected. */
|
||||
+ if (operation_is_flag_set(op, OP_FLAG_TOMBSTONE_FIXUP)) {
|
||||
+ return false;
|
||||
+ }
|
||||
+
|
||||
+ if (!isdse) {
|
||||
+ /* write operation on readonly backends are rejected */
|
||||
+ if (be->be_readonly) {
|
||||
+ return true;
|
||||
+ }
|
||||
+
|
||||
+ /* private backends (DSE excepted) are not backed on files
|
||||
+ * so write operations are accepted.
|
||||
+ * but other operations (not on DSE) are rejected.
|
||||
+ */
|
||||
+ if (slapi_be_private(be)) {
|
||||
+ return false;
|
||||
+ } else {
|
||||
+ return true;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ /* Allowed operations in dse backend are:
|
||||
+ * - the internal operations and
|
||||
+ * - modify of nsslapd-readonly flag in cn=config
|
||||
+ */
|
||||
+
|
||||
+ if (operation_is_flag_set(op, OP_FLAG_INTERNAL)) {
|
||||
+ return false;
|
||||
+ }
|
||||
+ if (be_op_type == SLAPI_OPERATION_MODIFY) {
|
||||
+ Slapi_DN *sdn = operation_get_target_spec(op);
|
||||
+ Slapi_DN config = {0};
|
||||
+ LDAPMod **mods = op->o_params.p.p_modify.modify_mods;
|
||||
+ slapi_sdn_init_ndn_byref(&config, SLAPD_CONFIG_DN);
|
||||
+ if (mods && mods[0] && !mods[1] &&
|
||||
+ slapi_sdn_compare(sdn, &config) == 0 &&
|
||||
+ strcasecmp(mods[0]->mod_type, CONFIG_READONLY_ATTRIBUTE) == 0) {
|
||||
+ /* Single modifier impacting nsslapd-readonly */
|
||||
+ return false;
|
||||
+ }
|
||||
+ }
|
||||
+ return true;
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Description:
|
||||
* The reason we have a mapping tree. This function selects a backend or
|
||||
@@ -2095,7 +2171,6 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re
|
||||
int ret;
|
||||
int scope = LDAP_SCOPE_BASE;
|
||||
int op_type;
|
||||
- int fixup = 0;
|
||||
|
||||
if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) {
|
||||
/* shutdown detected */
|
||||
@@ -2112,7 +2187,6 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re
|
||||
|
||||
/* Get the target for this op */
|
||||
target_sdn = operation_get_target_spec(op);
|
||||
- fixup = operation_is_flag_set(op, OP_FLAG_TOMBSTONE_FIXUP);
|
||||
|
||||
PR_ASSERT(mapping_tree_inited == 1);
|
||||
|
||||
@@ -2161,22 +2235,14 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re
|
||||
* or if the whole server is readonly AND backend is public (!private)
|
||||
*/
|
||||
if ((ret == LDAP_SUCCESS) && *be && !be_isdeleted(*be) &&
|
||||
- (((*be)->be_readonly && !fixup) ||
|
||||
- ((slapi_config_get_readonly() && !fixup) &&
|
||||
- !slapi_be_private(*be)))) {
|
||||
- unsigned long be_op_type = operation_get_type(op);
|
||||
-
|
||||
- if ((be_op_type != SLAPI_OPERATION_SEARCH) &&
|
||||
- (be_op_type != SLAPI_OPERATION_COMPARE) &&
|
||||
- (be_op_type != SLAPI_OPERATION_BIND) &&
|
||||
- (be_op_type != SLAPI_OPERATION_UNBIND)) {
|
||||
+ ((*be)->be_readonly || slapi_config_get_readonly()) &&
|
||||
+ is_rejected_op(op, *be)) {
|
||||
if (errorbuf) {
|
||||
PL_strncpyz(errorbuf, slapi_config_get_readonly() ? "Server is read-only" : "database is read-only", ebuflen);
|
||||
}
|
||||
ret = LDAP_UNWILLING_TO_PERFORM;
|
||||
slapi_be_Unlock(*be);
|
||||
*be = NULL;
|
||||
- }
|
||||
}
|
||||
|
||||
return ret;
|
||||
--
|
||||
2.49.0
|
||||
|
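The regression test added by the read-only mode patch above verifies the fix by comparing the modification time of dse.ldif across restarts. A hedged sketch of that check, assuming `inst` is a started lib389 DirSrv instance as in the test:

import os

def check_readonly_protects_dse(inst):
    # With nsslapd-readonly on, a restart must not rewrite dse.ldif.
    dse_path = f'{inst.get_config_dir()}/dse.ldif'
    inst.config.replace('nsslapd-readonly', 'on')
    inst.stop()
    mtime_before = os.stat(dse_path).st_mtime
    inst.start()
    assert os.stat(dse_path).st_mtime == mtime_before   # unchanged while read-only
    inst.config.replace('nsslapd-readonly', 'off')
    inst.restart()
    assert os.stat(dse_path).st_mtime != mtime_before   # rewritten once writable again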
@@ -1,840 +0,0 @@
From 8dc61a176323f0d41df730abd715ccff3034c2be Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Sun, 27 Nov 2022 09:37:19 -0500
Subject: [PATCH] Issue 5547 - automember plugin improvements

Description:

The rebuild task has the following improvements:

- Only one task is allowed at a time.
- Do not clean up previous members by default. Add a new CLI option to
  intentionally clean up memberships before rebuilding from scratch.
- Add better task logging to show fixup progress.

To prevent automember from being called in a nested be_txn loop, thread
storage is used to detect and skip these loops.

relates: https://github.com/389ds/389-ds-base/issues/5547

Reviewed by: spichugi (Thanks!)
---
 .../automember_plugin/automember_mod_test.py | 43 +++-
 ldap/servers/plugins/automember/automember.c | 232 ++++++++++++++----
 ldap/servers/slapd/back-ldbm/ldbm_add.c | 11 +-
 ldap/servers/slapd/back-ldbm/ldbm_delete.c | 10 +-
 ldap/servers/slapd/back-ldbm/ldbm_modify.c | 11 +-
 .../lib389/cli_conf/plugins/automember.py | 10 +-
 src/lib389/lib389/plugins.py | 7 +-
 src/lib389/lib389/tasks.py | 9 +-
 8 files changed, 250 insertions(+), 83 deletions(-)
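The modified test in the diff below drives the reworked rebuild task through lib389. A hedged usage sketch (assumes a connected DirSrv instance `inst`; cleanup=True is the new option this patch adds to fixup()):

from lib389.plugins import AutoMembershipPlugin

def rebuild_memberships(inst, suffix):
    plugin = AutoMembershipPlugin(inst)
    # Default rebuild keeps existing memberships; cleanup=True clears them first.
    task = plugin.fixup(suffix, "objectclass=posixaccount", cleanup=True)
    task.wait()
    # A second fixup started while this one runs is refused with UNWILLING_TO_PERFORM.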
diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
index 8d25384bf..7a0ed3275 100644
|
||||
--- a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
+++ b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
@@ -5,12 +5,13 @@
|
||||
# License: GPL (version 3 or any later version).
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
-#
|
||||
+import ldap
|
||||
import logging
|
||||
import pytest
|
||||
import os
|
||||
+import time
|
||||
from lib389.utils import ds_is_older
|
||||
-from lib389._constants import *
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions
|
||||
from lib389.idm.user import UserAccounts
|
||||
from lib389.idm.group import Groups
|
||||
@@ -41,6 +42,11 @@ def automember_fixture(topo, request):
|
||||
user_accts = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
|
||||
user = user_accts.create_test_user()
|
||||
|
||||
+ # Create extra users
|
||||
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
|
||||
+ for i in range(0, 100):
|
||||
+ users.create_test_user(uid=i)
|
||||
+
|
||||
# Create automember definitions and regex rules
|
||||
automember_prop = {
|
||||
'cn': 'testgroup_definition',
|
||||
@@ -59,7 +65,7 @@ def automember_fixture(topo, request):
|
||||
automemberplugin.enable()
|
||||
topo.standalone.restart()
|
||||
|
||||
- return (user, groups)
|
||||
+ return user, groups
|
||||
|
||||
|
||||
def test_mods(automember_fixture, topo):
|
||||
@@ -72,19 +78,21 @@ def test_mods(automember_fixture, topo):
|
||||
2. Update user that should add it to group[1]
|
||||
3. Update user that should add it to group[2]
|
||||
4. Update user that should add it to group[0]
|
||||
- 5. Test rebuild task correctly moves user to group[1]
|
||||
+ 5. Test rebuild task adds user to group[1]
|
||||
+ 6. Test rebuild task cleanups groups and only adds it to group[1]
|
||||
:expectedresults:
|
||||
1. Success
|
||||
2. Success
|
||||
3. Success
|
||||
4. Success
|
||||
5. Success
|
||||
+ 6. Success
|
||||
"""
|
||||
(user, groups) = automember_fixture
|
||||
|
||||
# Update user which should go into group[0]
|
||||
user.replace('cn', 'whatever')
|
||||
- groups[0].is_member(user.dn)
|
||||
+ assert groups[0].is_member(user.dn)
|
||||
if groups[1].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -92,7 +100,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user0 which should go into group[1]
|
||||
user.replace('cn', 'mark')
|
||||
- groups[1].is_member(user.dn)
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -100,7 +108,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user which should go into group[2]
|
||||
user.replace('cn', 'simon')
|
||||
- groups[2].is_member(user.dn)
|
||||
+ assert groups[2].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[1].is_member(user.dn):
|
||||
@@ -108,7 +116,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user which should go back into group[0] (full circle)
|
||||
user.replace('cn', 'whatever')
|
||||
- groups[0].is_member(user.dn)
|
||||
+ assert groups[0].is_member(user.dn)
|
||||
if groups[1].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -128,12 +136,24 @@ def test_mods(automember_fixture, topo):
|
||||
automemberplugin.enable()
|
||||
topo.standalone.restart()
|
||||
|
||||
- # Run rebuild task
|
||||
+ # Run rebuild task (no cleanup)
|
||||
task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount")
|
||||
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
|
||||
+ # test only one fixup task is allowed at a time
|
||||
+ automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=top")
|
||||
task.wait()
|
||||
|
||||
- # Test membership
|
||||
- groups[1].is_member(user.dn)
|
||||
+ # Test membership (user should still be in groups[0])
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
+ if not groups[0].is_member(user.dn):
|
||||
+ assert False
|
||||
+
|
||||
+ # Run rebuild task with cleanup
|
||||
+ task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount", cleanup=True)
|
||||
+ task.wait()
|
||||
+
|
||||
+ # Test membership (user should only be in groups[1])
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -148,4 +168,3 @@ if __name__ == '__main__':
|
||||
# -s for DEBUG mode
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main(["-s", CURRENT_FILE])
|
||||
-
|
||||
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
|
||||
index 3494d0343..419adb052 100644
|
||||
--- a/ldap/servers/plugins/automember/automember.c
|
||||
+++ b/ldap/servers/plugins/automember/automember.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2011 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -14,7 +14,7 @@
|
||||
* Auto Membership Plug-in
|
||||
*/
|
||||
#include "automember.h"
|
||||
-
|
||||
+#include <pthread.h>
|
||||
|
||||
/*
|
||||
* Plug-in globals
|
||||
@@ -22,7 +22,9 @@
|
||||
static PRCList *g_automember_config = NULL;
|
||||
static Slapi_RWLock *g_automember_config_lock = NULL;
|
||||
static uint64_t abort_rebuild_task = 0;
|
||||
-
|
||||
+static pthread_key_t td_automem_block_nested;
|
||||
+static PRBool fixup_running = PR_FALSE;
|
||||
+static PRLock *fixup_lock = NULL;
|
||||
static void *_PluginID = NULL;
|
||||
static Slapi_DN *_PluginDN = NULL;
|
||||
static Slapi_DN *_ConfigAreaDN = NULL;
|
||||
@@ -93,9 +95,43 @@ static void automember_task_export_destructor(Slapi_Task *task);
|
||||
static void automember_task_map_destructor(Slapi_Task *task);
|
||||
|
||||
#define DEFAULT_FILE_MODE PR_IRUSR | PR_IWUSR
|
||||
+#define FIXUP_PROGRESS_LIMIT 1000
|
||||
static uint64_t plugin_do_modify = 0;
|
||||
static uint64_t plugin_is_betxn = 0;
|
||||
|
||||
+/* automember_plugin fixup task and add operations should block other be_txn
|
||||
+ * plugins from calling automember_post_op_mod() */
|
||||
+static int32_t
|
||||
+slapi_td_block_nested_post_op(void)
|
||||
+{
|
||||
+ int32_t val = 12345;
|
||||
+
|
||||
+ if (pthread_setspecific(td_automem_block_nested, (void *)&val) != 0) {
|
||||
+ return PR_FAILURE;
|
||||
+ }
|
||||
+ return PR_SUCCESS;
|
||||
+}
|
||||
+
|
||||
+static int32_t
|
||||
+slapi_td_unblock_nested_post_op(void)
|
||||
+{
|
||||
+ if (pthread_setspecific(td_automem_block_nested, NULL) != 0) {
|
||||
+ return PR_FAILURE;
|
||||
+ }
|
||||
+ return PR_SUCCESS;
|
||||
+}
|
||||
+
|
||||
+static int32_t
|
||||
+slapi_td_is_post_op_nested(void)
|
||||
+{
|
||||
+ int32_t *value = pthread_getspecific(td_automem_block_nested);
|
||||
+
|
||||
+ if (value == NULL) {
|
||||
+ return 0;
|
||||
+ }
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Config cache locking functions
|
||||
*/
|
||||
@@ -317,6 +353,14 @@ automember_start(Slapi_PBlock *pb)
|
||||
return -1;
|
||||
}
|
||||
|
||||
+ if (fixup_lock == NULL) {
|
||||
+ if ((fixup_lock = PR_NewLock()) == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_start - Failed to create fixup lock.\n");
|
||||
+ return -1;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
/*
|
||||
* Get the plug-in target dn from the system
|
||||
* and store it for future use. */
|
||||
@@ -360,6 +404,11 @@ automember_start(Slapi_PBlock *pb)
|
||||
}
|
||||
}
|
||||
|
||||
+ if (pthread_key_create(&td_automem_block_nested, NULL) != 0) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_start - pthread_key_create failed\n");
|
||||
+ }
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_start - ready for service\n");
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
@@ -394,6 +443,8 @@ automember_close(Slapi_PBlock *pb __attribute__((unused)))
|
||||
slapi_sdn_free(&_ConfigAreaDN);
|
||||
slapi_destroy_rwlock(g_automember_config_lock);
|
||||
g_automember_config_lock = NULL;
|
||||
+ PR_DestroyLock(fixup_lock);
|
||||
+ fixup_lock = NULL;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"<-- automember_close\n");
|
||||
@@ -1619,7 +1670,6 @@ out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
-
|
||||
/*
|
||||
* automember_update_member_value()
|
||||
*
|
||||
@@ -1634,7 +1684,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
LDAPMod *mods[2];
|
||||
char *vals[2];
|
||||
char *member_value = NULL;
|
||||
- int rc = 0;
|
||||
+ int rc = LDAP_SUCCESS;
|
||||
Slapi_DN *group_sdn;
|
||||
|
||||
/* First thing check that the group still exists */
|
||||
@@ -1653,7 +1703,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
"automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n",
|
||||
group_dn, rc);
|
||||
}
|
||||
- return rc;
|
||||
+ goto out;
|
||||
}
|
||||
|
||||
/* If grouping_value is dn, we need to fetch the dn instead. */
|
||||
@@ -1879,6 +1929,13 @@ automember_mod_post_op(Slapi_PBlock *pb)
|
||||
PRCList *list = NULL;
|
||||
int rc = SLAPI_PLUGIN_SUCCESS;
|
||||
|
||||
+ if (slapi_td_is_post_op_nested()) {
|
||||
+ /* don't process op twice in the same thread */
|
||||
+ return rc;
|
||||
+ } else {
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ }
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"--> automember_mod_post_op\n");
|
||||
|
||||
@@ -2005,6 +2062,7 @@ automember_mod_post_op(Slapi_PBlock *pb)
|
||||
}
|
||||
}
|
||||
}
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"<-- automember_mod_post_op (%d)\n", rc);
|
||||
@@ -2024,6 +2082,13 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"--> automember_add_post_op\n");
|
||||
|
||||
+ if (slapi_td_is_post_op_nested()) {
|
||||
+ /* don't process op twice in the same thread */
|
||||
+ return rc;
|
||||
+ } else {
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ }
|
||||
+
|
||||
/* Reload config if a config entry was added. */
|
||||
if ((sdn = automember_get_sdn(pb))) {
|
||||
if (automember_dn_is_config(sdn)) {
|
||||
@@ -2039,7 +2104,7 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
|
||||
/* If replication, just bail. */
|
||||
if (automember_isrepl(pb)) {
|
||||
- return SLAPI_PLUGIN_SUCCESS;
|
||||
+ goto bail;
|
||||
}
|
||||
|
||||
/* Get the newly added entry. */
|
||||
@@ -2052,7 +2117,7 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
tombstone);
|
||||
slapi_value_free(&tombstone);
|
||||
if (is_tombstone) {
|
||||
- return SLAPI_PLUGIN_SUCCESS;
|
||||
+ goto bail;
|
||||
}
|
||||
|
||||
/* Check if a config entry applies
|
||||
@@ -2063,21 +2128,19 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
list = PR_LIST_HEAD(g_automember_config);
|
||||
while (list != g_automember_config) {
|
||||
config = (struct configEntry *)list;
|
||||
-
|
||||
/* Does the entry meet scope and filter requirements? */
|
||||
if (slapi_dn_issuffix(slapi_sdn_get_dn(sdn), config->scope) &&
|
||||
- (slapi_filter_test_simple(e, config->filter) == 0)) {
|
||||
+ (slapi_filter_test_simple(e, config->filter) == 0))
|
||||
+ {
|
||||
/* Find out what membership changes are needed and make them. */
|
||||
if (automember_update_membership(config, e, NULL) == SLAPI_PLUGIN_FAILURE) {
|
||||
rc = SLAPI_PLUGIN_FAILURE;
|
||||
break;
|
||||
}
|
||||
}
|
||||
-
|
||||
list = PR_NEXT_LINK(list);
|
||||
}
|
||||
}
|
||||
-
|
||||
automember_config_unlock();
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
@@ -2098,6 +2161,7 @@ bail:
|
||||
slapi_pblock_set(pb, SLAPI_RESULT_CODE, &result);
|
||||
slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, &errtxt);
|
||||
}
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
|
||||
return rc;
|
||||
}
|
||||
@@ -2138,6 +2202,7 @@ typedef struct _task_data
|
||||
Slapi_DN *base_dn;
|
||||
char *bind_dn;
|
||||
int scope;
|
||||
+ PRBool cleanup;
|
||||
} task_data;
|
||||
|
||||
static void
|
||||
@@ -2270,6 +2335,7 @@ automember_task_abort_thread(void *arg)
|
||||
* basedn: dc=example,dc=com
|
||||
* filter: (uid=*)
|
||||
* scope: sub
|
||||
+ * cleanup: yes/on (default is off)
|
||||
*
|
||||
* basedn and filter are required. If scope is omitted, the default is sub
|
||||
*/
|
||||
@@ -2284,9 +2350,22 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
const char *base_dn;
|
||||
const char *filter;
|
||||
const char *scope;
|
||||
+ const char *cleanup_str;
|
||||
+ PRBool cleanup = PR_FALSE;
|
||||
|
||||
*returncode = LDAP_SUCCESS;
|
||||
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ if (fixup_running) {
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_task_add - there is already a fixup task running\n");
|
||||
+ rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
+ goto out;
|
||||
+ }
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+
|
||||
/*
|
||||
* Grab the task params
|
||||
*/
|
||||
@@ -2300,6 +2379,12 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
goto out;
|
||||
}
|
||||
+ if ((cleanup_str = slapi_entry_attr_get_ref(e, "cleanup"))) {
|
||||
+ if (strcasecmp(cleanup_str, "yes") == 0 || strcasecmp(cleanup_str, "on")) {
|
||||
+ cleanup = PR_TRUE;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
scope = slapi_fetch_attr(e, "scope", "sub");
|
||||
/*
|
||||
* setup our task data
|
||||
@@ -2315,6 +2400,7 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
mytaskdata->bind_dn = slapi_ch_strdup(bind_dn);
|
||||
mytaskdata->base_dn = slapi_sdn_new_dn_byval(base_dn);
|
||||
mytaskdata->filter_str = slapi_ch_strdup(filter);
|
||||
+ mytaskdata->cleanup = cleanup;
|
||||
|
||||
if (scope) {
|
||||
if (strcasecmp(scope, "sub") == 0) {
|
||||
@@ -2334,6 +2420,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
task = slapi_plugin_new_task(slapi_entry_get_ndn(e), arg);
|
||||
slapi_task_set_destructor_fn(task, automember_task_destructor);
|
||||
slapi_task_set_data(task, mytaskdata);
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_TRUE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
/*
|
||||
* Start the task as a separate thread
|
||||
*/
|
||||
@@ -2345,6 +2434,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
"automember_task_add - Unable to create task thread!\n");
|
||||
*returncode = LDAP_OPERATIONS_ERROR;
|
||||
slapi_task_finish(task, *returncode);
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_FALSE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
} else {
|
||||
rv = SLAPI_DSE_CALLBACK_OK;
|
||||
@@ -2372,6 +2464,9 @@ automember_rebuild_task_thread(void *arg)
|
||||
PRCList *list = NULL;
|
||||
PRCList *include_list = NULL;
|
||||
int result = 0;
|
||||
+ int64_t fixup_progress_count = 0;
|
||||
+ int64_t fixup_progress_elapsed = 0;
|
||||
+ int64_t fixup_start_time = 0;
|
||||
size_t i = 0;
|
||||
|
||||
/* Reset abort flag */
|
||||
@@ -2380,6 +2475,7 @@ automember_rebuild_task_thread(void *arg)
|
||||
if (!task) {
|
||||
return; /* no task */
|
||||
}
|
||||
+
|
||||
slapi_task_inc_refcount(task);
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_rebuild_task_thread - Refcount incremented.\n");
|
||||
@@ -2393,9 +2489,11 @@ automember_rebuild_task_thread(void *arg)
|
||||
slapi_task_log_status(task, "Automember rebuild task starting (base dn: (%s) filter (%s)...",
|
||||
slapi_sdn_get_dn(td->base_dn), td->filter_str);
|
||||
/*
|
||||
- * Set the bind dn in the local thread data
|
||||
+ * Set the bind dn in the local thread data, and block post op mods
|
||||
*/
|
||||
slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ fixup_start_time = slapi_current_rel_time_t();
|
||||
/*
|
||||
* Take the config lock now and search the database
|
||||
*/
|
||||
@@ -2426,6 +2524,21 @@ automember_rebuild_task_thread(void *arg)
|
||||
* Loop over the entries
|
||||
*/
|
||||
for (i = 0; entries && (entries[i] != NULL); i++) {
|
||||
+ fixup_progress_count++;
|
||||
+ if (fixup_progress_count % FIXUP_PROGRESS_LIMIT == 0 ) {
|
||||
+ slapi_task_log_notice(task,
|
||||
+ "Processed %ld entries in %ld seconds (+%ld seconds)",
|
||||
+ fixup_progress_count,
|
||||
+ slapi_current_rel_time_t() - fixup_start_time,
|
||||
+ slapi_current_rel_time_t() - fixup_progress_elapsed);
|
||||
+ slapi_task_log_status(task,
|
||||
+ "Processed %ld entries in %ld seconds (+%ld seconds)",
|
||||
+ fixup_progress_count,
|
||||
+ slapi_current_rel_time_t() - fixup_start_time,
|
||||
+ slapi_current_rel_time_t() - fixup_progress_elapsed);
|
||||
+ slapi_task_inc_progress(task);
|
||||
+ fixup_progress_elapsed = slapi_current_rel_time_t();
|
||||
+ }
|
||||
if (slapi_atomic_load_64(&abort_rebuild_task, __ATOMIC_ACQUIRE) == 1) {
|
||||
/* The task was aborted */
|
||||
slapi_task_log_notice(task, "Automember rebuild task was intentionally aborted");
|
||||
@@ -2443,48 +2556,66 @@ automember_rebuild_task_thread(void *arg)
|
||||
if (slapi_dn_issuffix(slapi_entry_get_dn(entries[i]), config->scope) &&
|
||||
(slapi_filter_test_simple(entries[i], config->filter) == 0))
|
||||
{
|
||||
- /* First clear out all the defaults groups */
|
||||
- for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
|
||||
- if ((result = automember_update_member_value(entries[i], config->default_groups[ii],
|
||||
- config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
|
||||
- {
|
||||
- slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from default group (%s) error (%d)",
|
||||
- config->default_groups[ii], result);
|
||||
- slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from default group (%s) error (%d)",
|
||||
- config->default_groups[ii], result);
|
||||
- slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
- "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
- config->default_groups[ii], result);
|
||||
- goto out;
|
||||
- }
|
||||
- }
|
||||
-
|
||||
- /* Then clear out the non-default group */
|
||||
- if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
|
||||
- include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
|
||||
- while (include_list != (PRCList *)config->inclusive_rules) {
|
||||
- struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
|
||||
- if ((result = automember_update_member_value(entries[i], slapi_sdn_get_dn(curr_rule->target_group_dn),
|
||||
- config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
|
||||
+ if (td->cleanup) {
|
||||
+
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Cleaning up groups (config %s)\n",
|
||||
+ config->dn);
|
||||
+ /* First clear out all the defaults groups */
|
||||
+ for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
|
||||
+ if ((result = automember_update_member_value(entries[i],
|
||||
+ config->default_groups[ii],
|
||||
+ config->grouping_attr,
|
||||
+ config->grouping_value,
|
||||
+ NULL, DEL_MEMBER)))
|
||||
{
|
||||
slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from group (%s) error (%d)",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ "member from default group (%s) error (%d)",
|
||||
+ config->default_groups[ii], result);
|
||||
slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from group (%s) error (%d)",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ "member from default group (%s) error (%d)",
|
||||
+ config->default_groups[ii], result);
|
||||
slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ config->default_groups[ii], result);
|
||||
goto out;
|
||||
}
|
||||
- include_list = PR_NEXT_LINK(include_list);
|
||||
}
|
||||
+
|
||||
+ /* Then clear out the non-default group */
|
||||
+ if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
|
||||
+ include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
|
||||
+ while (include_list != (PRCList *)config->inclusive_rules) {
|
||||
+ struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
|
||||
+ if ((result = automember_update_member_value(entries[i],
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn),
|
||||
+ config->grouping_attr,
|
||||
+ config->grouping_value,
|
||||
+ NULL, DEL_MEMBER)))
|
||||
+ {
|
||||
+ slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
+ "member from group (%s) error (%d)",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
+ "member from group (%s) error (%d)",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ goto out;
|
||||
+ }
|
||||
+ include_list = PR_NEXT_LINK(include_list);
|
||||
+ }
|
||||
+ }
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Finished cleaning up groups (config %s)\n",
|
||||
+ config->dn);
|
||||
}
|
||||
|
||||
/* Update the memberships for this entries */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Updating membership (config %s)\n",
|
||||
+ config->dn);
|
||||
if (slapi_is_shutting_down() ||
|
||||
automember_update_membership(config, entries[i], NULL) == SLAPI_PLUGIN_FAILURE)
|
||||
{
|
||||
@@ -2508,15 +2639,22 @@ out:
|
||||
slapi_task_log_notice(task, "Automember rebuild task aborted. Error (%d)", result);
|
||||
slapi_task_log_status(task, "Automember rebuild task aborted. Error (%d)", result);
|
||||
} else {
|
||||
- slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
|
||||
- slapi_task_log_status(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
|
||||
+ slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
|
||||
+ (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
|
||||
+ slapi_task_log_status(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
|
||||
+ (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
|
||||
}
|
||||
slapi_task_inc_progress(task);
|
||||
slapi_task_finish(task, result);
|
||||
slapi_task_dec_refcount(task);
|
||||
slapi_atomic_store_64(&abort_rebuild_task, 0, __ATOMIC_RELEASE);
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_FALSE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
- "automember_rebuild_task_thread - Refcount decremented.\n");
|
||||
+ "automember_rebuild_task_thread - task finished, refcount decremented.\n");
|
||||
}
|
||||
|
||||
/*
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
index ba2d73a84..ce4c314a1 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -1264,10 +1264,6 @@ ldbm_back_add(Slapi_PBlock *pb)
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
if (addingentry_id_assigned) {
|
||||
next_id_return(be, addingentry->ep_id);
|
||||
}
|
||||
@@ -1376,6 +1372,11 @@ diskfull_return:
|
||||
if (!not_an_error) {
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
}
|
||||
+
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
}
|
||||
|
||||
common_return:
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
index de23190c3..27f0ac58a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
@@ -1407,11 +1407,6 @@ commit_return:
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
-
|
||||
if (tombstone) {
|
||||
if (cache_is_in_cache(&inst->inst_cache, tombstone)) {
|
||||
tomb_ep_id = tombstone->ep_id; /* Otherwise, tombstone might have been freed. */
|
||||
@@ -1496,6 +1491,11 @@ error_return:
|
||||
conn_id, op_id, parent_modify_c.old_entry, parent_modify_c.new_entry, myrc);
|
||||
}
|
||||
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
+
|
||||
common_return:
|
||||
if (orig_entry) {
|
||||
/* NOTE: #define SLAPI_DELETE_BEPREOP_ENTRY SLAPI_ENTRY_PRE_OP */
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
index 537369055..64b293001 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -1043,11 +1043,6 @@ ldbm_back_modify(Slapi_PBlock *pb)
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
-
|
||||
if (postentry != NULL) {
|
||||
slapi_entry_free(postentry);
|
||||
postentry = NULL;
|
||||
@@ -1103,6 +1098,10 @@ error_return:
|
||||
if (!not_an_error) {
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
}
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
}
|
||||
|
||||
/* if ec is in cache, remove it, then add back e if we still have it */
|
||||
diff --git a/src/lib389/lib389/cli_conf/plugins/automember.py b/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
index 15b00c633..568586ad8 100644
|
||||
--- a/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
+++ b/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
@@ -155,7 +155,7 @@ def fixup(inst, basedn, log, args):
|
||||
log.info('Attempting to add task entry... This will fail if Automembership plug-in is not enabled.')
|
||||
if not plugin.status():
|
||||
log.error("'%s' is disabled. Rebuild membership task can't be executed" % plugin.rdn)
|
||||
- fixup_task = plugin.fixup(args.DN, args.filter)
|
||||
+ fixup_task = plugin.fixup(args.DN, args.filter, args.cleanup)
|
||||
if args.wait:
|
||||
log.info(f'Waiting for fixup task "{fixup_task.dn}" to complete. You can safely exit by pressing Control C ...')
|
||||
fixup_task.wait(timeout=args.timeout)
|
||||
@@ -225,8 +225,8 @@ def create_parser(subparsers):
|
||||
subcommands = automember.add_subparsers(help='action')
|
||||
add_generic_plugin_parsers(subcommands, AutoMembershipPlugin)
|
||||
|
||||
- list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
|
||||
- subcommands_list = list.add_subparsers(help='action')
|
||||
+ automember_list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
|
||||
+ subcommands_list = automember_list.add_subparsers(help='action')
|
||||
list_definitions = subcommands_list.add_parser('definitions', help='Lists Automembership definitions.')
|
||||
list_definitions.set_defaults(func=definition_list)
|
||||
list_regexes = subcommands_list.add_parser('regexes', help='List Automembership regex rules.')
|
||||
@@ -269,6 +269,8 @@ def create_parser(subparsers):
|
||||
fixup_task.add_argument('-f', '--filter', required=True, help='Sets the LDAP filter for entries to fix up')
|
||||
fixup_task.add_argument('-s', '--scope', required=True, choices=['sub', 'base', 'one'], type=str.lower,
|
||||
help='Sets the LDAP search scope for entries to fix up')
|
||||
+ fixup_task.add_argument('--cleanup', action='store_true',
|
||||
+ help="Clean up previous group memberships before rebuilding")
|
||||
fixup_task.add_argument('--wait', action='store_true',
|
||||
help="Wait for the task to finish, this could take a long time")
|
||||
fixup_task.add_argument('--timeout', default=0, type=int,
|
||||
@@ -279,7 +281,7 @@ def create_parser(subparsers):
|
||||
fixup_status.add_argument('--dn', help="The task entry's DN")
|
||||
fixup_status.add_argument('--show-log', action='store_true', help="Display the task log")
|
||||
fixup_status.add_argument('--watch', action='store_true',
|
||||
- help="Watch the task's status and wait for it to finish")
|
||||
+ help="Watch the task's status and wait for it to finish")
|
||||
|
||||
abort_fixup = subcommands.add_parser('abort-fixup', help='Abort the rebuild membership task.')
|
||||
abort_fixup.set_defaults(func=abort)
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 52691a44c..a1ad0a45b 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -1141,13 +1141,15 @@ class AutoMembershipPlugin(Plugin):
|
||||
def __init__(self, instance, dn="cn=Auto Membership Plugin,cn=plugins,cn=config"):
|
||||
super(AutoMembershipPlugin, self).__init__(instance, dn)
|
||||
|
||||
- def fixup(self, basedn, _filter=None):
|
||||
+ def fixup(self, basedn, _filter=None, cleanup=False):
|
||||
"""Create an automember rebuild membership task
|
||||
|
||||
:param basedn: Basedn to fix up
|
||||
:type basedn: str
|
||||
:param _filter: a filter for entries to fix up
|
||||
:type _filter: str
|
||||
+ :param cleanup: cleanup old group memberships
|
||||
+ :type cleanup: boolean
|
||||
|
||||
:returns: an instance of Task(DSLdapObject)
|
||||
"""
|
||||
@@ -1156,6 +1158,9 @@ class AutoMembershipPlugin(Plugin):
|
||||
task_properties = {'basedn': basedn}
|
||||
if _filter is not None:
|
||||
task_properties['filter'] = _filter
|
||||
+ if cleanup:
|
||||
+ task_properties['cleanup'] = "yes"
|
||||
+
|
||||
task.create(properties=task_properties)
|
||||
|
||||
return task
|
||||
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
|
||||
index 1a16bbb83..193805780 100644
|
||||
--- a/src/lib389/lib389/tasks.py
|
||||
+++ b/src/lib389/lib389/tasks.py
|
||||
@@ -1006,12 +1006,13 @@ class Tasks(object):
|
||||
return exitCode
|
||||
|
||||
def automemberRebuild(self, suffix=DEFAULT_SUFFIX, scope='sub',
|
||||
- filterstr='objectclass=top', args=None):
|
||||
+ filterstr='objectclass=top', cleanup=False, args=None):
|
||||
'''
|
||||
- @param suffix - The suffix the task should examine - defualt is
|
||||
+ @param suffix - The suffix the task should examine - default is
|
||||
"dc=example,dc=com"
|
||||
@param scope - The scope of the search to find entries
|
||||
- @param fitlerstr - THe search filter to find entries
|
||||
+ @param fitlerstr - The search filter to find entries
|
||||
+ @param cleanup - reset/clear the old group mmeberships prior to rebuilding
|
||||
@param args - is a dictionary that contains modifier of the task
|
||||
wait: True/[False] - If True, waits for the completion of
|
||||
the task before to return
|
||||
@@ -1027,6 +1028,8 @@ class Tasks(object):
|
||||
entry.setValues('basedn', suffix)
|
||||
entry.setValues('filter', filterstr)
|
||||
entry.setValues('scope', scope)
|
||||
+ if cleanup:
|
||||
+ entry.setValues('cleanup', 'yes')
|
||||
|
||||
# start the task and possibly wait for task completion
|
||||
try:
|
||||
--
|
||||
2.43.0
|
||||
|
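For reference, a minimal lib389 usage sketch of the cleanup option added by the patch above. This is illustrative only: the basedn and filter are hypothetical, and "inst" is assumed to be an already-open DirSrv handle.

# Rebuild automember membership, clearing old group memberships first
# (cleanup=True maps to the new 'cleanup: yes' task attribute).
from lib389.plugins import AutoMembershipPlugin

plugin = AutoMembershipPlugin(inst)  # inst: open lib389 DirSrv connection (assumed)
task = plugin.fixup("ou=people,dc=example,dc=com",          # hypothetical basedn
                    _filter="(objectclass=posixAccount)",   # hypothetical filter
                    cleanup=True)
task.wait(timeout=120)  # same wait() call the dsconf CLI wrapper uses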
@ -0,0 +1,125 @@
|
||||
From 5613937623f0037a54490b22c60f7eb1aa52cf4e Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 25 Jun 2025 14:11:05 +0000
|
||||
Subject: [PATCH] =?UTF-8?q?Issue=206825=20-=20RootDN=20Access=20Control=20?=
|
||||
=?UTF-8?q?Plugin=20with=20wildcards=20for=20IP=20addre=E2=80=A6=20(#6826)?=
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
Bug description:
RootDN Access Control Plugin with wildcards for IP addresses fails with
an error "Invalid IP address"

socket.inet_aton() validates IPv4 IP addresses and does not support wildcards.

Fix description:
Add a regex pattern to match wildcard IP addresses, checking that each octet is
between 0 and 255

Fixes: https://github.com/389ds/389-ds-base/issues/6825

Reviewed by: @droideck (Thank you)
|
||||
---
|
||||
.../lib389/cli_conf/plugins/rootdn_ac.py | 16 +++-----
|
||||
src/lib389/lib389/utils.py | 40 +++++++++++++++++++
|
||||
2 files changed, 45 insertions(+), 11 deletions(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py b/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py
|
||||
index 65486fff8..1456f5ebe 100644
|
||||
--- a/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py
|
||||
+++ b/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py
|
||||
@@ -8,7 +8,7 @@
|
||||
|
||||
import socket
|
||||
from lib389.plugins import RootDNAccessControlPlugin
|
||||
-from lib389.utils import is_valid_hostname
|
||||
+from lib389.utils import is_valid_hostname, is_valid_ip
|
||||
from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit
|
||||
from lib389.cli_base import CustomHelpFormatter
|
||||
|
||||
@@ -62,19 +62,13 @@ def validate_args(args):
|
||||
|
||||
if args.allow_ip is not None:
|
||||
for ip in args.allow_ip:
|
||||
- if ip != "delete":
|
||||
- try:
|
||||
- socket.inet_aton(ip)
|
||||
- except socket.error:
|
||||
- raise ValueError(f"Invalid IP address ({ip}) for '--allow-ip'")
|
||||
+ if ip != "delete" and not is_valid_ip(ip):
|
||||
+ raise ValueError(f"Invalid IP address ({ip}) for '--allow-ip'")
|
||||
|
||||
if args.deny_ip is not None and args.deny_ip != "delete":
|
||||
for ip in args.deny_ip:
|
||||
- if ip != "delete":
|
||||
- try:
|
||||
- socket.inet_aton(ip)
|
||||
- except socket.error:
|
||||
- raise ValueError(f"Invalid IP address ({ip}) for '--deny-ip'")
|
||||
+ if ip != "delete" and not is_valid_ip(ip):
|
||||
+ raise ValueError(f"Invalid IP address ({ip}) for '--deny-ip'")
|
||||
|
||||
if args.allow_host is not None:
|
||||
for hostname in args.allow_host:
|
||||
diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
|
||||
index afc282e94..3937fc1a8 100644
|
||||
--- a/src/lib389/lib389/utils.py
|
||||
+++ b/src/lib389/lib389/utils.py
|
||||
@@ -31,6 +31,7 @@ import logging
|
||||
import shutil
|
||||
import ldap
|
||||
import socket
|
||||
+import ipaddress
|
||||
import time
|
||||
import stat
|
||||
from datetime import (datetime, timedelta)
|
||||
@@ -1707,6 +1708,45 @@ def is_valid_hostname(hostname):
|
||||
allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
|
||||
return all(allowed.match(x) for x in hostname.split("."))
|
||||
|
||||
+def is_valid_ip(ip):
|
||||
+ """ Validate an IPv4 or IPv6 address, including asterisks for wildcards. """
|
||||
+ if '*' in ip and '.' in ip:
|
||||
+ ipv4_pattern = r'^(\d{1,3}|\*)\.(\d{1,3}|\*)\.(\d{1,3}|\*)\.(\d{1,3}|\*)$'
|
||||
+ if re.match(ipv4_pattern, ip):
|
||||
+ octets = ip.split('.')
|
||||
+ for octet in octets:
|
||||
+ if octet != '*':
|
||||
+ try:
|
||||
+ val = int(octet, 10)
|
||||
+ if not (0 <= val <= 255):
|
||||
+ return False
|
||||
+ except ValueError:
|
||||
+ return False
|
||||
+ return True
|
||||
+ else:
|
||||
+ return False
|
||||
+
|
||||
+ if '*' in ip and ':' in ip:
|
||||
+ ipv6_pattern = r'^([0-9a-fA-F]{1,4}|\*)(:([0-9a-fA-F]{1,4}|\*)){0,7}$'
|
||||
+ if re.match(ipv6_pattern, ip):
|
||||
+ octets = ip.split(':')
|
||||
+ for octet in octets:
|
||||
+ if octet != '*':
|
||||
+ try:
|
||||
+ val = int(octet, 16)
|
||||
+ if not (0 <= val <= 0xFFFF):
|
||||
+ return False
|
||||
+ except ValueError:
|
||||
+ return False
|
||||
+ return True
|
||||
+ else:
|
||||
+ return False
|
||||
+
|
||||
+ try:
|
||||
+ ipaddress.ip_address(ip)
|
||||
+ return True
|
||||
+ except ValueError:
|
||||
+ return False
|
||||
|
||||
def parse_size(size):
|
||||
"""
|
||||
--
|
||||
2.49.0
|
||||
|
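As a quick illustration of the helper added above, the following sketch shows the expected behaviour of is_valid_ip() with wildcard addresses; the results are inferred from the hunk, not from running the patched lib389.

# Illustrative only - assumes the patched lib389.utils is installed.
from lib389.utils import is_valid_ip

print(is_valid_ip("192.168.1.*"))   # True: wildcard octet, other octets in 0-255
print(is_valid_ip("300.1.1.*"))     # False: 300 is outside 0-255
print(is_valid_ip("2001:db8::1"))   # True: plain IPv6 handled by ipaddress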
@ -1,83 +0,0 @@
|
||||
From 9319d5b022918f14cacb00e3faef85a6ab730a26 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 27 Feb 2024 16:30:47 -0800
|
||||
Subject: [PATCH] Issue 3527 - Support HAProxy and Instance on the same machine
|
||||
configuration (#6107)
|
||||
|
||||
Description: Improve how we handle HAProxy connections to work better when
the DS and HAProxy are on the same machine.
Ensure the client and header destination IPs are checked against the trusted IP list.

Additionally, this change will also allow a configuration where
HAProxy is listening on a different subnet than the one used to forward the request.

Related: https://github.com/389ds/389-ds-base/issues/3527

Reviewed by: @progier389, @jchapma (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/connection.c | 35 +++++++++++++++++++++++++--------
|
||||
1 file changed, 27 insertions(+), 8 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
|
||||
index d28a39bf7..10a8cc577 100644
|
||||
--- a/ldap/servers/slapd/connection.c
|
||||
+++ b/ldap/servers/slapd/connection.c
|
||||
@@ -1187,6 +1187,8 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
|
||||
char str_ip[INET6_ADDRSTRLEN + 1] = {0};
|
||||
char str_haproxy_ip[INET6_ADDRSTRLEN + 1] = {0};
|
||||
char str_haproxy_destip[INET6_ADDRSTRLEN + 1] = {0};
|
||||
+ int trusted_matches_ip_found = 0;
|
||||
+ int trusted_matches_destip_found = 0;
|
||||
struct berval **bvals = NULL;
|
||||
int proxy_connection = 0;
|
||||
|
||||
@@ -1245,21 +1247,38 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
|
||||
normalize_IPv4(conn->cin_addr, buf_ip, sizeof(buf_ip), str_ip, sizeof(str_ip));
|
||||
normalize_IPv4(&pr_netaddr_dest, buf_haproxy_destip, sizeof(buf_haproxy_destip),
|
||||
str_haproxy_destip, sizeof(str_haproxy_destip));
|
||||
+ size_t ip_len = strlen(buf_ip);
|
||||
+ size_t destip_len = strlen(buf_haproxy_destip);
|
||||
|
||||
/* Now, reset RC and set it to 0 only if a match is found */
|
||||
haproxy_rc = -1;
|
||||
|
||||
- /* Allow only:
|
||||
- * Trusted IP == Original Client IP == HAProxy Header Destination IP */
|
||||
+ /*
|
||||
+ * We need to allow a configuration where DS instance and HAProxy are on the same machine.
|
||||
+ * In this case, we need to check if
|
||||
+ * the HAProxy client IP (which will be a loopback address) matches one of the the trusted IP addresses,
|
||||
+ * while still checking that
|
||||
+ * the HAProxy header destination IP address matches one of the trusted IP addresses.
|
||||
+ * Additionally, this change will also allow configuration having
|
||||
+ * HAProxy listening on a different subnet than one used to forward the request.
|
||||
+ */
|
||||
for (size_t i = 0; bvals[i] != NULL; ++i) {
|
||||
- if ((strlen(bvals[i]->bv_val) == strlen(buf_ip)) &&
|
||||
- (strlen(bvals[i]->bv_val) == strlen(buf_haproxy_destip)) &&
|
||||
- (strncasecmp(bvals[i]->bv_val, buf_ip, strlen(buf_ip)) == 0) &&
|
||||
- (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, strlen(buf_haproxy_destip)) == 0)) {
|
||||
- haproxy_rc = 0;
|
||||
- break;
|
||||
+ size_t bval_len = strlen(bvals[i]->bv_val);
|
||||
+
|
||||
+ /* Check if the Client IP (HAProxy's machine IP) address matches the trusted IP address */
|
||||
+ if (!trusted_matches_ip_found) {
|
||||
+ trusted_matches_ip_found = (bval_len == ip_len) && (strncasecmp(bvals[i]->bv_val, buf_ip, ip_len) == 0);
|
||||
+ }
|
||||
+ /* Check if the HAProxy header destination IP address matches the trusted IP address */
|
||||
+ if (!trusted_matches_destip_found) {
|
||||
+ trusted_matches_destip_found = (bval_len == destip_len) && (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, destip_len) == 0);
|
||||
}
|
||||
}
|
||||
+
|
||||
+ if (trusted_matches_ip_found && trusted_matches_destip_found) {
|
||||
+ haproxy_rc = 0;
|
||||
+ }
|
||||
+
|
||||
if (haproxy_rc == -1) {
|
||||
slapi_log_err(SLAPI_LOG_CONNS, "connection_read_operation", "HAProxy header received from unknown source.\n");
|
||||
disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_PROXY_UNKNOWN, EPROTO);
|
||||
--
|
||||
2.45.0
|
||||
|
@ -0,0 +1,50 @@
|
||||
From b8cac173ca2549d2142332107e06fcb4bd34bd65 Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Fri, 8 Mar 2024 16:15:52 +0000
|
||||
Subject: [PATCH] Issue 6119 - Synchronise accept_thread with slapd_daemon
|
||||
(#6120)
|
||||
|
||||
Bug Description: A corner case exists where the slapd_daemon has
begun its shutdown process but the accept_thread is still running
and capable of handling new connections. When this scenario occurs,
the connection subsystem has been partially deallocated and is in
an unstable state. A segfault is generated when attempting to get a
new connection from the connection table.

Fix Description: The connection table is only deallocated when the
number of active threads is 0. Modify the accept_thread to adjust the
active thread count during creation/destruction, meaning the connection
table can only be freed when the accept_thread has completed.

Relates: https://github.com/389ds/389-ds-base/issues/6119

Reviewed by: @tbordaz, @Firstyear, @mreynolds389 (Thank you)
|
||||
---
|
||||
ldap/servers/slapd/daemon.c | 4 ++++
|
||||
1 file changed, 4 insertions(+)
|
||||
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 5d01a2526..a43fc9285 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -868,6 +868,8 @@ accept_thread(void *vports)
|
||||
slapi_ch_free((void **)&listener_idxs);
|
||||
slapd_sockets_ports_free(ports);
|
||||
slapi_ch_free((void **)&fds);
|
||||
+ g_decr_active_threadcnt();
|
||||
+ slapi_log_err(SLAPI_LOG_INFO, "slapd_daemon", "slapd shutting down - accept_thread\n");
|
||||
}
|
||||
|
||||
void
|
||||
@@ -1158,6 +1160,8 @@ slapd_daemon(daemon_ports_t *ports)
|
||||
slapi_log_err(SLAPI_LOG_EMERG, "slapd_daemon", "Unable to fd accept thread - Shutting Down (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)\n",
|
||||
errorCode, slapd_pr_strerror(errorCode));
|
||||
g_set_shutdown(SLAPI_SHUTDOWN_EXIT);
|
||||
+ } else{
|
||||
+ g_incr_active_threadcnt();
|
||||
}
|
||||
|
||||
#ifdef WITH_SYSTEMD
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,108 +0,0 @@
|
||||
From 016a2b6bd3e27cbff36609824a75b020dfd24823 Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 1 May 2024 15:01:33 +0100
|
||||
Subject: [PATCH] CVE-2024-2199
|
||||
|
||||
---
|
||||
.../tests/suites/password/password_test.py | 56 +++++++++++++++++++
|
||||
ldap/servers/slapd/modify.c | 8 ++-
|
||||
2 files changed, 62 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py
|
||||
index 38079476a..b3ff08904 100644
|
||||
--- a/dirsrvtests/tests/suites/password/password_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/password_test.py
|
||||
@@ -65,6 +65,62 @@ def test_password_delete_specific_password(topology_st):
|
||||
log.info('test_password_delete_specific_password: PASSED')
|
||||
|
||||
|
||||
+def test_password_modify_non_utf8(topology_st):
|
||||
+ """Attempt a modify of the userPassword attribute with
|
||||
+ an invalid non utf8 value
|
||||
+
|
||||
+ :id: a31af9d5-d665-42b9-8d6e-fea3d0837d36
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Add a user if it doesnt exist and set its password
|
||||
+ 2. Verify password with a bind
|
||||
+ 3. Modify userPassword attr with invalid value
|
||||
+ 4. Attempt a bind with invalid password value
|
||||
+ 5. Verify original password with a bind
|
||||
+ :expectedresults:
|
||||
+ 1. The user with userPassword should be added successfully
|
||||
+ 2. Operation should be successful
|
||||
+ 3. Server returns ldap.UNWILLING_TO_PERFORM
|
||||
+ 4. Server returns ldap.INVALID_CREDENTIALS
|
||||
+ 5. Operation should be successful
|
||||
+ """
|
||||
+
|
||||
+ log.info('Running test_password_modify_non_utf8...')
|
||||
+
|
||||
+ # Create user and set password
|
||||
+ standalone = topology_st.standalone
|
||||
+ users = UserAccounts(standalone, DEFAULT_SUFFIX)
|
||||
+ if not users.exists(TEST_USER_PROPERTIES['uid'][0]):
|
||||
+ user = users.create(properties=TEST_USER_PROPERTIES)
|
||||
+ else:
|
||||
+ user = users.get(TEST_USER_PROPERTIES['uid'][0])
|
||||
+ user.set('userpassword', PASSWORD)
|
||||
+
|
||||
+ # Verify password
|
||||
+ try:
|
||||
+ user.bind(PASSWORD)
|
||||
+ except ldap.LDAPError as e:
|
||||
+ log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
|
||||
+ assert False
|
||||
+
|
||||
+ # Modify userPassword with an invalid value
|
||||
+ password = b'tes\x82t-password' # A non UTF-8 encoded password
|
||||
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
|
||||
+ user.replace('userpassword', password)
|
||||
+
|
||||
+ # Verify a bind fails with invalid pasword
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ user.bind(password)
|
||||
+
|
||||
+ # Verify we can still bind with original password
|
||||
+ try:
|
||||
+ user.bind(PASSWORD)
|
||||
+ except ldap.LDAPError as e:
|
||||
+ log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
|
||||
+ assert False
|
||||
+
|
||||
+ log.info('test_password_modify_non_utf8: PASSED')
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
|
||||
index 5ca78539c..669bb104c 100644
|
||||
--- a/ldap/servers/slapd/modify.c
|
||||
+++ b/ldap/servers/slapd/modify.c
|
||||
@@ -765,8 +765,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
|
||||
* flagged - leave mod attributes alone */
|
||||
if (!repl_op && !skip_modified_attrs && lastmod) {
|
||||
modify_update_last_modified_attr(pb, &smods);
|
||||
+ slapi_pblock_set(pb, SLAPI_MODIFY_MODS, slapi_mods_get_ldapmods_byref(&smods));
|
||||
}
|
||||
|
||||
+
|
||||
if (0 == slapi_mods_get_num_mods(&smods)) {
|
||||
/* nothing to do - no mods - this is not an error - just
|
||||
send back LDAP_SUCCESS */
|
||||
@@ -933,8 +935,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
|
||||
|
||||
/* encode password */
|
||||
if (pw_encodevals_ext(pb, sdn, va)) {
|
||||
- slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s.\n", slapi_entry_get_dn_const(e));
|
||||
- send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to store attribute \"userPassword\" correctly\n", 0, NULL);
|
||||
+ slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s, "
|
||||
+ "check value is utf8 string.\n", slapi_entry_get_dn_const(e));
|
||||
+ send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to hash \"userPassword\" attribute, "
|
||||
+ "check value is utf8 string.\n", 0, NULL);
|
||||
valuearray_free(&va);
|
||||
goto free_and_return;
|
||||
}
|
||||
--
|
||||
2.45.0
|
||||
|
127
SOURCES/0006-Issue-6782-Improve-paged-result-locking.patch
Normal file
127
SOURCES/0006-Issue-6782-Improve-paged-result-locking.patch
Normal file
@ -0,0 +1,127 @@
|
||||
From 7943443bb92fca6676922349fb12503a527cb6b1 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Thu, 15 May 2025 10:35:27 -0400
|
||||
Subject: [PATCH] Issue 6782 - Improve paged result locking
|
||||
|
||||
Description:
|
||||
|
||||
When cleaning a slot, instead of memsetting everything to zero and restoring
the mutex, manually reset all the values, leaving the mutex pointer
intact.

There is also a deadlock possibility when checking for an abandoned PR search
in opshared.c, as we were checking a flag value outside of the per_conn
lock.

Relates: https://github.com/389ds/389-ds-base/issues/6782

Reviewed by: progier & spichugi (Thanks!!)
|
||||
---
|
||||
ldap/servers/slapd/opshared.c | 10 +++++++++-
|
||||
ldap/servers/slapd/pagedresults.c | 27 +++++++++++++++++----------
|
||||
2 files changed, 26 insertions(+), 11 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
|
||||
index 7dc2d5983..14a7dcdfb 100644
|
||||
--- a/ldap/servers/slapd/opshared.c
|
||||
+++ b/ldap/servers/slapd/opshared.c
|
||||
@@ -592,6 +592,14 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
int32_t tlimit;
|
||||
slapi_pblock_get(pb, SLAPI_SEARCH_TIMELIMIT, &tlimit);
|
||||
pagedresults_set_timelimit(pb_conn, operation, (time_t)tlimit, pr_idx);
|
||||
+ /* When using this mutex in conjunction with the main paged
|
||||
+ * result lock, you must do so in this order:
|
||||
+ *
|
||||
+ * --> pagedresults_lock()
|
||||
+ * --> pagedresults_mutex
|
||||
+ * <-- pagedresults_mutex
|
||||
+ * <-- pagedresults_unlock()
|
||||
+ */
|
||||
pagedresults_mutex = pageresult_lock_get_addr(pb_conn);
|
||||
}
|
||||
|
||||
@@ -717,11 +725,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
pr_search_result = pagedresults_get_search_result(pb_conn, operation, 1 /*locked*/, pr_idx);
|
||||
if (pr_search_result) {
|
||||
if (pagedresults_is_abandoned_or_notavailable(pb_conn, 1 /*locked*/, pr_idx)) {
|
||||
+ pthread_mutex_unlock(pagedresults_mutex);
|
||||
pagedresults_unlock(pb_conn, pr_idx);
|
||||
/* Previous operation was abandoned and the simplepaged object is not in use. */
|
||||
send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL);
|
||||
rc = LDAP_SUCCESS;
|
||||
- pthread_mutex_unlock(pagedresults_mutex);
|
||||
goto free_and_return;
|
||||
} else {
|
||||
slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, pr_search_result);
|
||||
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
|
||||
index 642aefb3d..c3f3aae01 100644
|
||||
--- a/ldap/servers/slapd/pagedresults.c
|
||||
+++ b/ldap/servers/slapd/pagedresults.c
|
||||
@@ -48,7 +48,6 @@ pageresult_lock_get_addr(Connection *conn)
|
||||
static void
|
||||
_pr_cleanup_one_slot(PagedResults *prp)
|
||||
{
|
||||
- PRLock *prmutex = NULL;
|
||||
if (!prp) {
|
||||
return;
|
||||
}
|
||||
@@ -56,13 +55,17 @@ _pr_cleanup_one_slot(PagedResults *prp)
|
||||
/* sr is left; release it. */
|
||||
prp->pr_current_be->be_search_results_release(&(prp->pr_search_result_set));
|
||||
}
|
||||
- /* clean up the slot */
|
||||
- if (prp->pr_mutex) {
|
||||
- /* pr_mutex is reused; back it up and reset it. */
|
||||
- prmutex = prp->pr_mutex;
|
||||
- }
|
||||
- memset(prp, '\0', sizeof(PagedResults));
|
||||
- prp->pr_mutex = prmutex;
|
||||
+
|
||||
+ /* clean up the slot except the mutex */
|
||||
+ prp->pr_current_be = NULL;
|
||||
+ prp->pr_search_result_set = NULL;
|
||||
+ prp->pr_search_result_count = 0;
|
||||
+ prp->pr_search_result_set_size_estimate = 0;
|
||||
+ prp->pr_sort_result_code = 0;
|
||||
+ prp->pr_timelimit_hr.tv_sec = 0;
|
||||
+ prp->pr_timelimit_hr.tv_nsec = 0;
|
||||
+ prp->pr_flags = 0;
|
||||
+ prp->pr_msgid = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1007,7 +1010,8 @@ op_set_pagedresults(Operation *op)
|
||||
|
||||
/*
|
||||
* pagedresults_lock/unlock -- introduced to protect search results for the
|
||||
- * asynchronous searches.
|
||||
+ * asynchronous searches. Do not call these functions while the PR conn lock
|
||||
+ * is held (e.g. pageresult_lock_get_addr(conn))
|
||||
*/
|
||||
void
|
||||
pagedresults_lock(Connection *conn, int index)
|
||||
@@ -1045,6 +1049,8 @@ int
|
||||
pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int index)
|
||||
{
|
||||
PagedResults *prp;
|
||||
+ int32_t result;
|
||||
+
|
||||
if (!conn || (index < 0) || (index >= conn->c_pagedresults.prl_maxlen)) {
|
||||
return 1; /* not abandoned, but do not want to proceed paged results op. */
|
||||
}
|
||||
@@ -1052,10 +1058,11 @@ pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int inde
|
||||
pthread_mutex_lock(pageresult_lock_get_addr(conn));
|
||||
}
|
||||
prp = conn->c_pagedresults.prl_list + index;
|
||||
+ result = prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED;
|
||||
if (!locked) {
|
||||
pthread_mutex_unlock(pageresult_lock_get_addr(conn));
|
||||
}
|
||||
- return prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED;
|
||||
+ return result;
|
||||
}
|
||||
|
||||
int
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,213 +0,0 @@
|
||||
From d5bbe52fbe84a7d3b5938bf82d5c4af15061a8e2 Mon Sep 17 00:00:00 2001
|
||||
From: Pierre Rogier <progier@redhat.com>
|
||||
Date: Wed, 17 Apr 2024 18:18:04 +0200
|
||||
Subject: [PATCH] CVE-2024-3657
|
||||
|
||||
---
|
||||
.../tests/suites/filter/large_filter_test.py | 34 +++++-
|
||||
ldap/servers/slapd/back-ldbm/index.c | 111 ++++++++++--------
|
||||
2 files changed, 92 insertions(+), 53 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/filter/large_filter_test.py b/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
index ecc7bf979..40526bb16 100644
|
||||
--- a/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
+++ b/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
@@ -13,19 +13,29 @@ verify and testing Filter from a search
|
||||
|
||||
import os
|
||||
import pytest
|
||||
+import ldap
|
||||
|
||||
-from lib389._constants import PW_DM
|
||||
+from lib389._constants import PW_DM, DEFAULT_SUFFIX, ErrorLog
|
||||
from lib389.topologies import topology_st as topo
|
||||
from lib389.idm.user import UserAccounts, UserAccount
|
||||
from lib389.idm.account import Accounts
|
||||
from lib389.backend import Backends
|
||||
from lib389.idm.domain import Domain
|
||||
+from lib389.utils import get_ldapurl_from_serverid
|
||||
|
||||
SUFFIX = 'dc=anuj,dc=com'
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
|
||||
+def open_new_ldapi_conn(dsinstance):
|
||||
+ ldapurl, certdir = get_ldapurl_from_serverid(dsinstance)
|
||||
+ assert 'ldapi://' in ldapurl
|
||||
+ conn = ldap.initialize(ldapurl)
|
||||
+ conn.sasl_interactive_bind_s("", ldap.sasl.external())
|
||||
+ return conn
|
||||
+
|
||||
+
|
||||
@pytest.fixture(scope="module")
|
||||
def _create_entries(request, topo):
|
||||
"""
|
||||
@@ -160,6 +170,28 @@ def test_large_filter(topo, _create_entries, real_value):
|
||||
assert len(Accounts(conn, SUFFIX).filter(real_value)) == 3
|
||||
|
||||
|
||||
+def test_long_filter_value(topo):
|
||||
+ """Exercise large eq filter with dn syntax attributes
|
||||
+
|
||||
+ :id: b069ef72-fcc3-11ee-981c-482ae39447e5
|
||||
+ :setup: Standalone
|
||||
+ :steps:
|
||||
+ 1. Try to pass filter rules as per the condition.
|
||||
+ :expectedresults:
|
||||
+ 1. Pass
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ conn = open_new_ldapi_conn(inst.serverid)
|
||||
+ inst.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE,ErrorLog.SEARCH_FILTER))
|
||||
+ filter_value = "a\x1Edmin" * 1025
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ filter_value = "aAdmin" * 1025
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ filter_value = "*"
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ inst.config.loglevel(vals=(ErrorLog.DEFAULT,))
|
||||
+
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main("-s -v %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c
|
||||
index 410db23d1..30fa09ebb 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/index.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/index.c
|
||||
@@ -71,6 +71,32 @@ typedef struct _index_buffer_handle index_buffer_handle;
|
||||
#define INDEX_BUFFER_FLAG_SERIALIZE 1
|
||||
#define INDEX_BUFFER_FLAG_STATS 2
|
||||
|
||||
+/*
|
||||
+ * space needed to encode a byte:
|
||||
+ * 0x00-0x31 and 0x7f-0xff requires 3 bytes: \xx
|
||||
+ * 0x22 and 0x5C requires 2 bytes: \" and \\
|
||||
+ * other requires 1 byte: c
|
||||
+ */
|
||||
+static char encode_size[] = {
|
||||
+ /* 0x00 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x10 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x20 */ 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1,
|
||||
+ /* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
|
||||
+ /* 0x80 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x90 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xA0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xB0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xC0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xD0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xE0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xF0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+};
|
||||
+
|
||||
+
|
||||
/* Index buffering functions */
|
||||
|
||||
static int
|
||||
@@ -799,65 +825,46 @@ index_add_mods(
|
||||
|
||||
/*
|
||||
* Convert a 'struct berval' into a displayable ASCII string
|
||||
+ * returns the printable string
|
||||
*/
|
||||
-
|
||||
-#define SPECIAL(c) (c < 32 || c > 126 || c == '\\' || c == '"')
|
||||
-
|
||||
const char *
|
||||
encode(const struct berval *data, char buf[BUFSIZ])
|
||||
{
|
||||
- char *s;
|
||||
- char *last;
|
||||
- if (data == NULL || data->bv_len == 0)
|
||||
- return "";
|
||||
- last = data->bv_val + data->bv_len - 1;
|
||||
- for (s = data->bv_val; s < last; ++s) {
|
||||
- if (SPECIAL(*s)) {
|
||||
- char *first = data->bv_val;
|
||||
- char *bufNext = buf;
|
||||
- size_t bufSpace = BUFSIZ - 4;
|
||||
- while (1) {
|
||||
- /* printf ("%lu bytes ASCII\n", (unsigned long)(s - first)); */
|
||||
- if (bufSpace < (size_t)(s - first))
|
||||
- s = first + bufSpace - 1;
|
||||
- if (s != first) {
|
||||
- memcpy(bufNext, first, s - first);
|
||||
- bufNext += (s - first);
|
||||
- bufSpace -= (s - first);
|
||||
- }
|
||||
- do {
|
||||
- if (bufSpace) {
|
||||
- *bufNext++ = '\\';
|
||||
- --bufSpace;
|
||||
- }
|
||||
- if (bufSpace < 2) {
|
||||
- memcpy(bufNext, "..", 2);
|
||||
- bufNext += 2;
|
||||
- goto bail;
|
||||
- }
|
||||
- if (*s == '\\' || *s == '"') {
|
||||
- *bufNext++ = *s;
|
||||
- --bufSpace;
|
||||
- } else {
|
||||
- sprintf(bufNext, "%02x", (unsigned)*(unsigned char *)s);
|
||||
- bufNext += 2;
|
||||
- bufSpace -= 2;
|
||||
- }
|
||||
- } while (++s <= last && SPECIAL(*s));
|
||||
- if (s > last)
|
||||
- break;
|
||||
- first = s;
|
||||
- while (!SPECIAL(*s) && s <= last)
|
||||
- ++s;
|
||||
- }
|
||||
- bail:
|
||||
- *bufNext = '\0';
|
||||
- /* printf ("%lu chars in buffer\n", (unsigned long)(bufNext - buf)); */
|
||||
+ if (!data || !data->bv_val) {
|
||||
+ strcpy(buf, "<NULL>");
|
||||
+ return buf;
|
||||
+ }
|
||||
+ char *endbuff = &buf[BUFSIZ-4]; /* Reserve space to append "...\0" */
|
||||
+ char *ptout = buf;
|
||||
+ unsigned char *ptin = (unsigned char*) data->bv_val;
|
||||
+ unsigned char *endptin = ptin+data->bv_len;
|
||||
+
|
||||
+ while (ptin < endptin) {
|
||||
+ if (ptout >= endbuff) {
|
||||
+ /*
|
||||
+ * BUFSIZ(8K) > SLAPI_LOG_BUFSIZ(2K) so the error log message will be
|
||||
+ * truncated anyway. So there is no real interrest to test if the original
|
||||
+ * data contains no special characters and return it as is.
|
||||
+ */
|
||||
+ strcpy(endbuff, "...");
|
||||
return buf;
|
||||
}
|
||||
+ switch (encode_size[*ptin]) {
|
||||
+ case 1:
|
||||
+ *ptout++ = *ptin++;
|
||||
+ break;
|
||||
+ case 2:
|
||||
+ *ptout++ = '\\';
|
||||
+ *ptout++ = *ptin++;
|
||||
+ break;
|
||||
+ case 3:
|
||||
+ sprintf(ptout, "\\%02x", *ptin++);
|
||||
+ ptout += 3;
|
||||
+ break;
|
||||
+ }
|
||||
}
|
||||
- /* printf ("%lu bytes, all ASCII\n", (unsigned long)(s - data->bv_val)); */
|
||||
- return data->bv_val;
|
||||
+ *ptout = 0;
|
||||
+ return buf;
|
||||
}
|
||||
|
||||
static const char *
|
||||
--
|
||||
2.45.0
|
||||
|
@ -0,0 +1,488 @@
|
||||
From b6729a99f3a3d4c6ebe82d4bb60ea2a6f8727782 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Fri, 27 Jun 2025 18:43:39 -0700
|
||||
Subject: [PATCH] Issue 6822 - Backend creation cleanup and Database UI tab
|
||||
error handling (#6823)
|
||||
|
||||
Description: Add rollback functionality when mapping tree creation fails
during backend creation to prevent orphaned backends.
Improve error handling in Database, Replication and Monitoring UI tabs
to gracefully handle backend get-tree command failures.

Fixes: https://github.com/389ds/389-ds-base/issues/6822

Reviewed by: @mreynolds389 (Thanks!)
|
||||
---
|
||||
src/cockpit/389-console/src/database.jsx | 119 ++++++++------
|
||||
src/cockpit/389-console/src/monitor.jsx | 172 +++++++++++---------
|
||||
src/cockpit/389-console/src/replication.jsx | 55 ++++---
|
||||
src/lib389/lib389/backend.py | 18 +-
|
||||
4 files changed, 210 insertions(+), 154 deletions(-)
|
||||
|
||||
diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx
|
||||
index c0c4be414..276125dfc 100644
|
||||
--- a/src/cockpit/389-console/src/database.jsx
|
||||
+++ b/src/cockpit/389-console/src/database.jsx
|
||||
@@ -478,6 +478,59 @@ export class Database extends React.Component {
|
||||
}
|
||||
|
||||
loadSuffixTree(fullReset) {
|
||||
+ const treeData = [
|
||||
+ {
|
||||
+ name: _("Global Database Configuration"),
|
||||
+ icon: <CogIcon />,
|
||||
+ id: "dbconfig",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Chaining Configuration"),
|
||||
+ icon: <ExternalLinkAltIcon />,
|
||||
+ id: "chaining-config",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Backups & LDIFs"),
|
||||
+ icon: <CopyIcon />,
|
||||
+ id: "backups",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Password Policies"),
|
||||
+ id: "pwp",
|
||||
+ icon: <KeyIcon />,
|
||||
+ children: [
|
||||
+ {
|
||||
+ name: _("Global Policy"),
|
||||
+ icon: <HomeIcon />,
|
||||
+ id: "pwpolicy",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Local Policies"),
|
||||
+ icon: <UsersIcon />,
|
||||
+ id: "localpwpolicy",
|
||||
+ },
|
||||
+ ],
|
||||
+ defaultExpanded: true
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Suffixes"),
|
||||
+ icon: <CatalogIcon />,
|
||||
+ id: "suffixes-tree",
|
||||
+ children: [],
|
||||
+ defaultExpanded: true,
|
||||
+ action: (
|
||||
+ <Button
|
||||
+ onClick={this.handleShowSuffixModal}
|
||||
+ variant="plain"
|
||||
+ aria-label="Create new suffix"
|
||||
+ title={_("Create new suffix")}
|
||||
+ >
|
||||
+ <PlusIcon />
|
||||
+ </Button>
|
||||
+ ),
|
||||
+ }
|
||||
+ ];
|
||||
+
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
"backend", "get-tree",
|
||||
@@ -491,58 +544,20 @@ export class Database extends React.Component {
|
||||
suffixData = JSON.parse(content);
|
||||
this.processTree(suffixData);
|
||||
}
|
||||
- const treeData = [
|
||||
- {
|
||||
- name: _("Global Database Configuration"),
|
||||
- icon: <CogIcon />,
|
||||
- id: "dbconfig",
|
||||
- },
|
||||
- {
|
||||
- name: _("Chaining Configuration"),
|
||||
- icon: <ExternalLinkAltIcon />,
|
||||
- id: "chaining-config",
|
||||
- },
|
||||
- {
|
||||
- name: _("Backups & LDIFs"),
|
||||
- icon: <CopyIcon />,
|
||||
- id: "backups",
|
||||
- },
|
||||
- {
|
||||
- name: _("Password Policies"),
|
||||
- id: "pwp",
|
||||
- icon: <KeyIcon />,
|
||||
- children: [
|
||||
- {
|
||||
- name: _("Global Policy"),
|
||||
- icon: <HomeIcon />,
|
||||
- id: "pwpolicy",
|
||||
- },
|
||||
- {
|
||||
- name: _("Local Policies"),
|
||||
- icon: <UsersIcon />,
|
||||
- id: "localpwpolicy",
|
||||
- },
|
||||
- ],
|
||||
- defaultExpanded: true
|
||||
- },
|
||||
- {
|
||||
- name: _("Suffixes"),
|
||||
- icon: <CatalogIcon />,
|
||||
- id: "suffixes-tree",
|
||||
- children: suffixData,
|
||||
- defaultExpanded: true,
|
||||
- action: (
|
||||
- <Button
|
||||
- onClick={this.handleShowSuffixModal}
|
||||
- variant="plain"
|
||||
- aria-label="Create new suffix"
|
||||
- title={_("Create new suffix")}
|
||||
- >
|
||||
- <PlusIcon />
|
||||
- </Button>
|
||||
- ),
|
||||
- }
|
||||
- ];
|
||||
+
|
||||
+ let current_node = this.state.node_name;
|
||||
+ if (fullReset) {
|
||||
+ current_node = DB_CONFIG;
|
||||
+ }
|
||||
+
|
||||
+ treeData[4].children = suffixData; // suffixes node
|
||||
+ this.setState(() => ({
|
||||
+ nodes: treeData,
|
||||
+ node_name: current_node,
|
||||
+ }), this.loadAttrs);
|
||||
+ })
|
||||
+ .fail(err => {
|
||||
+ // Handle backend get-tree failure gracefully
|
||||
let current_node = this.state.node_name;
|
||||
if (fullReset) {
|
||||
current_node = DB_CONFIG;
|
||||
diff --git a/src/cockpit/389-console/src/monitor.jsx b/src/cockpit/389-console/src/monitor.jsx
|
||||
index ad48d1f87..91a8e3e37 100644
|
||||
--- a/src/cockpit/389-console/src/monitor.jsx
|
||||
+++ b/src/cockpit/389-console/src/monitor.jsx
|
||||
@@ -200,6 +200,84 @@ export class Monitor extends React.Component {
|
||||
}
|
||||
|
||||
loadSuffixTree(fullReset) {
|
||||
+ const basicData = [
|
||||
+ {
|
||||
+ name: _("Server Statistics"),
|
||||
+ icon: <ClusterIcon />,
|
||||
+ id: "server-monitor",
|
||||
+ type: "server",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Replication"),
|
||||
+ icon: <TopologyIcon />,
|
||||
+ id: "replication-monitor",
|
||||
+ type: "replication",
|
||||
+ defaultExpanded: true,
|
||||
+ children: [
|
||||
+ {
|
||||
+ name: _("Synchronization Report"),
|
||||
+ icon: <MonitoringIcon />,
|
||||
+ id: "sync-report",
|
||||
+ item: "sync-report",
|
||||
+ type: "repl-mon",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Log Analysis"),
|
||||
+ icon: <MonitoringIcon />,
|
||||
+ id: "log-analysis",
|
||||
+ item: "log-analysis",
|
||||
+ type: "repl-mon",
|
||||
+ }
|
||||
+ ],
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Database"),
|
||||
+ icon: <DatabaseIcon />,
|
||||
+ id: "database-monitor",
|
||||
+ type: "database",
|
||||
+ children: [], // Will be populated with treeData on success
|
||||
+ defaultExpanded: true,
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Logging"),
|
||||
+ icon: <CatalogIcon />,
|
||||
+ id: "log-monitor",
|
||||
+ defaultExpanded: true,
|
||||
+ children: [
|
||||
+ {
|
||||
+ name: _("Access Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "access-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Audit Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "audit-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Audit Failure Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "auditfail-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Errors Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "error-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Security Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "security-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ ]
|
||||
+ },
|
||||
+ ];
|
||||
+
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
"backend", "get-tree",
|
||||
@@ -210,83 +288,7 @@ export class Monitor extends React.Component {
|
||||
.done(content => {
|
||||
const treeData = JSON.parse(content);
|
||||
this.processTree(treeData);
|
||||
- const basicData = [
|
||||
- {
|
||||
- name: _("Server Statistics"),
|
||||
- icon: <ClusterIcon />,
|
||||
- id: "server-monitor",
|
||||
- type: "server",
|
||||
- },
|
||||
- {
|
||||
- name: _("Replication"),
|
||||
- icon: <TopologyIcon />,
|
||||
- id: "replication-monitor",
|
||||
- type: "replication",
|
||||
- defaultExpanded: true,
|
||||
- children: [
|
||||
- {
|
||||
- name: _("Synchronization Report"),
|
||||
- icon: <MonitoringIcon />,
|
||||
- id: "sync-report",
|
||||
- item: "sync-report",
|
||||
- type: "repl-mon",
|
||||
- },
|
||||
- {
|
||||
- name: _("Log Analysis"),
|
||||
- icon: <MonitoringIcon />,
|
||||
- id: "log-analysis",
|
||||
- item: "log-analysis",
|
||||
- type: "repl-mon",
|
||||
- }
|
||||
- ],
|
||||
- },
|
||||
- {
|
||||
- name: _("Database"),
|
||||
- icon: <DatabaseIcon />,
|
||||
- id: "database-monitor",
|
||||
- type: "database",
|
||||
- children: [],
|
||||
- defaultExpanded: true,
|
||||
- },
|
||||
- {
|
||||
- name: _("Logging"),
|
||||
- icon: <CatalogIcon />,
|
||||
- id: "log-monitor",
|
||||
- defaultExpanded: true,
|
||||
- children: [
|
||||
- {
|
||||
- name: _("Access Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "access-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- {
|
||||
- name: _("Audit Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "audit-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- {
|
||||
- name: _("Audit Failure Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "auditfail-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- {
|
||||
- name: _("Errors Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "error-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- {
|
||||
- name: _("Security Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "security-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- ]
|
||||
- },
|
||||
- ];
|
||||
+
|
||||
let current_node = this.state.node_name;
|
||||
let type = this.state.node_type;
|
||||
if (fullReset) {
|
||||
@@ -296,6 +298,22 @@ export class Monitor extends React.Component {
|
||||
basicData[2].children = treeData; // database node
|
||||
this.processReplSuffixes(basicData[1].children);
|
||||
|
||||
+ this.setState(() => ({
|
||||
+ nodes: basicData,
|
||||
+ node_name: current_node,
|
||||
+ node_type: type,
|
||||
+ }), this.update_tree_nodes);
|
||||
+ })
|
||||
+ .fail(err => {
|
||||
+ // Handle backend get-tree failure gracefully
|
||||
+ let current_node = this.state.node_name;
|
||||
+ let type = this.state.node_type;
|
||||
+ if (fullReset) {
|
||||
+ current_node = "server-monitor";
|
||||
+ type = "server";
|
||||
+ }
|
||||
+ this.processReplSuffixes(basicData[1].children);
|
||||
+
|
||||
this.setState(() => ({
|
||||
nodes: basicData,
|
||||
node_name: current_node,
|
||||
diff --git a/src/cockpit/389-console/src/replication.jsx b/src/cockpit/389-console/src/replication.jsx
|
||||
index fa492fd2a..aa535bfc7 100644
|
||||
--- a/src/cockpit/389-console/src/replication.jsx
|
||||
+++ b/src/cockpit/389-console/src/replication.jsx
|
||||
@@ -177,6 +177,16 @@ export class Replication extends React.Component {
|
||||
loaded: false
|
||||
});
|
||||
|
||||
+ const basicData = [
|
||||
+ {
|
||||
+ name: _("Suffixes"),
|
||||
+ icon: <TopologyIcon />,
|
||||
+ id: "repl-suffixes",
|
||||
+ children: [],
|
||||
+ defaultExpanded: true
|
||||
+ }
|
||||
+ ];
|
||||
+
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
"backend", "get-tree",
|
||||
@@ -199,15 +209,7 @@ export class Replication extends React.Component {
|
||||
}
|
||||
}
|
||||
}
|
||||
- const basicData = [
|
||||
- {
|
||||
- name: _("Suffixes"),
|
||||
- icon: <TopologyIcon />,
|
||||
- id: "repl-suffixes",
|
||||
- children: [],
|
||||
- defaultExpanded: true
|
||||
- }
|
||||
- ];
|
||||
+
|
||||
let current_node = this.state.node_name;
|
||||
let current_type = this.state.node_type;
|
||||
let replicated = this.state.node_replicated;
|
||||
@@ -258,6 +260,19 @@ export class Replication extends React.Component {
|
||||
}
|
||||
|
||||
basicData[0].children = treeData;
|
||||
+ this.setState({
|
||||
+ nodes: basicData,
|
||||
+ node_name: current_node,
|
||||
+ node_type: current_type,
|
||||
+ node_replicated: replicated,
|
||||
+ }, () => { this.update_tree_nodes() });
|
||||
+ })
|
||||
+ .fail(err => {
|
||||
+ // Handle backend get-tree failure gracefully
|
||||
+ let current_node = this.state.node_name;
|
||||
+ let current_type = this.state.node_type;
|
||||
+ let replicated = this.state.node_replicated;
|
||||
+
|
||||
this.setState({
|
||||
nodes: basicData,
|
||||
node_name: current_node,
|
||||
@@ -905,18 +920,18 @@ export class Replication extends React.Component {
|
||||
disableTree: false
|
||||
});
|
||||
});
|
||||
- })
|
||||
- .fail(err => {
|
||||
- const errMsg = JSON.parse(err);
|
||||
- this.props.addNotification(
|
||||
- "error",
|
||||
- cockpit.format(_("Error loading replication agreements configuration - $0"), errMsg.desc)
|
||||
- );
|
||||
- this.setState({
|
||||
- suffixLoading: false,
|
||||
- disableTree: false
|
||||
+ })
|
||||
+ .fail(err => {
|
||||
+ const errMsg = JSON.parse(err);
|
||||
+ this.props.addNotification(
|
||||
+ "error",
|
||||
+ cockpit.format(_("Error loading replication agreements configuration - $0"), errMsg.desc)
|
||||
+ );
|
||||
+ this.setState({
|
||||
+ suffixLoading: false,
|
||||
+ disableTree: false
|
||||
+ });
|
||||
});
|
||||
- });
|
||||
})
|
||||
.fail(err => {
|
||||
// changelog failure
|
||||
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
|
||||
index 1319fa0cd..5bff61c58 100644
|
||||
--- a/src/lib389/lib389/backend.py
|
||||
+++ b/src/lib389/lib389/backend.py
|
||||
@@ -694,24 +694,32 @@ class Backend(DSLdapObject):
|
||||
parent_suffix = properties.pop('parent', False)
|
||||
|
||||
# Okay, now try to make the backend.
|
||||
- super(Backend, self).create(dn, properties, basedn)
|
||||
+ backend_obj = super(Backend, self).create(dn, properties, basedn)
|
||||
|
||||
# We check if the mapping tree exists in create, so do this *after*
|
||||
if create_mapping_tree is True:
|
||||
- properties = {
|
||||
+ mapping_tree_properties = {
|
||||
'cn': self._nprops_stash['nsslapd-suffix'],
|
||||
'nsslapd-state': 'backend',
|
||||
'nsslapd-backend': self._nprops_stash['cn'],
|
||||
}
|
||||
if parent_suffix:
|
||||
# This is a subsuffix, set the parent suffix
|
||||
- properties['nsslapd-parent-suffix'] = parent_suffix
|
||||
- self._mts.create(properties=properties)
|
||||
+ mapping_tree_properties['nsslapd-parent-suffix'] = parent_suffix
|
||||
+
|
||||
+ try:
|
||||
+ self._mts.create(properties=mapping_tree_properties)
|
||||
+ except Exception as e:
|
||||
+ try:
|
||||
+ backend_obj.delete()
|
||||
+ except Exception as cleanup_error:
|
||||
+ self._instance.log.error(f"Failed to cleanup backend after mapping tree creation failure: {cleanup_error}")
|
||||
+ raise e
|
||||
|
||||
# We can't create the sample entries unless a mapping tree was installed.
|
||||
if sample_entries is not False and create_mapping_tree is True:
|
||||
self.create_sample_entries(sample_entries)
|
||||
- return self
|
||||
+ return backend_obj
|
||||
|
||||
def delete(self):
|
||||
"""Deletes the backend, it's mapping tree and all related indices.
|
||||
--
|
||||
2.49.0
|
||||
|
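A short sketch of what the rollback above buys a lib389 caller. This is illustrative only: the suffix and backend name are hypothetical, and "inst" is assumed to be an already-open DirSrv handle.

# With the rollback in Backend.create(), a failed mapping-tree step no longer
# leaves an orphaned backend behind, so the caller can simply retry.
from lib389.backend import Backends

backends = Backends(inst)  # inst: open lib389 DirSrv connection (assumed)
try:
    backends.create(properties={
        'cn': 'exampleRoot',                    # hypothetical backend name
        'nsslapd-suffix': 'dc=example,dc=net',  # hypothetical suffix
    })
except Exception as err:
    print(f"Backend creation failed and was rolled back: {err}")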
@ -1,143 +0,0 @@
|
||||
From 6e5f03d5872129963106024f53765234a282406c Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Fri, 16 Feb 2024 11:13:16 +0000
|
||||
Subject: [PATCH] Issue 6096 - Improve connection timeout error logging (#6097)
|
||||
|
||||
Bug description: When a paged result search is run with a time limit,
if the time limit is exceeded the server closes the connection with
"closed IO timeout (nsslapd-ioblocktimeout) - T2". This error message
is incorrect, as the real reason the connection was closed is that
the specified time limit on a paged result search has been exceeded.

Fix description: Correct error message

Relates: https://github.com/389ds/389-ds-base/issues/6096

Reviewed by: @tbordaz (Thank you)
|
||||
---
|
||||
ldap/admin/src/logconv.pl | 24 ++++++++++++++++++-
|
||||
ldap/servers/slapd/daemon.c | 4 ++--
|
||||
ldap/servers/slapd/disconnect_error_strings.h | 1 +
|
||||
ldap/servers/slapd/disconnect_errors.h | 2 +-
|
||||
4 files changed, 27 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
|
||||
index 7698c383a..2a933c4a3 100755
|
||||
--- a/ldap/admin/src/logconv.pl
|
||||
+++ b/ldap/admin/src/logconv.pl
|
||||
@@ -267,7 +267,7 @@ my $optimeAvg = 0;
|
||||
my %cipher = ();
|
||||
my @removefiles = ();
|
||||
|
||||
-my @conncodes = qw(A1 B1 B4 T1 T2 B2 B3 R1 P1 P2 U1);
|
||||
+my @conncodes = qw(A1 B1 B4 T1 T2 T3 B2 B3 R1 P1 P2 U1);
|
||||
my %conn = ();
|
||||
map {$conn{$_} = $_} @conncodes;
|
||||
|
||||
@@ -355,6 +355,7 @@ $connmsg{"B1"} = "Bad Ber Tag Encountered";
|
||||
$connmsg{"B4"} = "Server failed to flush data (response) back to Client";
|
||||
$connmsg{"T1"} = "Idle Timeout Exceeded";
|
||||
$connmsg{"T2"} = "IO Block Timeout Exceeded or NTSSL Timeout";
|
||||
+$connmsg{"T3"} = "Paged Search Time Limit Exceeded";
|
||||
$connmsg{"B2"} = "Ber Too Big";
|
||||
$connmsg{"B3"} = "Ber Peek";
|
||||
$connmsg{"R1"} = "Revents";
|
||||
@@ -1723,6 +1724,10 @@ if ($usage =~ /j/i || $verb eq "yes"){
|
||||
print "\n $recCount. You have some coonections that are being closed by the ioblocktimeout setting. You may want to increase the ioblocktimeout.\n";
|
||||
$recCount++;
|
||||
}
|
||||
+ if (defined($conncount->{"T3"}) and $conncount->{"T3"} > 0){
|
||||
+ print "\n $recCount. You have some connections that are being closed because a paged result search limit has been exceeded. You may want to increase the search time limit.\n";
|
||||
+ $recCount++;
|
||||
+ }
|
||||
# compare binds to unbinds, if the difference is more than 30% of the binds, then report a issue
|
||||
if (($bindCount - $unbindCount) > ($bindCount*.3)){
|
||||
print "\n $recCount. You have a significant difference between binds and unbinds. You may want to investigate this difference.\n";
|
||||
@@ -2366,6 +2371,7 @@ sub parseLineNormal
|
||||
$brokenPipeCount++;
|
||||
if (m/- T1/){ $hashes->{rc}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{rc}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{rc}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{rc}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{rc}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{rc}->{"B4"}++; }
|
||||
@@ -2381,6 +2387,7 @@ sub parseLineNormal
|
||||
$connResetByPeerCount++;
|
||||
if (m/- T1/){ $hashes->{src}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{src}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{src}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{src}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{src}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{src}->{"B4"}++; }
|
||||
@@ -2396,6 +2403,7 @@ sub parseLineNormal
|
||||
$resourceUnavailCount++;
|
||||
if (m/- T1/){ $hashes->{rsrc}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{rsrc}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{rsrc}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{rsrc}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{rsrc}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{rsrc}->{"B4"}++; }
|
||||
@@ -2494,6 +2502,20 @@ sub parseLineNormal
|
||||
}
|
||||
}
|
||||
}
|
||||
+ if (m/- T3/){
|
||||
+ if ($_ =~ /conn= *([0-9A-Z]+)/i) {
|
||||
+ $exc = "no";
|
||||
+ $ip = getIPfromConn($1, $serverRestartCount);
|
||||
+ for (my $xxx = 0; $xxx < $#excludeIP; $xxx++){
|
||||
+ if ($ip eq $excludeIP[$xxx]){$exc = "yes";}
|
||||
+ }
|
||||
+ if ($exc ne "yes"){
|
||||
+ $hashes->{T3}->{$ip}++;
|
||||
+ $hashes->{conncount}->{"T3"}++;
|
||||
+ $connCodeCount++;
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
if (m/- B2/){
|
||||
if ($_ =~ /conn= *([0-9A-Z]+)/i) {
|
||||
$exc = "no";
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 5a48aa66f..bb80dae36 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1599,9 +1599,9 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
int add_fd = 1;
|
||||
/* check timeout for PAGED RESULTS */
|
||||
if (pagedresults_is_timedout_nolock(c)) {
|
||||
- /* Exceeded the timelimit; disconnect the client */
|
||||
+ /* Exceeded the paged search timelimit; disconnect the client */
|
||||
disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
- SLAPD_DISCONNECT_IO_TIMEOUT,
|
||||
+ SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
0);
|
||||
connection_table_move_connection_out_of_active_list(ct,
|
||||
c);
|
||||
diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
index f7a31d728..c2d9e283b 100644
|
||||
--- a/ldap/servers/slapd/disconnect_error_strings.h
|
||||
+++ b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
@@ -27,6 +27,7 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
|
||||
ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
|
||||
ER2(SLAPD_DISCONNECT_REVENTS, "R1")
|
||||
ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
|
||||
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
|
||||
ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
|
||||
ER2(SLAPD_DISCONNECT_UNBIND, "U1")
|
||||
ER2(SLAPD_DISCONNECT_POLL, "P2")
|
||||
diff --git a/ldap/servers/slapd/disconnect_errors.h b/ldap/servers/slapd/disconnect_errors.h
|
||||
index a0484f1c2..e118f674c 100644
|
||||
--- a/ldap/servers/slapd/disconnect_errors.h
|
||||
+++ b/ldap/servers/slapd/disconnect_errors.h
|
||||
@@ -35,6 +35,6 @@
|
||||
#define SLAPD_DISCONNECT_SASL_FAIL SLAPD_DISCONNECT_ERROR_BASE + 12
|
||||
#define SLAPD_DISCONNECT_PROXY_INVALID_HEADER SLAPD_DISCONNECT_ERROR_BASE + 13
|
||||
#define SLAPD_DISCONNECT_PROXY_UNKNOWN SLAPD_DISCONNECT_ERROR_BASE + 14
|
||||
-
|
||||
+#define SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT SLAPD_DISCONNECT_ERROR_BASE + 15
|
||||
|
||||
#endif /* __DISCONNECT_ERRORS_H_ */
|
||||
--
|
||||
2.45.0
|
||||
|
@ -1,4 +1,4 @@
|
||||
From 2b73c3596e724f314b0e09cf6209e0151260f7e5 Mon Sep 17 00:00:00 2001
|
||||
From 0a7fe7c6e18759459499f468443ded4313ebdeab Mon Sep 17 00:00:00 2001
|
||||
From: Alexander Bokovoy <abokovoy@redhat.com>
|
||||
Date: Wed, 9 Jul 2025 12:08:09 +0300
|
||||
Subject: [PATCH] Issue 6857 - uiduniq: allow specifying match rules in the
|
||||
@ -22,10 +22,10 @@ Signed-off-by: Alexander Bokovoy <abokovoy@redhat.com>
|
||||
1 file changed, 7 insertions(+)
|
||||
|
||||
diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
|
||||
index 5b763b551..15cf88477 100644
|
||||
index 053af4f9d..887e79d78 100644
|
||||
--- a/ldap/servers/plugins/uiduniq/uid.c
|
||||
+++ b/ldap/servers/plugins/uiduniq/uid.c
|
||||
@@ -1031,7 +1031,14 @@ preop_add(Slapi_PBlock *pb)
|
||||
@@ -1030,7 +1030,14 @@ preop_add(Slapi_PBlock *pb)
|
||||
}
|
||||
|
||||
for (i = 0; attrNames && attrNames[i]; i++) {
|
@ -1,44 +0,0 @@
|
||||
From a112394af3a20787755029804684d57a9c3ffa9a Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 21 Feb 2024 12:43:03 +0000
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap
(#6104)

Bug description: A recent addition to the connection disconnect error
messaging conflicts with how errormap.c maps error codes/strings.

Fix description: errormap expects error codes/strings to be in ascending
order. Moved the new error code to the bottom of the list.

Relates: https://github.com/389ds/389-ds-base/issues/6103

Reviewed by: @droideck. @progier389 (Thank you)
---
ldap/servers/slapd/disconnect_error_strings.h | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
index c2d9e283b..f603a08ce 100644
|
||||
--- a/ldap/servers/slapd/disconnect_error_strings.h
|
||||
+++ b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
@@ -14,7 +14,8 @@
|
||||
/* disconnect_error_strings.h
|
||||
*
|
||||
* Strings describing the errors used in logging the reason a connection
|
||||
- * was closed.
|
||||
+ * was closed. Ensure definitions are in the same order as the error codes
|
||||
+ * defined in disconnect_errors.h
|
||||
*/
|
||||
#ifndef __DISCONNECT_ERROR_STRINGS_H_
|
||||
#define __DISCONNECT_ERROR_STRINGS_H_
|
||||
@@ -35,6 +36,6 @@ ER2(SLAPD_DISCONNECT_NTSSL_TIMEOUT, "T2")
|
||||
ER2(SLAPD_DISCONNECT_SASL_FAIL, "S1")
|
||||
ER2(SLAPD_DISCONNECT_PROXY_INVALID_HEADER, "P3")
|
||||
ER2(SLAPD_DISCONNECT_PROXY_UNKNOWN, "P4")
|
||||
-
|
||||
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
|
||||
|
||||
#endif /* __DISCONNECT_ERROR_STRINGS_H_ */
|
||||
--
|
||||
2.45.0
|
||||
|
File diff suppressed because it is too large
@ -1,30 +0,0 @@
|
||||
From edd9abc8901604dde1d739d87ca2906734d53dd3 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Thu, 13 Jun 2024 13:35:09 +0200
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap

Description:
Remove duplicate SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT error code.

Fixes: https://github.com/389ds/389-ds-base/issues/6103

Reviewed by: @tbordaz (Thanks!)
---
ldap/servers/slapd/disconnect_error_strings.h | 1 -
1 file changed, 1 deletion(-)

diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
index f603a08ce..d49cc79a2 100644
|
||||
--- a/ldap/servers/slapd/disconnect_error_strings.h
|
||||
+++ b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
@@ -28,7 +28,6 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
|
||||
ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
|
||||
ER2(SLAPD_DISCONNECT_REVENTS, "R1")
|
||||
ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
|
||||
-ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
|
||||
ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
|
||||
ER2(SLAPD_DISCONNECT_UNBIND, "U1")
|
||||
ER2(SLAPD_DISCONNECT_POLL, "P2")
|
||||
--
|
||||
2.45.0
|
||||
|
@ -1,4 +1,4 @@
|
||||
From 3ba73d2aa55f18ff73d4b3901ce101133745effc Mon Sep 17 00:00:00 2001
|
||||
From 5198da59d622dbc39afe2ece9c6f40f4fb249d52 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 9 Jul 2025 14:18:50 -0400
|
||||
Subject: [PATCH] Issue 6859 - str2filter is not fully applying matching rules
|
||||
@ -338,10 +338,10 @@ index b190e0ec1..b338f405f 100644
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ user.replace('cn', 'common_name')
|
||||
diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
|
||||
index 15cf88477..5a0d61b86 100644
|
||||
index 887e79d78..fdb1404a0 100644
|
||||
--- a/ldap/servers/plugins/uiduniq/uid.c
|
||||
+++ b/ldap/servers/plugins/uiduniq/uid.c
|
||||
@@ -1179,6 +1179,10 @@ preop_modify(Slapi_PBlock *pb)
|
||||
@@ -1178,6 +1178,10 @@ preop_modify(Slapi_PBlock *pb)
|
||||
for (; mods && *mods; mods++) {
|
||||
mod = *mods;
|
||||
for (i = 0; attrNames && attrNames[i]; i++) {
|
||||
@ -352,7 +352,7 @@ index 15cf88477..5a0d61b86 100644
|
||||
if ((slapi_attr_type_cmp(mod->mod_type, attrNames[i], 1) == 0) && /* mod contains target attr */
|
||||
(mod->mod_op & LDAP_MOD_BVALUES) && /* mod is bval encoded (not string val) */
|
||||
(mod->mod_bvalues && mod->mod_bvalues[0]) && /* mod actually contains some values */
|
||||
@@ -1187,6 +1191,9 @@ preop_modify(Slapi_PBlock *pb)
|
||||
@@ -1186,6 +1190,9 @@ preop_modify(Slapi_PBlock *pb)
|
||||
{
|
||||
addMod(&checkmods, &checkmodsCapacity, &modcount, mod);
|
||||
}
|
||||
@ -363,10 +363,10 @@ index 15cf88477..5a0d61b86 100644
|
||||
}
|
||||
if (modcount == 0) {
|
||||
diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c
|
||||
index 6cf88b7de..f40c9a39b 100644
|
||||
index b262820c5..67051a5ff 100644
|
||||
--- a/ldap/servers/slapd/plugin_mr.c
|
||||
+++ b/ldap/servers/slapd/plugin_mr.c
|
||||
@@ -624,7 +624,7 @@ attempt_mr_filter_create(mr_filter_t *f, struct slapdplugin *mrp, Slapi_PBlock *
|
||||
@@ -626,7 +626,7 @@ attempt_mr_filter_create(mr_filter_t *f, struct slapdplugin *mrp, Slapi_PBlock *
|
||||
int rc;
|
||||
IFP mrf_create = NULL;
|
||||
f->mrf_match = NULL;
|
@ -1,220 +0,0 @@
|
||||
From 8cf981c00ae18d3efaeb10819282cd991621e9a2 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 22 May 2024 11:29:05 +0200
Subject: [PATCH] Issue 6172 - RFE: improve the performance of evaluation of
filter component when tested against a large valueset (like group members)
(#6173)

Bug description:
Before returning an entry (to a SRCH) the server checks that the entry matches the SRCH filter.
If a filter component (equality) is testing the value (ava) against a
large valueset (like uniquemember values), it takes a long time because
of the large number of values and required normalization of the values.
This can be improved taking benefit of sorted valueset. Those sorted
valueset were created to improve updates of large valueset (groups) but
at that time not implemented in SRCH path.

Fix description:
In case of LDAP_FILTER_EQUALITY component, the server can get
benefit of the sorted valuearray.
To limit the risk of regression, we use the sorted valuearray
only for the DN syntax attribute. Indeed the sorted valuearray was
designed for those type of attribute.
With those two limitations, there is no need of a toggle and
the call to plugin_call_syntax_filter_ava can be replaced by
a call to slapi_valueset_find.
In both cases, sorted valueset and plugin_call_syntax_filter_ava, ava and
values are normalized.
In sorted valueset, the values have been normalized to insert the index
in the sorted array and then comparison is done on normalized values.
In plugin_call_syntax_filter_ava, all values in valuearray (of valueset) are normalized
before comparison.

relates: #6172

Reviewed by: Pierre Rogier, Simon Pichugin (Big Thanks !!!)
---
.../tests/suites/filter/filter_test.py | 125 ++++++++++++++++++
ldap/servers/slapd/filterentry.c | 22 ++-
2 files changed, 146 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
index d6bfa5a3b..4baaf04a7 100644
|
||||
--- a/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
+++ b/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
@@ -9,7 +9,11 @@
|
||||
import logging
|
||||
|
||||
import pytest
|
||||
+import time
|
||||
+from lib389.dirsrv_log import DirsrvAccessLog
|
||||
from lib389.tasks import *
|
||||
+from lib389.backend import Backends, Backend
|
||||
+from lib389.dbgen import dbgen_users, dbgen_groups
|
||||
from lib389.topologies import topology_st
|
||||
from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX
|
||||
from lib389.utils import *
|
||||
@@ -304,6 +308,127 @@ def test_extended_search(topology_st):
|
||||
ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
|
||||
assert len(ents) == 1
|
||||
|
||||
+def test_match_large_valueset(topology_st):
|
||||
+ """Test that when returning a big number of entries
|
||||
+ and that we need to match the filter from a large valueset
|
||||
+ we get benefit to use the sorted valueset
|
||||
+
|
||||
+ :id: 7db5aa88-50e0-4c31-85dd-1d2072cb674c
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Create a users and groups backends and tune them
|
||||
+ 2. Generate a test ldif (2k users and 1K groups with all users)
|
||||
+ 3. Import test ldif file using Offline import (ldif2db).
|
||||
+ 4. Prim the 'groups' entrycache with a "fast" search
|
||||
+ 5. Search the 'groups' with a difficult matching value
|
||||
+ 6. check that etime from step 5 is less than a second
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Create a users and groups backends should PASS
|
||||
+ 2. Generate LDIF should PASS.
|
||||
+ 3. Offline import should PASS.
|
||||
+ 4. Priming should PASS.
|
||||
+ 5. Performance search should PASS.
|
||||
+ 6. Etime of performance search should PASS.
|
||||
+ """
|
||||
+
|
||||
+ log.info('Running test_match_large_valueset...')
|
||||
+ #
|
||||
+ # Test online/offline LDIF imports
|
||||
+ #
|
||||
+ inst = topology_st.standalone
|
||||
+ inst.start()
|
||||
+ backends = Backends(inst)
|
||||
+ users_suffix = "ou=users,%s" % DEFAULT_SUFFIX
|
||||
+ users_backend = 'users'
|
||||
+ users_ldif = 'users_import.ldif'
|
||||
+ groups_suffix = "ou=groups,%s" % DEFAULT_SUFFIX
|
||||
+ groups_backend = 'groups'
|
||||
+ groups_ldif = 'groups_import.ldif'
|
||||
+ groups_entrycache = '200000000'
|
||||
+ users_number = 2000
|
||||
+ groups_number = 1000
|
||||
+
|
||||
+
|
||||
+ # For priming the cache we just want to be fast
|
||||
+ # taking the first value in the valueset is good
|
||||
+ # whether the valueset is sorted or not
|
||||
+ priming_user_rdn = "user0001"
|
||||
+
|
||||
+ # For performance testing, this is important to use
|
||||
+ # user1000 rather then user0001
|
||||
+ # Because user0001 is the first value in the valueset
|
||||
+ # whether we use the sorted valuearray or non sorted
|
||||
+ # valuearray the performance will be similar.
|
||||
+ # With middle value user1000, the performance boost of
|
||||
+ # the sorted valuearray will make the difference.
|
||||
+ perf_user_rdn = "user1000"
|
||||
+
|
||||
+ # Step 1. Prepare the backends and tune the groups entrycache
|
||||
+ try:
|
||||
+ be_users = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': users_suffix, 'name': users_backend})
|
||||
+ be_groups = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': groups_suffix, 'name': groups_backend})
|
||||
+
|
||||
+ # set the entry cache to 200Mb as the 1K groups of 2K users require at least 170Mb
|
||||
+ be_groups.replace('nsslapd-cachememsize', groups_entrycache)
|
||||
+ except:
|
||||
+ raise
|
||||
+
|
||||
+ # Step 2. Generate a test ldif (10k users entries)
|
||||
+ log.info("Generating users LDIF...")
|
||||
+ ldif_dir = inst.get_ldif_dir()
|
||||
+ users_import_ldif = "%s/%s" % (ldif_dir, users_ldif)
|
||||
+ groups_import_ldif = "%s/%s" % (ldif_dir, groups_ldif)
|
||||
+ dbgen_users(inst, users_number, users_import_ldif, suffix=users_suffix, generic=True, parent=users_suffix)
|
||||
+
|
||||
+ # Generate a test ldif (800 groups with 10k members) that fit in 700Mb entry cache
|
||||
+ props = {
|
||||
+ "name": "group",
|
||||
+ "suffix": groups_suffix,
|
||||
+ "parent": groups_suffix,
|
||||
+ "number": groups_number,
|
||||
+ "numMembers": users_number,
|
||||
+ "createMembers": False,
|
||||
+ "memberParent": users_suffix,
|
||||
+ "membershipAttr": "uniquemember",
|
||||
+ }
|
||||
+ dbgen_groups(inst, groups_import_ldif, props)
|
||||
+
|
||||
+ # Step 3. Do the both offline imports
|
||||
+ inst.stop()
|
||||
+ if not inst.ldif2db(users_backend, None, None, None, users_import_ldif):
|
||||
+ log.fatal('test_basic_import_export: Offline users import failed')
|
||||
+ assert False
|
||||
+ if not inst.ldif2db(groups_backend, None, None, None, groups_import_ldif):
|
||||
+ log.fatal('test_basic_import_export: Offline groups import failed')
|
||||
+ assert False
|
||||
+ inst.start()
|
||||
+
|
||||
+ # Step 4. first prime the cache
|
||||
+ # Just request the 'DN'. We are interested by the time of matching not by the time of transfert
|
||||
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (priming_user_rdn, users_suffix), ['dn'])
|
||||
+ assert len(entries) == groups_number
|
||||
+
|
||||
+ # Step 5. Now do the real performance checking it should take less than a second
|
||||
+ # Just request the 'DN'. We are interested by the time of matching not by the time of transfert
|
||||
+ search_start = time.time()
|
||||
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (perf_user_rdn, users_suffix), ['dn'])
|
||||
+ duration = time.time() - search_start
|
||||
+ log.info("Duration of the search was %f", duration)
|
||||
+
|
||||
+ # Step 6. Gather the etime from the access log
|
||||
+ inst.stop()
|
||||
+ access_log = DirsrvAccessLog(inst)
|
||||
+ search_result = access_log.match(".*RESULT err=0 tag=101 nentries=%s.*" % groups_number)
|
||||
+ log.info("Found patterns are %s", search_result[0])
|
||||
+ log.info("Found patterns are %s", search_result[1])
|
||||
+ etime = float(search_result[1].split('etime=')[1])
|
||||
+ log.info("Duration of the search from access log was %f", etime)
|
||||
+ assert len(entries) == groups_number
|
||||
+ assert (etime < 1)
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c
|
||||
index fd8fdda9f..cae5c7edc 100644
|
||||
--- a/ldap/servers/slapd/filterentry.c
|
||||
+++ b/ldap/servers/slapd/filterentry.c
|
||||
@@ -296,7 +296,27 @@ test_ava_filter(
|
||||
rc = -1;
|
||||
for (; a != NULL; a = a->a_next) {
|
||||
if (slapi_attr_type_cmp(ava->ava_type, a->a_type, SLAPI_TYPE_CMP_SUBTYPE) == 0) {
|
||||
- rc = plugin_call_syntax_filter_ava(a, ftype, ava);
|
||||
+ if ((ftype == LDAP_FILTER_EQUALITY) &&
|
||||
+ (slapi_attr_is_dn_syntax_type(a->a_type))) {
|
||||
+ /* This path is for a performance improvement */
|
||||
+
|
||||
+ /* In case of equality filter we can get benefit of the
|
||||
+ * sorted valuearray (from valueset).
|
||||
+ * This improvement is limited to DN syntax attributes for
|
||||
+ * which the sorted valueset was designed.
|
||||
+ */
|
||||
+ Slapi_Value *sval = NULL;
|
||||
+ sval = slapi_value_new_berval(&ava->ava_value);
|
||||
+ if (slapi_valueset_find((const Slapi_Attr *)a, &a->a_present_values, sval)) {
|
||||
+ rc = 0;
|
||||
+ }
|
||||
+ slapi_value_free(&sval);
|
||||
+ } else {
|
||||
+ /* When sorted valuearray optimization cannot be used
|
||||
+ * lets filter the value according to its syntax
|
||||
+ */
|
||||
+ rc = plugin_call_syntax_filter_ava(a, ftype, ava);
|
||||
+ }
|
||||
if (rc == 0) {
|
||||
break;
|
||||
}
|
||||
--
|
||||
2.46.0
|
||||
|
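
To make the performance argument in the Issue 6172 patch above concrete, here is a minimal standalone C sketch (not 389-ds code; names and types are illustrative): once an attribute's values are kept sorted on their normalized form, an equality test becomes a binary search instead of a linear scan with per-value normalization.

    #include <stdlib.h>
    #include <string.h>

    /* Comparator for an array of C strings (assumed already normalized). */
    static int cmp_cstr(const void *a, const void *b)
    {
        return strcmp(*(const char *const *)a, *(const char *const *)b);
    }

    /* Returns 1 if 'needle' is present in the sorted array, 0 otherwise.
     * O(log n) strcmp calls instead of O(n) over a large member list. */
    static int sorted_values_contain(const char **sorted_vals, size_t n, const char *needle)
    {
        return bsearch(&needle, sorted_vals, n, sizeof(const char *), cmp_cstr) != NULL;
    }
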
@ -0,0 +1,163 @@
|
||||
From 406563c136d78235751e34a3c7e22ccaf114f754 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 15 Jul 2025 17:56:18 -0400
Subject: [PATCH] Issue 6872 - compressed log rotation creates files with world
readable permission

Description:

When compressing a log file, first create the empty file using open()
so we can set the correct permissions right from the start. gzopen()
always uses permission 644 and that is not safe. So after creating it
with open(), with the correct permissions, then pass the FD to gzdopen()
and write the compressed content.

relates: https://github.com/389ds/389-ds-base/issues/6872

Reviewed by: progier(Thanks!)
---
.../logging/logging_compression_test.py | 15 ++++++++--
ldap/servers/slapd/log.c | 28 +++++++++++++------
ldap/servers/slapd/schema.c | 2 +-
3 files changed, 33 insertions(+), 12 deletions(-)

diff --git a/dirsrvtests/tests/suites/logging/logging_compression_test.py b/dirsrvtests/tests/suites/logging/logging_compression_test.py
|
||||
index e30874cc0..3a987d62c 100644
|
||||
--- a/dirsrvtests/tests/suites/logging/logging_compression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/logging/logging_compression_test.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2022 Red Hat, Inc.
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -22,12 +22,21 @@ log = logging.getLogger(__name__)
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
+
|
||||
def log_rotated_count(log_type, log_dir, check_compressed=False):
|
||||
- # Check if the log was rotated
|
||||
+ """
|
||||
+ Check if the log was rotated and has the correct permissions
|
||||
+ """
|
||||
log_file = f'{log_dir}/{log_type}.2*'
|
||||
if check_compressed:
|
||||
log_file += ".gz"
|
||||
- return len(glob.glob(log_file))
|
||||
+ log_files = glob.glob(log_file)
|
||||
+ for logf in log_files:
|
||||
+ # Check permissions
|
||||
+ st = os.stat(logf)
|
||||
+ assert oct(st.st_mode) == '0o100600' # 0600
|
||||
+
|
||||
+ return len(log_files)
|
||||
|
||||
|
||||
def update_and_sleep(inst, suffix, sleep=True):
|
||||
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
|
||||
index a018ca2d5..178d29b89 100644
|
||||
--- a/ldap/servers/slapd/log.c
|
||||
+++ b/ldap/servers/slapd/log.c
|
||||
@@ -172,17 +172,28 @@ get_syslog_loglevel(int loglevel)
|
||||
}
|
||||
|
||||
static int
|
||||
-compress_log_file(char *log_name)
|
||||
+compress_log_file(char *log_name, int32_t mode)
|
||||
{
|
||||
char gzip_log[BUFSIZ] = {0};
|
||||
char buf[LOG_CHUNK] = {0};
|
||||
size_t bytes_read = 0;
|
||||
gzFile outfile = NULL;
|
||||
FILE *source = NULL;
|
||||
+ int fd = 0;
|
||||
|
||||
PR_snprintf(gzip_log, sizeof(gzip_log), "%s.gz", log_name);
|
||||
- if ((outfile = gzopen(gzip_log,"wb")) == NULL) {
|
||||
- /* Failed to open new gzip file */
|
||||
+
|
||||
+ /*
|
||||
+ * Try to open the file as we may have an incorrect path. We also need to
|
||||
+ * set the permissions using open() as gzopen() creates the file with
|
||||
+ * 644 permissions (world readable - bad). So we create an empty file with
|
||||
+ * the correct permissions, then we pass the FD to gzdopen() to write the
|
||||
+ * compressed content.
|
||||
+ */
|
||||
+ if ((fd = open(gzip_log, O_WRONLY|O_CREAT|O_TRUNC, mode)) >= 0) {
|
||||
+ /* FIle successfully created, now pass the FD to gzdopen() */
|
||||
+ outfile = gzdopen(fd, "ab");
|
||||
+ } else {
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -191,6 +202,7 @@ compress_log_file(char *log_name)
|
||||
gzclose(outfile);
|
||||
return -1;
|
||||
}
|
||||
+
|
||||
bytes_read = fread(buf, 1, LOG_CHUNK, source);
|
||||
while (bytes_read > 0) {
|
||||
int bytes_written = gzwrite(outfile, buf, bytes_read);
|
||||
@@ -3291,7 +3303,7 @@ log__open_accesslogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_access_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_access_mode) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
|
||||
"failed to compress rotated access log (%s)\n",
|
||||
newfile);
|
||||
@@ -3455,7 +3467,7 @@ log__open_securitylogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_security_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_security_mode) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "log__open_securitylogfile",
|
||||
"failed to compress rotated security audit log (%s)\n",
|
||||
newfile);
|
||||
@@ -6172,7 +6184,7 @@ log__open_errorlogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_error_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_error_mode) != 0) {
|
||||
PR_snprintf(buffer, sizeof(buffer), "Failed to compress errors log file (%s)\n", newfile);
|
||||
log__error_emergency(buffer, 1, 1);
|
||||
} else {
|
||||
@@ -6355,7 +6367,7 @@ log__open_auditlogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_audit_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_audit_mode) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
|
||||
"failed to compress rotated audit log (%s)\n",
|
||||
newfile);
|
||||
@@ -6514,7 +6526,7 @@ log__open_auditfaillogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_auditfail_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_auditfail_mode) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
|
||||
"failed to compress rotated auditfail log (%s)\n",
|
||||
newfile);
|
||||
diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
|
||||
index a8e6b1210..9ef4ee4bf 100644
|
||||
--- a/ldap/servers/slapd/schema.c
|
||||
+++ b/ldap/servers/slapd/schema.c
|
||||
@@ -903,7 +903,7 @@ oc_check_allowed_sv(Slapi_PBlock *pb, Slapi_Entry *e, const char *type, struct o
|
||||
|
||||
if (pb) {
|
||||
PR_snprintf(errtext, sizeof(errtext),
|
||||
- "attribute \"%s\" not allowed\n",
|
||||
+ "attribute \"%s\" not allowed",
|
||||
escape_string(type, ebuf));
|
||||
slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, errtext);
|
||||
}
|
||||
--
|
||||
2.49.0
|
||||
|
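
The open()/gzdopen() sequence described in the Issue 6872 commit message above can be illustrated with a minimal sketch (a hypothetical helper, not the actual log.c code): the file is created with the intended mode first and only then handed to zlib, so it never exists on disk with gzopen()'s default 0644 permissions.

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <sys/types.h>
    #include <unistd.h>
    #include <zlib.h>

    /* Create 'path' with 'mode' (e.g. 0600) and write 'len' bytes of 'buf' gzip-compressed. */
    static int write_gz_with_mode(const char *path, mode_t mode, const void *buf, unsigned len)
    {
        int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, mode);
        if (fd < 0) {
            return -1;
        }
        gzFile out = gzdopen(fd, "wb"); /* zlib takes ownership of fd on success */
        if (out == NULL) {
            close(fd);
            return -1;
        }
        int rc = (gzwrite(out, buf, len) == (int)len) ? 0 : -1;
        gzclose(out); /* flushes and closes fd */
        return rc;
    }
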
@ -0,0 +1,116 @@
|
||||
From 9b8b23f6d46f16fbc1784b26cfc04dd6b4fa94e1 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 18 Jul 2025 18:50:33 -0700
Subject: [PATCH] Issue 6878 - Prevent repeated disconnect logs during shutdown
(#6879)

Description: Avoid logging non-active initialized connections via CONN in disconnect_server_nomutex_ext by adding a check to skip invalid conn=0 with invalid sockets, preventing excessive repeated messages.

Update ds_logs_test.py by adding test_no_repeated_disconnect_messages to verify the fix.

Fixes: https://github.com/389ds/389-ds-base/issues/6878

Reviewed by: @mreynolds389 (Thanks!)
---
.../tests/suites/ds_logs/ds_logs_test.py | 51 ++++++++++++++++++-
ldap/servers/slapd/connection.c | 15 +++---
2 files changed, 59 insertions(+), 7 deletions(-)

diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 2c22347bb..b86c72687 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -24,7 +24,7 @@ from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, Aut
|
||||
from lib389.idm.user import UserAccounts, UserAccount
|
||||
from lib389.idm.group import Groups
|
||||
from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
-from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD
|
||||
+from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD, ErrorLog
|
||||
from lib389.utils import ds_is_older, ds_is_newer
|
||||
from lib389.config import RSA
|
||||
from lib389.dseldif import DSEldif
|
||||
@@ -1435,6 +1435,55 @@ def test_errorlog_buffering(topology_st, request):
|
||||
assert inst.ds_error_log.match(".*slapd_daemon - slapd started.*")
|
||||
|
||||
|
||||
+def test_no_repeated_disconnect_messages(topology_st):
|
||||
+ """Test that there are no repeated "Not setting conn 0 to be disconnected: socket is invalid" messages on restart
|
||||
+
|
||||
+ :id: 72b5e1ce-2db8-458f-b2cd-0a0b6525f51f
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Set error log level to CONNECTION
|
||||
+ 2. Clear existing error logs
|
||||
+ 3. Restart the server with 30 second timeout
|
||||
+ 4. Check error log for repeated disconnect messages
|
||||
+ 5. Verify there are no more than 10 occurrences of the disconnect message
|
||||
+ :expectedresults:
|
||||
+ 1. Error log level should be set successfully
|
||||
+ 2. Error logs should be cleared
|
||||
+ 3. Server should restart successfully within 30 seconds
|
||||
+ 4. Error log should be accessible
|
||||
+ 5. There should be no more than 10 repeated disconnect messages
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+
|
||||
+ log.info('Set error log level to CONNECTION')
|
||||
+ inst.config.loglevel([ErrorLog.CONNECT])
|
||||
+ current_level = inst.config.get_attr_val_int('nsslapd-errorlog-level')
|
||||
+ log.info(f'Error log level set to: {current_level}')
|
||||
+
|
||||
+ log.info('Clear existing error logs')
|
||||
+ inst.deleteErrorLogs()
|
||||
+
|
||||
+ log.info('Restart the server with 30 second timeout')
|
||||
+ inst.restart(timeout=30)
|
||||
+
|
||||
+ log.info('Check error log for repeated disconnect messages')
|
||||
+ disconnect_message = "Not setting conn 0 to be disconnected: socket is invalid"
|
||||
+
|
||||
+ # Count occurrences of the disconnect message
|
||||
+ error_log_lines = inst.ds_error_log.readlines()
|
||||
+ disconnect_count = 0
|
||||
+
|
||||
+ for line in error_log_lines:
|
||||
+ if disconnect_message in line:
|
||||
+ disconnect_count += 1
|
||||
+
|
||||
+ log.info(f'Found {disconnect_count} occurrences of disconnect message')
|
||||
+
|
||||
+ log.info('Verify there are no more than 10 occurrences')
|
||||
+ assert disconnect_count <= 10, f"Found {disconnect_count} repeated disconnect messages, expected <= 10"
|
||||
+
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
|
||||
index bb4fcd77f..2967de15b 100644
|
||||
--- a/ldap/servers/slapd/connection.c
|
||||
+++ b/ldap/servers/slapd/connection.c
|
||||
@@ -2465,12 +2465,15 @@ disconnect_server_nomutex_ext(Connection *conn, PRUint64 opconnid, int opid, PRE
|
||||
}
|
||||
|
||||
} else {
|
||||
- slapi_log_err(SLAPI_LOG_CONNS, "disconnect_server_nomutex_ext",
|
||||
- "Not setting conn %d to be disconnected: %s\n",
|
||||
- conn->c_sd,
|
||||
- (conn->c_sd == SLAPD_INVALID_SOCKET) ? "socket is invalid" :
|
||||
- ((conn->c_connid != opconnid) ? "conn id does not match op conn id" :
|
||||
- ((conn->c_flags & CONN_FLAG_CLOSING) ? "conn is closing" : "unknown")));
|
||||
+ /* We avoid logging an invalid conn=0 connection as it is not a real connection. */
|
||||
+ if (!(conn->c_sd == SLAPD_INVALID_SOCKET && conn->c_connid == 0)) {
|
||||
+ slapi_log_err(SLAPI_LOG_CONNS, "disconnect_server_nomutex_ext",
|
||||
+ "Not setting conn %d to be disconnected: %s\n",
|
||||
+ conn->c_sd,
|
||||
+ (conn->c_sd == SLAPD_INVALID_SOCKET) ? "socket is invalid" :
|
||||
+ ((conn->c_connid != opconnid) ? "conn id does not match op conn id" :
|
||||
+ ((conn->c_flags & CONN_FLAG_CLOSING) ? "conn is closing" : "unknown")));
|
||||
+ }
|
||||
}
|
||||
}
|
||||
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,163 +0,0 @@
|
||||
From 57051154bafaf50b83fc27dadbd89a49fd1c8c36 Mon Sep 17 00:00:00 2001
From: Pierre Rogier <progier@redhat.com>
Date: Fri, 14 Jun 2024 13:27:10 +0200
Subject: [PATCH] Security fix for CVE-2024-5953

Description:
A denial of service vulnerability was found in the 389 Directory Server.
This issue may allow an authenticated user to cause a server denial
of service while attempting to log in with a user with a malformed hash
in their password.

Fix Description:
To prevent buffer overflow when a bind request is processed, the bind fails
if the hash size is not coherent, without even attempting to process
the hashed password further.

References:
- https://nvd.nist.gov/vuln/detail/CVE-2024-5953
- https://access.redhat.com/security/cve/CVE-2024-5953
- https://bugzilla.redhat.com/show_bug.cgi?id=2292104
---
.../tests/suites/password/regression_test.py | 54 ++++++++++++++++++-
ldap/servers/plugins/pwdstorage/md5_pwd.c | 9 +++-
ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c | 6 +++
3 files changed, 66 insertions(+), 3 deletions(-)

diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py
|
||||
index 8f1facb6d..1fa581643 100644
|
||||
--- a/dirsrvtests/tests/suites/password/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/regression_test.py
|
||||
@@ -7,12 +7,14 @@
|
||||
#
|
||||
import pytest
|
||||
import time
|
||||
+import glob
|
||||
+import base64
|
||||
from lib389._constants import PASSWORD, DN_DM, DEFAULT_SUFFIX
|
||||
from lib389._constants import SUFFIX, PASSWORD, DN_DM, DN_CONFIG, PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DEFAULT_CHANGELOG_DB
|
||||
from lib389 import Entry
|
||||
from lib389.topologies import topology_m1 as topo_supplier
|
||||
-from lib389.idm.user import UserAccounts
|
||||
-from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer
|
||||
+from lib389.idm.user import UserAccounts, UserAccount
|
||||
+from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer, ds_supports_new_changelog
|
||||
from lib389.topologies import topology_st as topo
|
||||
from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
|
||||
@@ -39,6 +41,13 @@ TEST_PASSWORDS += ['CNpwtest1ZZZZ', 'ZZZZZCNpwtest1',
|
||||
TEST_PASSWORDS2 = (
|
||||
'CN12pwtest31', 'SN3pwtest231', 'UID1pwtest123', 'MAIL2pwtest12@redhat.com', '2GN1pwtest123', 'People123')
|
||||
|
||||
+SUPPORTED_SCHEMES = (
|
||||
+ "{SHA}", "{SSHA}", "{SHA256}", "{SSHA256}",
|
||||
+ "{SHA384}", "{SSHA384}", "{SHA512}", "{SSHA512}",
|
||||
+ "{crypt}", "{NS-MTA-MD5}", "{clear}", "{MD5}",
|
||||
+ "{SMD5}", "{PBKDF2_SHA256}", "{PBKDF2_SHA512}",
|
||||
+ "{GOST_YESCRYPT}", "{PBKDF2-SHA256}", "{PBKDF2-SHA512}" )
|
||||
+
|
||||
def _check_unhashed_userpw(inst, user_dn, is_present=False):
|
||||
"""Check if unhashed#user#password attribute is present or not in the changelog"""
|
||||
unhashed_pwd_attribute = 'unhashed#user#password'
|
||||
@@ -319,6 +328,47 @@ def test_unhashed_pw_switch(topo_supplier):
|
||||
# Add debugging steps(if any)...
|
||||
pass
|
||||
|
||||
+@pytest.mark.parametrize("scheme", SUPPORTED_SCHEMES )
|
||||
+def test_long_hashed_password(topo, create_user, scheme):
|
||||
+ """Check that hashed password with very long value does not cause trouble
|
||||
+
|
||||
+ :id: 252a1f76-114b-11ef-8a7a-482ae39447e5
|
||||
+ :setup: standalone Instance
|
||||
+ :parametrized: yes
|
||||
+ :steps:
|
||||
+ 1. Add a test user user
|
||||
+ 2. Set a long password with requested scheme
|
||||
+ 3. Bind on that user using a wrong password
|
||||
+ 4. Check that instance is still alive
|
||||
+ 5. Remove the added user
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Should get ldap.INVALID_CREDENTIALS exception
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ inst.simple_bind_s(DN_DM, PASSWORD)
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ # Make sure that server is started as this test may crash it
|
||||
+ inst.start()
|
||||
+ # Adding Test user (It may already exists if previous test failed)
|
||||
+ user2 = UserAccount(inst, dn='uid=test_user_1002,ou=People,dc=example,dc=com')
|
||||
+ if not user2.exists():
|
||||
+ user2 = users.create_test_user(uid=1002, gid=2002)
|
||||
+ # Setting hashed password
|
||||
+ passwd = 'A'*4000
|
||||
+ hashed_passwd = scheme.encode('utf-8') + base64.b64encode(passwd.encode('utf-8'))
|
||||
+ user2.replace('userpassword', hashed_passwd)
|
||||
+ # Bind on that user using a wrong password
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ conn = user2.bind(PASSWORD)
|
||||
+ # Check that instance is still alive
|
||||
+ assert inst.status()
|
||||
+ # Remove the added user
|
||||
+ user2.delete()
|
||||
+
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/md5_pwd.c b/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
index 1e2cf58e7..b9a48d5ca 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
@@ -37,6 +37,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
unsigned char hash_out[MD5_HASH_LEN];
|
||||
unsigned char b2a_out[MD5_HASH_LEN * 2]; /* conservative */
|
||||
SECItem binary_item;
|
||||
+ size_t dbpwd_len = strlen(dbpwd);
|
||||
|
||||
ctx = PK11_CreateDigestContext(SEC_OID_MD5);
|
||||
if (ctx == NULL) {
|
||||
@@ -45,6 +46,12 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
goto loser;
|
||||
}
|
||||
|
||||
+ if (dbpwd_len >= sizeof b2a_out) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
|
||||
+ "The hashed password stored in the user entry is longer than any valid md5 hash");
|
||||
+ goto loser;
|
||||
+ }
|
||||
+
|
||||
/* create the hash */
|
||||
PK11_DigestBegin(ctx);
|
||||
PK11_DigestOp(ctx, (const unsigned char *)userpwd, strlen(userpwd));
|
||||
@@ -57,7 +64,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
bver = NSSBase64_EncodeItem(NULL, (char *)b2a_out, sizeof b2a_out, &binary_item);
|
||||
/* bver points to b2a_out upon success */
|
||||
if (bver) {
|
||||
- rc = slapi_ct_memcmp(bver, dbpwd, strlen(dbpwd));
|
||||
+ rc = slapi_ct_memcmp(bver, dbpwd, dbpwd_len);
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
|
||||
"Could not base64 encode hashed value for password compare");
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
index dcac4fcdd..82b8c9501 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
@@ -255,6 +255,12 @@ pbkdf2_sha256_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
passItem.data = (unsigned char *)userpwd;
|
||||
passItem.len = strlen(userpwd);
|
||||
|
||||
+ if (pwdstorage_base64_decode_len(dbpwd, dbpwd_len) > sizeof dbhash) {
|
||||
+ /* Hashed value is too long and cannot match any value generated by pbkdf2_sha256_hash */
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value. (hashed value is too long)\n");
|
||||
+ return result;
|
||||
+ }
|
||||
+
|
||||
/* Decode the DBpwd to bytes from b64 */
|
||||
if (PL_Base64Decode(dbpwd, dbpwd_len, dbhash) == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value\n");
|
||||
--
|
||||
2.46.0
|
||||
|
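
The core of the CVE-2024-5953 fix above is a bounds check performed before the stored hash is ever decoded or compared. A minimal sketch of the idea follows (illustrative names, not the pwdstorage plugin API, and without the constant-time compare the real code uses):

    #include <string.h>

    /* Assumed upper bound on the base64-encoded hash length for the scheme. */
    #define MAX_SCHEME_HASH_B64 64

    /* Returns 0 on match, -1 otherwise. Oversized stored values are rejected
     * up front, so they can never overflow a fixed-size compare/decode buffer. */
    static int checked_hash_cmp(const char *computed_b64, const char *stored_b64)
    {
        size_t stored_len = strlen(stored_b64);
        if (stored_len > MAX_SCHEME_HASH_B64) {
            return -1; /* longer than any hash this scheme can produce: fail the bind */
        }
        return (strcmp(computed_b64, stored_b64) == 0) ? 0 : -1;
    }
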
@ -1,178 +0,0 @@
|
||||
From e8a5b1deef1b455aafecb71efc029d2407b1b06f Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 16 Jul 2024 08:32:21 -0700
Subject: [PATCH] Issue 4778 - Add COMPACT_CL5 task to dsconf replication
(#6260)

Description: In 1.4.3, the changelog is not part of a backend.
It can be compacted with nsds5task: COMPACT_CL5 as part of the replication entry.
Add the task as a compact-changelog command under the dsconf replication tool.
Add tests for the feature and fix old tests.

Related: https://github.com/389ds/389-ds-base/issues/4778

Reviewed by: @progier389 (Thanks!)
---
.../tests/suites/config/compact_test.py | 36 ++++++++++++++---
src/lib389/lib389/cli_conf/replication.py | 10 +++++
src/lib389/lib389/replica.py | 40 +++++++++++++++++++
3 files changed, 81 insertions(+), 5 deletions(-)

diff --git a/dirsrvtests/tests/suites/config/compact_test.py b/dirsrvtests/tests/suites/config/compact_test.py
|
||||
index 317258d0e..31d98d10c 100644
|
||||
--- a/dirsrvtests/tests/suites/config/compact_test.py
|
||||
+++ b/dirsrvtests/tests/suites/config/compact_test.py
|
||||
@@ -13,14 +13,14 @@ import time
|
||||
import datetime
|
||||
from lib389.tasks import DBCompactTask
|
||||
from lib389.backend import DatabaseConfig
|
||||
-from lib389.replica import Changelog5
|
||||
+from lib389.replica import Changelog5, Replicas
|
||||
from lib389.topologies import topology_m1 as topo
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def test_compact_db_task(topo):
|
||||
- """Specify a test case purpose or name here
|
||||
+ """Test compaction of database
|
||||
|
||||
:id: 1b3222ef-a336-4259-be21-6a52f76e1859
|
||||
:setup: Standalone Instance
|
||||
@@ -48,7 +48,7 @@ def test_compact_db_task(topo):
|
||||
|
||||
|
||||
def test_compaction_interval_and_time(topo):
|
||||
- """Specify a test case purpose or name here
|
||||
+ """Test compaction interval and time for database and changelog
|
||||
|
||||
:id: f361bee9-d7e7-4569-9255-d7b60dd9d92e
|
||||
:setup: Supplier Instance
|
||||
@@ -95,10 +95,36 @@ def test_compaction_interval_and_time(topo):
|
||||
|
||||
# Check compaction occurred as expected
|
||||
time.sleep(45)
|
||||
- assert not inst.searchErrorsLog("Compacting databases")
|
||||
+ assert not inst.searchErrorsLog("compacting replication changelogs")
|
||||
|
||||
time.sleep(90)
|
||||
- assert inst.searchErrorsLog("Compacting databases")
|
||||
+ assert inst.searchErrorsLog("compacting replication changelogs")
|
||||
+ inst.deleteErrorLogs(restart=False)
|
||||
+
|
||||
+
|
||||
+def test_compact_cl5_task(topo):
|
||||
+ """Test compaction of changelog5 database
|
||||
+
|
||||
+ :id: aadfa9f7-73c0-463a-912c-0a29aa1f8167
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Run compaction task
|
||||
+ 2. Check errors log to show task was run
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ """
|
||||
+ inst = topo.ms["supplier1"]
|
||||
+
|
||||
+ replicas = Replicas(inst)
|
||||
+ replicas.compact_changelog(log=log)
|
||||
+
|
||||
+ # Check compaction occurred as expected. But instead of time.sleep(5) check 1 sec in loop
|
||||
+ for _ in range(5):
|
||||
+ time.sleep(1)
|
||||
+ if inst.searchErrorsLog("compacting replication changelogs"):
|
||||
+ break
|
||||
+ assert inst.searchErrorsLog("compacting replication changelogs")
|
||||
inst.deleteErrorLogs(restart=False)
|
||||
|
||||
|
||||
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
|
||||
index 352c0ee5b..ccc394255 100644
|
||||
--- a/src/lib389/lib389/cli_conf/replication.py
|
||||
+++ b/src/lib389/lib389/cli_conf/replication.py
|
||||
@@ -1199,6 +1199,11 @@ def restore_cl_dir(inst, basedn, log, args):
|
||||
replicas.restore_changelog(replica_roots=args.REPLICA_ROOTS, log=log)
|
||||
|
||||
|
||||
+def compact_cl5(inst, basedn, log, args):
|
||||
+ replicas = Replicas(inst)
|
||||
+ replicas.compact_changelog(replica_roots=args.REPLICA_ROOTS, log=log)
|
||||
+
|
||||
+
|
||||
def create_parser(subparsers):
|
||||
|
||||
############################################
|
||||
@@ -1326,6 +1331,11 @@ def create_parser(subparsers):
|
||||
help="Specify one replica root whose changelog you want to restore. "
|
||||
"The replica root will be consumed from the LDIF file name if the option is omitted.")
|
||||
|
||||
+ compact_cl = repl_subcommands.add_parser('compact-changelog', help='Compact the changelog database')
|
||||
+ compact_cl.set_defaults(func=compact_cl5)
|
||||
+ compact_cl.add_argument('REPLICA_ROOTS', nargs="+",
|
||||
+ help="Specify replica roots whose changelog you want to compact.")
|
||||
+
|
||||
restore_changelogdir = restore_subcommands.add_parser('from-changelogdir', help='Restore LDIF files from changelogdir.')
|
||||
restore_changelogdir.set_defaults(func=restore_cl_dir)
|
||||
restore_changelogdir.add_argument('REPLICA_ROOTS', nargs="+",
|
||||
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
|
||||
index 94e1fdad5..1f321972d 100644
|
||||
--- a/src/lib389/lib389/replica.py
|
||||
+++ b/src/lib389/lib389/replica.py
|
||||
@@ -1648,6 +1648,11 @@ class Replica(DSLdapObject):
|
||||
"""
|
||||
self.replace('nsds5task', 'ldif2cl')
|
||||
|
||||
+ def begin_task_compact_cl5(self):
|
||||
+ """Begin COMPACT_CL5 task
|
||||
+ """
|
||||
+ self.replace('nsds5task', 'COMPACT_CL5')
|
||||
+
|
||||
def get_suffix(self):
|
||||
"""Return the suffix
|
||||
"""
|
||||
@@ -1829,6 +1834,41 @@ class Replicas(DSLdapObjects):
|
||||
log.error(f"Changelog LDIF for '{repl_root}' was not found")
|
||||
continue
|
||||
|
||||
+ def compact_changelog(self, replica_roots=[], log=None):
|
||||
+ """Compact Directory Server replication changelog
|
||||
+
|
||||
+ :param replica_roots: Replica suffixes that need to be processed (and optional LDIF file path)
|
||||
+ :type replica_roots: list of str
|
||||
+ :param log: The logger object
|
||||
+ :type log: logger
|
||||
+ """
|
||||
+
|
||||
+ if log is None:
|
||||
+ log = self._log
|
||||
+
|
||||
+ # Check if the changelog entry exists
|
||||
+ try:
|
||||
+ cl = Changelog5(self._instance)
|
||||
+ cl.get_attr_val_utf8_l("nsslapd-changelogdir")
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ raise ValueError("Changelog entry was not found. Probably, the replication is not enabled on this instance")
|
||||
+
|
||||
+ # Get all the replicas on the server if --replica-roots option is not specified
|
||||
+ repl_roots = []
|
||||
+ if not replica_roots:
|
||||
+ for replica in self.list():
|
||||
+ repl_roots.append(replica.get_attr_val_utf8("nsDS5ReplicaRoot"))
|
||||
+ else:
|
||||
+ for repl_root in replica_roots:
|
||||
+ repl_roots.append(repl_root)
|
||||
+
|
||||
+ # Dump the changelog for the replica
|
||||
+
|
||||
+ # Dump the changelog for the replica
|
||||
+ for repl_root in repl_roots:
|
||||
+ replica = self.get(repl_root)
|
||||
+ replica.begin_task_compact_cl5()
|
||||
+
|
||||
|
||||
class BootstrapReplicationManager(DSLdapObject):
|
||||
"""A Replication Manager credential for bootstrapping the repl process.
|
||||
--
|
||||
2.47.0
|
||||
|
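
For reference, the subcommand added by the Issue 4778 patch above would be invoked along these lines (instance name and suffix are placeholders; the exact spelling should be checked against dsconf --help for the installed version):

    dsconf supplier1 replication compact-changelog dc=example,dc=com
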
@ -0,0 +1,67 @@
|
||||
From fef4875a9c3d67ef424a1fb1698ae011152735b1 Mon Sep 17 00:00:00 2001
From: Anuar Beisembayev <111912342+abeisemb@users.noreply.github.com>
Date: Wed, 23 Jul 2025 23:48:11 -0400
Subject: [PATCH] Issue 6772 - dsconf - Replicas with the "consumer" role allow
for viewing and modification of their changelog. (#6773)

dsconf currently allows users to set and retrieve changelogs in consumer replicas, which do not have officially supported changelogs. This can lead to undefined behavior and confusion.
This commit prints a warning message if the user tries to interact with a changelog on a consumer replica.

Resolves: https://github.com/389ds/389-ds-base/issues/6772

Reviewed by: @droideck
---
src/lib389/lib389/cli_conf/replication.py | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)

diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
|
||||
index 6f77f34ca..a18bf83ca 100644
|
||||
--- a/src/lib389/lib389/cli_conf/replication.py
|
||||
+++ b/src/lib389/lib389/cli_conf/replication.py
|
||||
@@ -686,6 +686,9 @@ def set_per_backend_cl(inst, basedn, log, args):
|
||||
replace_list = []
|
||||
did_something = False
|
||||
|
||||
+ if (is_replica_role_consumer(inst, suffix)):
|
||||
+ log.info("Warning: Changelogs are not supported for consumer replicas. You may run into undefined behavior.")
|
||||
+
|
||||
if args.encrypt:
|
||||
cl.replace('nsslapd-encryptionalgorithm', 'AES')
|
||||
del args.encrypt
|
||||
@@ -715,6 +718,10 @@ def set_per_backend_cl(inst, basedn, log, args):
|
||||
# that means there is a changelog config entry per backend (aka suffix)
|
||||
def get_per_backend_cl(inst, basedn, log, args):
|
||||
suffix = args.suffix
|
||||
+
|
||||
+ if (is_replica_role_consumer(inst, suffix)):
|
||||
+ log.info("Warning: Changelogs are not supported for consumer replicas. You may run into undefined behavior.")
|
||||
+
|
||||
cl = Changelog(inst, suffix)
|
||||
if args and args.json:
|
||||
log.info(cl.get_all_attrs_json())
|
||||
@@ -822,6 +829,22 @@ def del_repl_manager(inst, basedn, log, args):
|
||||
|
||||
log.info("Successfully deleted replication manager: " + manager_dn)
|
||||
|
||||
+def is_replica_role_consumer(inst, suffix):
|
||||
+ """Helper function for get_per_backend_cl and set_per_backend_cl.
|
||||
+ Makes sure the instance in question is not a consumer, which is a role that
|
||||
+ does not support changelogs.
|
||||
+ """
|
||||
+ replicas = Replicas(inst)
|
||||
+ try:
|
||||
+ replica = replicas.get(suffix)
|
||||
+ role = replica.get_role()
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ raise ValueError(f"Backend \"{suffix}\" is not enabled for replication")
|
||||
+
|
||||
+ if role == ReplicaRole.CONSUMER:
|
||||
+ return True
|
||||
+ else:
|
||||
+ return False
|
||||
|
||||
#
|
||||
# Agreements
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,55 +0,0 @@
|
||||
From d1cd9a5675e2953b7c8034ebb87a434cdd3ce0c3 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 2 Dec 2024 17:18:32 +0100
Subject: [PATCH] Issue 6417 - If an entry RDN is identical to the suffix, then
Entryrdn gets broken during a reindex (#6418)

Bug description:
During a reindex, the entryrdn index is built at the end from
each entry in the suffix.
If one entry has an RDN that is identical to the suffix DN,
then entryrdn_lookup_dn may erroneously return the suffix DN
as the DN of the entry.

Fix description:
When the lookup entry has no parent (because the index is under
work) the loop looks up the entry using the RDN.
If this RDN matches the suffix DN, then it exits from the loop
with the suffix DN.
Before exiting it checks that the original lookup entryID
is equal to the suffix entryID. If it does not match,
the function fails and then the DN of the entry will be
built from id2entry.

fixes: #6417

Reviewed by: Pierre Rogier, Simon Pichugin (Thanks !!!)
---
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 5797dd779..83b041192 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1224,7 +1224,16 @@ entryrdn_lookup_dn(backend *be,
|
||||
}
|
||||
goto bail;
|
||||
}
|
||||
- maybesuffix = 1;
|
||||
+ if (workid == 1) {
|
||||
+ /* The loop (workid) iterates from the starting 'id'
|
||||
+ * up to the suffix ID (i.e. '1').
|
||||
+ * A corner case (#6417) is if an entry, on the path
|
||||
+ * 'id' -> suffix, has the same RDN than the suffix.
|
||||
+ * In order to erroneously believe the loop hits the suffix
|
||||
+ * we need to check that 'workid' is '1' (suffix)
|
||||
+ */
|
||||
+ maybesuffix = 1;
|
||||
+ }
|
||||
} else {
|
||||
_entryrdn_cursor_print_error("entryrdn_lookup_dn",
|
||||
key.data, data.size, data.ulen, rc);
|
||||
--
|
||||
2.48.0
|
||||
|
@ -0,0 +1,143 @@
|
||||
From 4cb50f83397e6a5e14a9b75ed15f24189ee2792b Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 21 Jul 2025 18:07:21 -0400
Subject: [PATCH] Issue 6893 - Log user that is updated during password modify
extended operation

Description:

When a user's password is updated via an extended operation (password modify
plugin) we only log the bind DN and not what user was updated. While "internal
operation" logging will display the user, it should be logged by the default
logging level.

Add access logging using "EXT_INFO" where we display the bind dn, target
dn, and message.

Relates: https://github.com/389ds/389-ds-base/issues/6893

Reviewed by: spichugi & tbordaz (Thanks!!)
---
ldap/servers/slapd/passwd_extop.c | 56 +++++++++----------
1 file changed, 28 insertions(+), 28 deletions(-)

diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c
|
||||
index 4bb60afd6..0296d64fb 100644
|
||||
--- a/ldap/servers/slapd/passwd_extop.c
|
||||
+++ b/ldap/servers/slapd/passwd_extop.c
|
||||
@@ -465,12 +465,13 @@ passwd_modify_extop(Slapi_PBlock *pb)
|
||||
BerElement *response_ber = NULL;
|
||||
Slapi_Entry *targetEntry = NULL;
|
||||
Connection *conn = NULL;
|
||||
+ Operation *pb_op = NULL;
|
||||
LDAPControl **req_controls = NULL;
|
||||
LDAPControl **resp_controls = NULL;
|
||||
passwdPolicy *pwpolicy = NULL;
|
||||
Slapi_DN *target_sdn = NULL;
|
||||
Slapi_Entry *referrals = NULL;
|
||||
- /* Slapi_DN sdn; */
|
||||
+ Slapi_Backend *be = NULL;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "passwd_modify_extop", "=>\n");
|
||||
|
||||
@@ -647,7 +648,7 @@ parse_req_done:
|
||||
}
|
||||
dn = slapi_sdn_get_ndn(target_sdn);
|
||||
if (dn == NULL || *dn == '\0') {
|
||||
- /* Refuse the operation because they're bound anonymously */
|
||||
+ /* Invalid DN - refuse the operation */
|
||||
errMesg = "Invalid dn.";
|
||||
rc = LDAP_INVALID_DN_SYNTAX;
|
||||
goto free_and_return;
|
||||
@@ -724,14 +725,19 @@ parse_req_done:
|
||||
ber_free(response_ber, 1);
|
||||
}
|
||||
|
||||
- slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET, (void *)dn);
|
||||
+ slapi_pblock_get(pb, SLAPI_OPERATION, &pb_op);
|
||||
+ if (pb_op == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "pb_op is NULL\n");
|
||||
+ goto free_and_return;
|
||||
+ }
|
||||
|
||||
+ slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET, (void *)dn);
|
||||
/* Now we have the DN, look for the entry */
|
||||
ret = passwd_modify_getEntry(dn, &targetEntry);
|
||||
/* If we can't find the entry, then that's an error */
|
||||
if (ret) {
|
||||
/* Couldn't find the entry, fail */
|
||||
- errMesg = "No such Entry exists.";
|
||||
+ errMesg = "No such entry exists.";
|
||||
rc = LDAP_NO_SUCH_OBJECT;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -742,30 +748,18 @@ parse_req_done:
|
||||
leak any useful information to the client such as current password
|
||||
wrong, etc.
|
||||
*/
|
||||
- Operation *pb_op = NULL;
|
||||
- slapi_pblock_get(pb, SLAPI_OPERATION, &pb_op);
|
||||
- if (pb_op == NULL) {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "pb_op is NULL\n");
|
||||
- goto free_and_return;
|
||||
- }
|
||||
-
|
||||
operation_set_target_spec(pb_op, slapi_entry_get_sdn(targetEntry));
|
||||
slapi_pblock_set(pb, SLAPI_REQUESTOR_ISROOT, &pb_op->o_isroot);
|
||||
|
||||
- /* In order to perform the access control check , we need to select a backend (even though
|
||||
- * we don't actually need it otherwise).
|
||||
- */
|
||||
- {
|
||||
- Slapi_Backend *be = NULL;
|
||||
-
|
||||
- be = slapi_mapping_tree_find_backend_for_sdn(slapi_entry_get_sdn(targetEntry));
|
||||
- if (NULL == be) {
|
||||
- errMesg = "Failed to find backend for target entry";
|
||||
- rc = LDAP_OPERATIONS_ERROR;
|
||||
- goto free_and_return;
|
||||
- }
|
||||
- slapi_pblock_set(pb, SLAPI_BACKEND, be);
|
||||
+ /* In order to perform the access control check, we need to select a backend (even though
|
||||
+ * we don't actually need it otherwise). */
|
||||
+ be = slapi_mapping_tree_find_backend_for_sdn(slapi_entry_get_sdn(targetEntry));
|
||||
+ if (NULL == be) {
|
||||
+ errMesg = "Failed to find backend for target entry";
|
||||
+ rc = LDAP_NO_SUCH_OBJECT;
|
||||
+ goto free_and_return;
|
||||
}
|
||||
+ slapi_pblock_set(pb, SLAPI_BACKEND, be);
|
||||
|
||||
/* Check if the pwpolicy control is present */
|
||||
slapi_pblock_get(pb, SLAPI_PWPOLICY, &need_pwpolicy_ctrl);
|
||||
@@ -797,10 +791,7 @@ parse_req_done:
|
||||
/* Check if password policy allows users to change their passwords. We need to do
|
||||
* this here since the normal modify code doesn't perform this check for
|
||||
* internal operations. */
|
||||
-
|
||||
- Connection *pb_conn;
|
||||
- slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn);
|
||||
- if (!pb_op->o_isroot && !pb_conn->c_needpw && !pwpolicy->pw_change) {
|
||||
+ if (!pb_op->o_isroot && !conn->c_needpw && !pwpolicy->pw_change) {
|
||||
if (NULL == bindSDN) {
|
||||
bindSDN = slapi_sdn_new_normdn_byref(bindDN);
|
||||
}
|
||||
@@ -848,6 +839,15 @@ free_and_return:
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop",
|
||||
"%s\n", errMesg ? errMesg : "success");
|
||||
|
||||
+ if (dn) {
|
||||
+ /* Log the target ndn (if we have a target ndn) */
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " op=%d EXT_INFO name=\"passwd_modify_plugin\" bind_dn=\"%s\" target_dn=\"%s\" msg=\"%s\" rc=%d\n",
|
||||
+ conn ? conn->c_connid : -1, pb_op ? pb_op->o_opid : -1,
|
||||
+ bindDN ? bindDN : "", dn,
|
||||
+ errMesg ? errMesg : "success", rc);
|
||||
+ }
|
||||
+
|
||||
if ((rc == LDAP_REFERRAL) && (referrals)) {
|
||||
send_referrals_from_entry(pb, referrals);
|
||||
} else {
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,267 +0,0 @@
|
||||
From 9b2fc77a36156ea987dcea6e2043f8e4c4a6b259 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 18 Jun 2024 14:21:07 +0200
Subject: [PATCH] Issue 6224 - d2entry - Could not open id2entry err 0 - at
startup when having sub-suffixes (#6225)

Problem: d2entry - Could not open id2entry err 0 is logged at startup when having sub-suffixes
Reason: The slapi_exist_referral internal search accesses a backend that is not yet started.
Solution: Limit the internal search to a single backend

Issue: #6224

Reviewed by: @droideck Thanks!

(cherry picked from commit 796f703021e961fdd8cbc53b4ad4e20258af0e96)
---
.../tests/suites/ds_logs/ds_logs_test.py | 1 +
.../suites/mapping_tree/regression_test.py | 161 +++++++++++++++++-
ldap/servers/slapd/backend.c | 7 +-
3 files changed, 159 insertions(+), 10 deletions(-)

diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 812936c62..84a9c6ec8 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,6 +1222,7 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+<<<<<<< HEAD
|
||||
def test_referral_subsuffix(topology_st, request):
|
||||
"""Test the results of an inverted parent suffix definition in the configuration.
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/mapping_tree/regression_test.py b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
index 99d4a1d5f..689ff9f59 100644
|
||||
--- a/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
@@ -11,10 +11,14 @@ import ldap
|
||||
import logging
|
||||
import os
|
||||
import pytest
|
||||
+import time
|
||||
from lib389.backend import Backends, Backend
|
||||
+from lib389._constants import HOST_STANDALONE, PORT_STANDALONE, DN_DM, PW_DM
|
||||
from lib389.dbgen import dbgen_users
|
||||
from lib389.mappingTree import MappingTrees
|
||||
from lib389.topologies import topology_st
|
||||
+from lib389.referral import Referrals, Referral
|
||||
+
|
||||
|
||||
try:
|
||||
from lib389.backend import BackendSuffixView
|
||||
@@ -31,14 +35,26 @@ else:
|
||||
logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
+PARENT_SUFFIX = "dc=parent"
|
||||
+CHILD1_SUFFIX = f"dc=child1,{PARENT_SUFFIX}"
|
||||
+CHILD2_SUFFIX = f"dc=child2,{PARENT_SUFFIX}"
|
||||
+
|
||||
+PARENT_REFERRAL_DN = f"cn=ref,ou=People,{PARENT_SUFFIX}"
|
||||
+CHILD1_REFERRAL_DN = f"cn=ref,ou=people,{CHILD1_SUFFIX}"
|
||||
+CHILD2_REFERRAL_DN = f"cn=ref,ou=people,{CHILD2_SUFFIX}"
|
||||
+
|
||||
+REFERRAL_CHECK_PEDIOD = 7
|
||||
+
|
||||
+
|
||||
+
|
||||
BESTRUCT = [
|
||||
- { "bename" : "parent", "suffix": "dc=parent" },
|
||||
- { "bename" : "child1", "suffix": "dc=child1,dc=parent" },
|
||||
- { "bename" : "child2", "suffix": "dc=child2,dc=parent" },
|
||||
+ { "bename" : "parent", "suffix": PARENT_SUFFIX },
|
||||
+ { "bename" : "child1", "suffix": CHILD1_SUFFIX },
|
||||
+ { "bename" : "child2", "suffix": CHILD2_SUFFIX },
|
||||
]
|
||||
|
||||
|
||||
-@pytest.fixture(scope="function")
|
||||
+@pytest.fixture(scope="module")
|
||||
def topo(topology_st, request):
|
||||
bes = []
|
||||
|
||||
@@ -50,6 +66,9 @@ def topo(topology_st, request):
|
||||
request.addfinalizer(fin)
|
||||
|
||||
inst = topology_st.standalone
|
||||
+ # Reduce nsslapd-referral-check-period to accelerate test
|
||||
+ topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK_PEDIOD))
|
||||
+
|
||||
ldif_files = {}
|
||||
for d in BESTRUCT:
|
||||
bename = d['bename']
|
||||
@@ -76,14 +95,13 @@ def topo(topology_st, request):
|
||||
inst.start()
|
||||
return topology_st
|
||||
|
||||
-# Parameters for test_change_repl_passwd
|
||||
-EXPECTED_ENTRIES = (("dc=parent", 39), ("dc=child1,dc=parent", 13), ("dc=child2,dc=parent", 13))
|
||||
+# Parameters for test_sub_suffixes
|
||||
@pytest.mark.parametrize(
|
||||
"orphan_param",
|
||||
[
|
||||
- pytest.param( ( True, { "dc=parent": 2, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-true" ),
|
||||
- pytest.param( ( False, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-false" ),
|
||||
- pytest.param( ( None, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="no-orphan" ),
|
||||
+ pytest.param( ( True, { PARENT_SUFFIX: 2, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="orphan-is-true" ),
|
||||
+ pytest.param( ( False, { PARENT_SUFFIX: 3, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="orphan-is-false" ),
|
||||
+ pytest.param( ( None, { PARENT_SUFFIX: 3, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="no-orphan" ),
|
||||
],
|
||||
)
|
||||
|
||||
@@ -128,3 +146,128 @@ def test_sub_suffixes(topo, orphan_param):
|
||||
log.info('Test PASSED')
|
||||
|
||||
|
||||
+def test_one_level_search_on_sub_suffixes(topo):
|
||||
+ """ Perform one level scoped search accross suffix and sub-suffix
|
||||
+
|
||||
+ :id: 92f3139e-280e-11ef-a989-482ae39447e5
|
||||
+ :feature: mapping-tree
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+ :steps:
|
||||
+ 1. Perform a ONE LEVEL search on dc=parent
|
||||
+ 2. Check that all expected entries have been returned
|
||||
+ 3. Check that only the expected entries have been returned
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. each expected dn should be in the result set
|
||||
+ 3. Number of returned entries should be the same as the number of expected entries
|
||||
+ """
|
||||
+ expected_dns = ( 'dc=child1,dc=parent',
|
||||
+ 'dc=child2,dc=parent',
|
||||
+ 'ou=accounting,dc=parent',
|
||||
+ 'ou=product development,dc=parent',
|
||||
+ 'ou=product testing,dc=parent',
|
||||
+ 'ou=human resources,dc=parent',
|
||||
+ 'ou=payroll,dc=parent',
|
||||
+ 'ou=people,dc=parent',
|
||||
+ 'ou=groups,dc=parent', )
|
||||
+ entries = topo.standalone.search_s("dc=parent", ldap.SCOPE_ONELEVEL, "(objectClass=*)",
|
||||
+ attrlist=("dc","ou"), escapehatch='i am sure')
|
||||
+ log.info(f'one level search on dc=parent returned the following entries: {entries}')
|
||||
+ dns = [ entry.dn for entry in entries ]
|
||||
+ for dn in expected_dns:
|
||||
+ assert dn in dns
|
||||
+ assert len(entries) == len(expected_dns)
|
||||
+
|
||||
+
|
||||
+def test_sub_suffixes_errlog(topo):
|
||||
+ """ check the entries found on suffix/sub-suffix
|
||||
+ used int
|
||||
+
|
||||
+ :id: 1db9d52e-28de-11ef-b286-482ae39447e5
|
||||
+ :feature: mapping-tree
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+ :steps:
|
||||
+ 1. Check that id2entry error message is not in the error log.
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ assert not inst.searchErrorsLog('id2entry - Could not open id2entry err 0')
|
||||
+
|
||||
+
|
||||
+# Parameters for test_referral_subsuffix:
|
||||
+# a tuple pair containing:
|
||||
+# - list of referral dn that must be created
|
||||
+# - dict of searches basedn: expected_number_of_referrals
|
||||
+@pytest.mark.parametrize(
|
||||
+ "parameters",
|
||||
+ [
|
||||
+ pytest.param( ((PARENT_REFERRAL_DN, CHILD1_REFERRAL_DN), {PARENT_SUFFIX: 2, CHILD1_SUFFIX:1, CHILD2_SUFFIX:0}), id="Both"),
|
||||
+ pytest.param( ((PARENT_REFERRAL_DN,), {PARENT_SUFFIX: 1, CHILD1_SUFFIX:0, CHILD2_SUFFIX:0}) , id="Parent"),
|
||||
+ pytest.param( ((CHILD1_REFERRAL_DN,), {PARENT_SUFFIX: 1, CHILD1_SUFFIX:1, CHILD2_SUFFIX:0}) , id="Child"),
|
||||
+ pytest.param( ((), {PARENT_SUFFIX: 0, CHILD1_SUFFIX:0, CHILD2_SUFFIX:0}), id="None"),
|
||||
+ ])
|
||||
+
|
||||
+def test_referral_subsuffix(topo, request, parameters):
|
||||
+ """Test the results of an inverted parent suffix definition in the configuration.
|
||||
+
|
||||
+ For more details see:
|
||||
+ https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html
|
||||
+
|
||||
+ :id: 4e111a22-2a5d-11ef-a890-482ae39447e5
|
||||
+ :feature: referrals
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+ :parametrized: yes
|
||||
+ :steps:
|
||||
+ refs,searches = referrals
|
||||
+
|
||||
+ 1. Create the referrals according to the current parameter
|
||||
+ 2. Wait enough time so they get detected
|
||||
+ 3. For each search base dn, in the current parameter, perform the two following steps
|
||||
+ 4. In 3. loop: Perform a search with provided base dn
|
||||
+ 5. In 3. loop: Check that the number of returned referrals is the expected one.
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ all steps succeeds
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info('Deleting all referrals')
|
||||
+ for ref in Referrals(inst, PARENT_SUFFIX).list():
|
||||
+ ref.delete()
|
||||
+
|
||||
+ # Set cleanup callback
|
||||
+ if DEBUGGING:
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Remove all referrals
|
||||
+ fin()
|
||||
+ # Add requested referrals
|
||||
+ for dn in parameters[0]:
|
||||
+ refs = Referral(inst, dn=dn)
|
||||
+ refs.create(basedn=dn, properties={ 'cn': 'ref', 'ref': f'ldap://remote/{dn}'})
|
||||
+ # Wait that the internal search detects the referrals
|
||||
+ time.sleep(REFERRAL_CHECK_PEDIOD + 1)
|
||||
+ # Open a test connection
|
||||
+ ldc = ldap.initialize(f"ldap://{HOST_STANDALONE}:{PORT_STANDALONE}")
|
||||
+ ldc.set_option(ldap.OPT_REFERRALS,0)
|
||||
+ ldc.simple_bind_s(DN_DM,PW_DM)
|
||||
+
|
||||
+ # For each search base dn:
|
||||
+ for basedn,nbref in parameters[1].items():
|
||||
+ log.info(f"Referrals are: {parameters[0]}")
|
||||
+ # Perform a search with provided base dn
|
||||
+ result = ldc.search_s(basedn, ldap.SCOPE_SUBTREE, filterstr="(ou=People)")
|
||||
+ found_dns = [ dn for dn,entry in result if dn is not None ]
|
||||
+ found_refs = [ entry for dn,entry in result if dn is None ]
|
||||
+ log.info(f"Search on {basedn} returned {found_dns} and {found_refs}")
|
||||
+ # Check that the number of returned referrals is the expected one.
|
||||
+ log.info(f"Search returned {len(found_refs)} referrals. {nbref} are expected.")
|
||||
+ assert len(found_refs) == nbref
|
||||
+ ldc.unbind()
|
||||
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
|
||||
index 498f683b1..f86b0b9b6 100644
|
||||
--- a/ldap/servers/slapd/backend.c
|
||||
+++ b/ldap/servers/slapd/backend.c
|
||||
@@ -230,12 +230,17 @@ slapi_exist_referral(Slapi_Backend *be)
|
||||
|
||||
/* search for ("smart") referral entries */
|
||||
search_pb = slapi_pblock_new();
|
||||
- server_ctrls = (LDAPControl **) slapi_ch_calloc(2, sizeof (LDAPControl *));
|
||||
+ server_ctrls = (LDAPControl **) slapi_ch_calloc(3, sizeof (LDAPControl *));
|
||||
server_ctrls[0] = (LDAPControl *) slapi_ch_malloc(sizeof (LDAPControl));
|
||||
server_ctrls[0]->ldctl_oid = slapi_ch_strdup(LDAP_CONTROL_MANAGEDSAIT);
|
||||
server_ctrls[0]->ldctl_value.bv_val = NULL;
|
||||
server_ctrls[0]->ldctl_value.bv_len = 0;
|
||||
server_ctrls[0]->ldctl_iscritical = '\0';
|
||||
+ server_ctrls[1] = (LDAPControl *) slapi_ch_malloc(sizeof (LDAPControl));
|
||||
+ server_ctrls[1]->ldctl_oid = slapi_ch_strdup(MTN_CONTROL_USE_ONE_BACKEND_EXT_OID);
|
||||
+ server_ctrls[1]->ldctl_value.bv_val = NULL;
|
||||
+ server_ctrls[1]->ldctl_value.bv_len = 0;
|
||||
+ server_ctrls[1]->ldctl_iscritical = '\0';
|
||||
slapi_search_internal_set_pb(search_pb, suffix, LDAP_SCOPE_SUBTREE,
|
||||
filter, NULL, 0, server_ctrls, NULL,
|
||||
(void *) plugin_get_default_component_id(), 0);
|
||||
--
|
||||
2.48.0
|
||||
|
@ -0,0 +1,98 @@
|
||||
From ffc3a81ed5852b7f1fbaed79b9b776af23d65b7c Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 23 Jul 2025 19:35:32 -0400
Subject: [PATCH] Issue 6895 - Crash if repl keep alive entry can not be
 created

Description:

Heap use after free when logging that the replication keep-alive entry can not
be created. slapi_add_internal_pb() frees the slapi entry, and we then try to
get the DN from the freed entry, which causes a use-after-free crash.

Relates: https://github.com/389ds/389-ds-base/issues/6895

Reviewed by: spichugi (Thanks!)
---
ldap/servers/plugins/chainingdb/cb_config.c | 3 +--
ldap/servers/plugins/posix-winsync/posix-winsync.c | 1 -
ldap/servers/plugins/replication/repl5_init.c | 3 ---
ldap/servers/plugins/replication/repl5_replica.c | 8 ++++----
4 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/ldap/servers/plugins/chainingdb/cb_config.c b/ldap/servers/plugins/chainingdb/cb_config.c
|
||||
index 40a7088d7..24fa1bcb3 100644
|
||||
--- a/ldap/servers/plugins/chainingdb/cb_config.c
|
||||
+++ b/ldap/servers/plugins/chainingdb/cb_config.c
|
||||
@@ -44,8 +44,7 @@ cb_config_add_dse_entries(cb_backend *cb, char **entries, char *string1, char *s
|
||||
slapi_pblock_get(util_pb, SLAPI_PLUGIN_INTOP_RESULT, &res);
|
||||
if (LDAP_SUCCESS != res && LDAP_ALREADY_EXISTS != res) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, CB_PLUGIN_SUBSYSTEM,
|
||||
- "cb_config_add_dse_entries - Unable to add config entry (%s) to the DSE: %s\n",
|
||||
- slapi_entry_get_dn(e),
|
||||
+ "cb_config_add_dse_entries - Unable to add config entry to the DSE: %s\n",
|
||||
ldap_err2string(res));
|
||||
rc = res;
|
||||
slapi_pblock_destroy(util_pb);
|
||||
diff --git a/ldap/servers/plugins/posix-winsync/posix-winsync.c b/ldap/servers/plugins/posix-winsync/posix-winsync.c
|
||||
index 51a55b643..3a002bb70 100644
|
||||
--- a/ldap/servers/plugins/posix-winsync/posix-winsync.c
|
||||
+++ b/ldap/servers/plugins/posix-winsync/posix-winsync.c
|
||||
@@ -1626,7 +1626,6 @@ posix_winsync_end_update_cb(void *cbdata __attribute__((unused)),
|
||||
"posix_winsync_end_update_cb: "
|
||||
"add task entry\n");
|
||||
}
|
||||
- /* slapi_entry_free(e_task); */
|
||||
slapi_pblock_destroy(pb);
|
||||
pb = NULL;
|
||||
posix_winsync_config_reset_MOFTaskCreated();
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_init.c b/ldap/servers/plugins/replication/repl5_init.c
|
||||
index 8bc0b5372..5047fb8dc 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_init.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_init.c
|
||||
@@ -682,7 +682,6 @@ create_repl_schema_policy(void)
|
||||
repl_schema_top,
|
||||
ldap_err2string(return_value));
|
||||
rc = -1;
|
||||
- slapi_entry_free(e); /* The entry was not consumed */
|
||||
goto done;
|
||||
}
|
||||
slapi_pblock_destroy(pb);
|
||||
@@ -703,7 +702,6 @@ create_repl_schema_policy(void)
|
||||
repl_schema_supplier,
|
||||
ldap_err2string(return_value));
|
||||
rc = -1;
|
||||
- slapi_entry_free(e); /* The entry was not consumed */
|
||||
goto done;
|
||||
}
|
||||
slapi_pblock_destroy(pb);
|
||||
@@ -724,7 +722,6 @@ create_repl_schema_policy(void)
|
||||
repl_schema_consumer,
|
||||
ldap_err2string(return_value));
|
||||
rc = -1;
|
||||
- slapi_entry_free(e); /* The entry was not consumed */
|
||||
goto done;
|
||||
}
|
||||
slapi_pblock_destroy(pb);
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
|
||||
index 59062b46b..a97c807e9 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_replica.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_replica.c
|
||||
@@ -465,10 +465,10 @@ replica_subentry_create(const char *repl_root, ReplicaId rid)
|
||||
if (return_value != LDAP_SUCCESS &&
|
||||
return_value != LDAP_ALREADY_EXISTS &&
|
||||
return_value != LDAP_REFERRAL /* CONSUMER */) {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_subentry_create - Unable to "
|
||||
- "create replication keep alive entry %s: error %d - %s\n",
|
||||
- slapi_entry_get_dn_const(e),
|
||||
- return_value, ldap_err2string(return_value));
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_subentry_create - "
|
||||
+ "Unable to create replication keep alive entry 'cn=%s %d,%s': error %d - %s\n",
|
||||
+ KEEP_ALIVE_ENTRY, rid, repl_root,
|
||||
+ return_value, ldap_err2string(return_value));
|
||||
rc = -1;
|
||||
goto done;
|
||||
}
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,32 +0,0 @@
|
||||
From ab06b3cebbe0287ef557c0307ca2ee86fe8cb761 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Thu, 21 Nov 2024 16:26:02 +0100
|
||||
Subject: [PATCH] Issue 6224 - Fix merge issue in 389-ds-base-2.1 for
|
||||
ds_log_test.py (#6414)
|
||||
|
||||
Fix a merge issue during cherry-pick over 389-ds-base-2.1 and 389-ds-base-1.4.3 branches
|
||||
|
||||
Issue: #6224
|
||||
|
||||
Reviewed by: @mreynolds389
|
||||
|
||||
(cherry picked from commit 2b541c64b8317209e4dafa4f82918d714039907c)
|
||||
---
|
||||
dirsrvtests/tests/suites/ds_logs/ds_logs_test.py | 1 -
|
||||
1 file changed, 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 84a9c6ec8..812936c62 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,7 +1222,6 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-<<<<<<< HEAD
|
||||
def test_referral_subsuffix(topology_st, request):
|
||||
"""Test the results of an inverted parent suffix definition in the configuration.
|
||||
|
||||
--
|
||||
2.48.0
|
||||
|
@ -0,0 +1,352 @@
|
||||
From 191634746fdcb7e26a154cd00a22324e02a10110 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Mon, 28 Jul 2025 10:50:26 -0700
Subject: [PATCH] Issue 6250 - Add test for entryUSN overflow on failed add
 operations (#6821)

Description: Add a comprehensive test to reproduce the entryUSN overflow
issue where failed attempts to add existing entries followed by modify
operations cause entryUSN values to underflow/overflow instead of
incrementing properly.

Related: https://github.com/389ds/389-ds-base/issues/6250

Reviewed by: @tbordaz (Thanks!)
---
.../suites/plugins/entryusn_overflow_test.py | 323 ++++++++++++++++++
1 file changed, 323 insertions(+)
create mode 100644 dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py

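The failure mode described above can also be observed from a plain LDAP client:
a duplicate ADD must be rejected without disturbing the USN counter, and a
subsequent MODIFY should bump entryUSN by exactly one. Below is a minimal
python-ldap sketch of that check, separate from the patch itself; the
connection URL, bind credentials and target DN are placeholders.

# Minimal sketch (not part of the patch): observe entryusn around a duplicate ADD.
# Host, credentials and the target DN below are placeholders.
import ldap
import ldap.modlist

conn = ldap.initialize("ldap://localhost:389")
conn.simple_bind_s("cn=Directory Manager", "password")

dn = "uid=demo_user,ou=people,dc=example,dc=com"

def entryusn(entry_dn):
    # entryusn is operational, so it has to be requested explicitly
    attrs = conn.search_s(entry_dn, ldap.SCOPE_BASE, attrlist=["entryusn"])[0][1]
    return int(attrs["entryusn"][0])

before = entryusn(dn)

# The duplicate ADD must fail with ALREADY_EXISTS and must not corrupt the counter
try:
    conn.add_s(dn, ldap.modlist.addModlist(
        {"objectClass": [b"top", b"person"], "cn": [b"demo"], "sn": [b"demo"]}))
except ldap.ALREADY_EXISTS:
    pass

# A real modification should then increment entryusn by exactly one
conn.modify_s(dn, [(ldap.MOD_REPLACE, "description", [b"usn check"])])
after = entryusn(dn)
assert after == before + 1, f"unexpected entryUSN jump: {before} -> {after}"
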
diff --git a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
|
||||
new file mode 100644
|
||||
index 000000000..a23d734ca
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
|
||||
@@ -0,0 +1,323 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import os
|
||||
+import ldap
|
||||
+import logging
|
||||
+import pytest
|
||||
+import time
|
||||
+import random
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
+from lib389.config import Config
|
||||
+from lib389.plugins import USNPlugin
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.topologies import topology_st
|
||||
+from lib389.rootdse import RootDSE
|
||||
+
|
||||
+pytestmark = pytest.mark.tier2
|
||||
+
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+# Test constants
|
||||
+DEMO_USER_BASE_DN = "uid=demo_user,ou=people," + DEFAULT_SUFFIX
|
||||
+TEST_USER_PREFIX = "Demo User"
|
||||
+MAX_USN_64BIT = 18446744073709551615 # 2^64 - 1
|
||||
+ITERATIONS = 10
|
||||
+ADD_EXISTING_ENTRY_MAX_ATTEMPTS = 5
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="module")
|
||||
+def setup_usn_test(topology_st, request):
|
||||
+ """Setup USN plugin and test data for entryUSN overflow testing"""
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+
|
||||
+ log.info("Enable the USN plugin...")
|
||||
+ plugin = USNPlugin(inst)
|
||||
+ plugin.enable()
|
||||
+ plugin.enable_global_mode()
|
||||
+
|
||||
+ inst.restart()
|
||||
+
|
||||
+ # Create initial test users
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ created_users = []
|
||||
+
|
||||
+ log.info("Creating initial test users...")
|
||||
+ for i in range(3):
|
||||
+ user_props = {
|
||||
+ 'uid': f'{TEST_USER_PREFIX}-{i}',
|
||||
+ 'cn': f'{TEST_USER_PREFIX}-{i}',
|
||||
+ 'sn': f'User{i}',
|
||||
+ 'uidNumber': str(1000 + i),
|
||||
+ 'gidNumber': str(1000 + i),
|
||||
+ 'homeDirectory': f'/home/{TEST_USER_PREFIX}-{i}',
|
||||
+ 'userPassword': 'password123'
|
||||
+ }
|
||||
+ try:
|
||||
+ user = users.create(properties=user_props)
|
||||
+ created_users.append(user)
|
||||
+ log.info(f"Created user: {user.dn}")
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ log.info(f"User {user_props['uid']} already exists, skipping creation")
|
||||
+ user = users.get(user_props['uid'])
|
||||
+ created_users.append(user)
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info("Cleaning up test users...")
|
||||
+ for user in created_users:
|
||||
+ try:
|
||||
+ user.delete()
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ pass
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ return created_users
|
||||
+
|
||||
+
|
||||
+def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test):
|
||||
+ """Test that reproduces entryUSN overflow when adding existing entries
|
||||
+
|
||||
+ :id: a5a8c33d-82f3-4113-be2b-027de51791c8
|
||||
+ :setup: Standalone instance with USN plugin enabled and test users
|
||||
+ :steps:
|
||||
+ 1. Record initial entryUSN values for existing users
|
||||
+ 2. Attempt to add existing entries multiple times (should fail)
|
||||
+ 3. Perform modify operations on the entries
|
||||
+ 4. Check that entryUSN values increment correctly without overflow
|
||||
+ 5. Verify lastusn values are consistent
|
||||
+ :expectedresults:
|
||||
+ 1. Initial entryUSN values are recorded successfully
|
||||
+ 2. Add operations fail with ALREADY_EXISTS error
|
||||
+ 3. Modify operations succeed
|
||||
+ 4. EntryUSN values increment properly without underflow/overflow
|
||||
+ 5. LastUSN values are consistent and increasing
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+ users = setup_usn_test
|
||||
+
|
||||
+ # Enable detailed logging for debugging
|
||||
+ config = Config(inst)
|
||||
+ config.replace('nsslapd-accesslog-level', '260') # Internal op logging
|
||||
+ config.replace('nsslapd-errorlog-level', '65536')
|
||||
+ config.replace('nsslapd-plugin-logging', 'on')
|
||||
+
|
||||
+ root_dse = RootDSE(inst)
|
||||
+
|
||||
+ log.info("Starting entryUSN overflow reproduction test")
|
||||
+
|
||||
+ # Record initial state
|
||||
+ initial_usn_values = {}
|
||||
+ for user in users:
|
||||
+ initial_usn = user.get_attr_val_int('entryusn')
|
||||
+ initial_usn_values[user.dn] = initial_usn
|
||||
+ log.info(f"Initial entryUSN for {user.get_attr_val_utf8('cn')}: {initial_usn}")
|
||||
+
|
||||
+ initial_lastusn = root_dse.get_attr_val_int("lastusn")
|
||||
+ log.info(f"Initial lastUSN: {initial_lastusn}")
|
||||
+
|
||||
+ # Perform test iterations
|
||||
+ for iteration in range(1, ITERATIONS + 1):
|
||||
+ log.info(f"\n--- Iteration {iteration} ---")
|
||||
+
|
||||
+ # Step 1: Try to add existing entries multiple times
|
||||
+ selected_user = random.choice(users)
|
||||
+ cn_value = selected_user.get_attr_val_utf8('cn')
|
||||
+ attempts = random.randint(1, ADD_EXISTING_ENTRY_MAX_ATTEMPTS)
|
||||
+
|
||||
+ log.info(f"Attempting to add existing entry '{cn_value}' {attempts} times")
|
||||
+
|
||||
+ # Get user attributes for recreation attempt
|
||||
+ user_attrs = {
|
||||
+ 'uid': selected_user.get_attr_val_utf8('uid'),
|
||||
+ 'cn': selected_user.get_attr_val_utf8('cn'),
|
||||
+ 'sn': selected_user.get_attr_val_utf8('sn'),
|
||||
+ 'uidNumber': selected_user.get_attr_val_utf8('uidNumber'),
|
||||
+ 'gidNumber': selected_user.get_attr_val_utf8('gidNumber'),
|
||||
+ 'homeDirectory': selected_user.get_attr_val_utf8('homeDirectory'),
|
||||
+ 'userPassword': 'password123'
|
||||
+ }
|
||||
+
|
||||
+ users_collection = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+
|
||||
+ # Try to add the existing user multiple times
|
||||
+ for attempt in range(attempts):
|
||||
+ try:
|
||||
+ users_collection.create(properties=user_attrs)
|
||||
+ log.error(f"ERROR: Add operation should have failed but succeeded on attempt {attempt + 1}")
|
||||
+ assert False, "Add operation should have failed with ALREADY_EXISTS"
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ log.info(f"Attempt {attempt + 1}: Got expected ALREADY_EXISTS error")
|
||||
+ except Exception as e:
|
||||
+ log.error(f"Unexpected error on attempt {attempt + 1}: {e}")
|
||||
+ raise
|
||||
+
|
||||
+ # Step 2: Perform modify operation
|
||||
+ target_user = random.choice(users)
|
||||
+ cn_value = target_user.get_attr_val_utf8('cn')
|
||||
+ old_usn = target_user.get_attr_val_int('entryusn')
|
||||
+
|
||||
+ # Modify the user entry
|
||||
+ new_description = f"Modified in iteration {iteration} - {time.time()}"
|
||||
+ target_user.replace('description', new_description)
|
||||
+
|
||||
+ # Get new USN value
|
||||
+ new_usn = target_user.get_attr_val_int('entryusn')
|
||||
+
|
||||
+ log.info(f"Modified entry '{cn_value}': old USN = {old_usn}, new USN = {new_usn}")
|
||||
+
|
||||
+ # Step 3: Validate USN values
|
||||
+ # Check for overflow/underflow conditions
|
||||
+ assert new_usn > 0, f"EntryUSN should be positive, got {new_usn}"
|
||||
+ assert new_usn < MAX_USN_64BIT, f"EntryUSN overflow detected: {new_usn} >= {MAX_USN_64BIT}"
|
||||
+
|
||||
+ # Check that USN didn't wrap around (underflow detection)
|
||||
+ usn_diff = new_usn - old_usn
|
||||
+ assert usn_diff < 1000, f"USN increment too large, possible overflow: {usn_diff}"
|
||||
+
|
||||
+ # Verify lastUSN is also reasonable
|
||||
+ current_lastusn = root_dse.get_attr_val_int("lastusn")
|
||||
+ assert current_lastusn >= new_usn, f"LastUSN ({current_lastusn}) should be >= entryUSN ({new_usn})"
|
||||
+ assert current_lastusn < MAX_USN_64BIT, f"LastUSN overflow detected: {current_lastusn}"
|
||||
+
|
||||
+ log.info(f"USN validation passed for iteration {iteration}")
|
||||
+
|
||||
+ # Add a new entry occasionally to increase USN diversity
|
||||
+ if iteration % 3 == 0:
|
||||
+ new_user_props = {
|
||||
+ 'uid': f'{TEST_USER_PREFIX}-new-{iteration}',
|
||||
+ 'cn': f'{TEST_USER_PREFIX}-new-{iteration}',
|
||||
+ 'sn': f'NewUser{iteration}',
|
||||
+ 'uidNumber': str(2000 + iteration),
|
||||
+ 'gidNumber': str(2000 + iteration),
|
||||
+ 'homeDirectory': f'/home/{TEST_USER_PREFIX}-new-{iteration}',
|
||||
+ 'userPassword': 'newpassword123'
|
||||
+ }
|
||||
+ try:
|
||||
+ new_user = users_collection.create(properties=new_user_props)
|
||||
+ new_user_usn = new_user.get_attr_val_int('entryusn')
|
||||
+ log.info(f"Created new entry '{new_user.get_attr_val_utf8('cn')}' with USN: {new_user_usn}")
|
||||
+ users.append(new_user) # Add to cleanup list
|
||||
+ except Exception as e:
|
||||
+ log.warning(f"Failed to create new user in iteration {iteration}: {e}")
|
||||
+
|
||||
+ # Final validation: Check all USN values are reasonable
|
||||
+ log.info("\nFinal USN validation")
|
||||
+ final_lastusn = root_dse.get_attr_val_int("lastusn")
|
||||
+
|
||||
+ for user in users:
|
||||
+ try:
|
||||
+ final_usn = user.get_attr_val_int('entryusn')
|
||||
+ cn_value = user.get_attr_val_utf8('cn')
|
||||
+ log.info(f"Final entryUSN for '{cn_value}': {final_usn}")
|
||||
+
|
||||
+ # Ensure no overflow occurred
|
||||
+ assert final_usn > 0, f"Final entryUSN should be positive for {cn_value}: {final_usn}"
|
||||
+ assert final_usn < MAX_USN_64BIT, f"EntryUSN overflow for {cn_value}: {final_usn}"
|
||||
+
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ log.info(f"User {user.dn} was deleted during test")
|
||||
+
|
||||
+ log.info(f"Final lastUSN: {final_lastusn}")
|
||||
+ assert final_lastusn > initial_lastusn, "LastUSN should have increased during test"
|
||||
+ assert final_lastusn < MAX_USN_64BIT, f"LastUSN overflow detected: {final_lastusn}"
|
||||
+
|
||||
+ log.info("EntryUSN overflow test completed successfully")
|
||||
+
|
||||
+
|
||||
+def test_entryusn_consistency_after_failed_adds(topology_st, setup_usn_test):
|
||||
+ """Test that entryUSN remains consistent after failed add operations
|
||||
+
|
||||
+ :id: e380ccad-527b-427e-a331-df5c41badbed
|
||||
+ :setup: Standalone instance with USN plugin enabled and test users
|
||||
+ :steps:
|
||||
+ 1. Record entryUSN values before failed add attempts
|
||||
+ 2. Attempt to add existing entries (should fail)
|
||||
+ 3. Verify entryUSN values haven't changed due to failed operations
|
||||
+ 4. Perform successful modify operations
|
||||
+ 5. Verify entryUSN increments correctly
|
||||
+ :expectedresults:
|
||||
+ 1. Initial entryUSN values recorded
|
||||
+ 2. Add operations fail as expected
|
||||
+ 3. EntryUSN values unchanged after failed adds
|
||||
+ 4. Modify operations succeed
|
||||
+ 5. EntryUSN values increment correctly without overflow
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+ users = setup_usn_test
|
||||
+
|
||||
+ log.info("Testing entryUSN consistency after failed adds")
|
||||
+
|
||||
+ # Record USN values before any operations
|
||||
+ pre_operation_usns = {}
|
||||
+ for user in users:
|
||||
+ usn = user.get_attr_val_int('entryusn')
|
||||
+ pre_operation_usns[user.dn] = usn
|
||||
+ log.info(f"Pre-operation entryUSN for {user.get_attr_val_utf8('cn')}: {usn}")
|
||||
+
|
||||
+ # Attempt to add existing entries - these should fail
|
||||
+ users_collection = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+
|
||||
+ for user in users:
|
||||
+ cn_value = user.get_attr_val_utf8('cn')
|
||||
+ log.info(f"Attempting to add existing user: {cn_value}")
|
||||
+
|
||||
+ user_attrs = {
|
||||
+ 'uid': user.get_attr_val_utf8('uid'),
|
||||
+ 'cn': cn_value,
|
||||
+ 'sn': user.get_attr_val_utf8('sn'),
|
||||
+ 'uidNumber': user.get_attr_val_utf8('uidNumber'),
|
||||
+ 'gidNumber': user.get_attr_val_utf8('gidNumber'),
|
||||
+ 'homeDirectory': user.get_attr_val_utf8('homeDirectory'),
|
||||
+ 'userPassword': 'password123'
|
||||
+ }
|
||||
+
|
||||
+ try:
|
||||
+ users_collection.create(properties=user_attrs)
|
||||
+ assert False, f"Add operation should have failed for existing user {cn_value}"
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ log.info(f"Got expected ALREADY_EXISTS for {cn_value}")
|
||||
+
|
||||
+ # Verify USN values haven't changed after failed adds
|
||||
+ log.info("Verifying entryUSN values after failed add operations...")
|
||||
+ for user in users:
|
||||
+ current_usn = user.get_attr_val_int('entryusn')
|
||||
+ expected_usn = pre_operation_usns[user.dn]
|
||||
+ cn_value = user.get_attr_val_utf8('cn')
|
||||
+
|
||||
+ assert current_usn == expected_usn, \
|
||||
+ f"EntryUSN changed after failed add for {cn_value}: was {expected_usn}, now {current_usn}"
|
||||
+ log.info(f"EntryUSN unchanged for {cn_value}: {current_usn}")
|
||||
+
|
||||
+ # Now perform successful modify operations
|
||||
+ log.info("Performing successful modify operations...")
|
||||
+ for i, user in enumerate(users):
|
||||
+ cn_value = user.get_attr_val_utf8('cn')
|
||||
+ old_usn = user.get_attr_val_int('entryusn')
|
||||
+
|
||||
+ # Modify the user
|
||||
+ user.replace('description', f'Consistency test modification {i + 1}')
|
||||
+
|
||||
+ new_usn = user.get_attr_val_int('entryusn')
|
||||
+ log.info(f"Modified {cn_value}: USN {old_usn} -> {new_usn}")
|
||||
+
|
||||
+ # Verify proper increment
|
||||
+ assert (new_usn - old_usn) == 1, f"EntryUSN should increment by 1 for {cn_value}: {old_usn} -> {new_usn}"
|
||||
+ assert new_usn < MAX_USN_64BIT, f"EntryUSN overflow for {cn_value}: {new_usn}"
|
||||
+
|
||||
+ log.info("EntryUSN consistency test completed successfully")
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main("-s %s" % CURRENT_FILE)
|
||||
\ No newline at end of file
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,214 +0,0 @@
|
||||
From 3fe2cf7cdedcdf5cafb59867e52a1fbe4a643571 Mon Sep 17 00:00:00 2001
From: Masahiro Matsuya <mmatsuya@redhat.com>
Date: Fri, 20 Dec 2024 22:37:15 +0900
Subject: [PATCH] Issue 6224 - Remove test_referral_subsuffix from
 ds_log_test.py (#6456)

Bug Description:

The test_referral_subsuffix test was removed from the main branch and from
some of the newer branches, but not from 389-ds-base-1.4.3 and
389-ds-base-2.1. The test no longer works with the fix for Issue 6224,
because the new control added by that fix limits the internal search to a
single backend, so the test should be removed.

Fix Description:

Remove the test from ds_logs_test.py.

relates: https://github.com/389ds/389-ds-base/issues/6224
---
.../tests/suites/ds_logs/ds_logs_test.py | 177 ------------------
1 file changed, 177 deletions(-)

diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 812936c62..84d721756 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,183 +1222,6 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_referral_subsuffix(topology_st, request):
|
||||
- """Test the results of an inverted parent suffix definition in the configuration.
|
||||
-
|
||||
- For more details see:
|
||||
- https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html
|
||||
-
|
||||
- :id: 4faf210a-4fde-4e4f-8834-865bdc8f4d37
|
||||
- :setup: Standalone instance
|
||||
- :steps:
|
||||
- 1. First create two Backends, without mapping trees.
|
||||
- 2. create the mapping trees for these backends
|
||||
- 3. reduce nsslapd-referral-check-period to accelerate test
|
||||
- 4. Remove error log file
|
||||
- 5. Create a referral entry on parent suffix
|
||||
- 6. Check that the server detected the referral
|
||||
- 7. Delete the referral entry
|
||||
- 8. Check that the server detected the deletion of the referral
|
||||
- 9. Remove error log file
|
||||
- 10. Create a referral entry on child suffix
|
||||
- 11. Check that the server detected the referral on both parent and child suffixes
|
||||
- 12. Delete the referral entry
|
||||
- 13. Check that the server detected the deletion of the referral on both parent and child suffixes
|
||||
- 14. Remove error log file
|
||||
- 15. Create a referral entry on parent suffix
|
||||
- 16. Check that the server detected the referral on both parent and child suffixes
|
||||
- 17. Delete the child referral entry
|
||||
- 18. Check that the server detected the deletion of the referral on child suffix but not on parent suffix
|
||||
- 19. Delete the parent referral entry
|
||||
- 20. Check that the server detected the deletion of the referral parent suffix
|
||||
-
|
||||
- :expectedresults:
|
||||
- all steps succeeds
|
||||
- """
|
||||
- inst = topology_st.standalone
|
||||
- # Step 1 First create two Backends, without mapping trees.
|
||||
- PARENT_SUFFIX='dc=parent,dc=com'
|
||||
- CHILD_SUFFIX='dc=child,%s' % PARENT_SUFFIX
|
||||
- be1 = create_backend(inst, 'Parent', PARENT_SUFFIX)
|
||||
- be2 = create_backend(inst, 'Child', CHILD_SUFFIX)
|
||||
- # Step 2 create the mapping trees for these backends
|
||||
- mts = MappingTrees(inst)
|
||||
- mt1 = mts.create(properties={
|
||||
- 'cn': PARENT_SUFFIX,
|
||||
- 'nsslapd-state': 'backend',
|
||||
- 'nsslapd-backend': 'Parent',
|
||||
- })
|
||||
- mt2 = mts.create(properties={
|
||||
- 'cn': CHILD_SUFFIX,
|
||||
- 'nsslapd-state': 'backend',
|
||||
- 'nsslapd-backend': 'Child',
|
||||
- 'nsslapd-parent-suffix': PARENT_SUFFIX,
|
||||
- })
|
||||
-
|
||||
- dc_ex = Domain(inst, dn=PARENT_SUFFIX)
|
||||
- assert dc_ex.exists()
|
||||
-
|
||||
- dc_st = Domain(inst, dn=CHILD_SUFFIX)
|
||||
- assert dc_st.exists()
|
||||
-
|
||||
- # Step 3 reduce nsslapd-referral-check-period to accelerate test
|
||||
- # requires a restart done on step 4
|
||||
- REFERRAL_CHECK=7
|
||||
- topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK))
|
||||
-
|
||||
- # Check that if we create a referral at parent level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is not detected at child backend
|
||||
-
|
||||
- # Step 3 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 4 Create a referral entry on parent suffix
|
||||
- rs_parent = Referrals(topology_st.standalone, PARENT_SUFFIX)
|
||||
-
|
||||
- referral_entry_parent = rs_parent.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 5 Check that the server detected the referral
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Step 6 Delete the referral entry
|
||||
- referral_entry_parent.delete()
|
||||
-
|
||||
- # Step 7 Check that the server detected the deletion of the referral
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Check that if we create a referral at child level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is detected at child backend
|
||||
-
|
||||
- # Step 8 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 9 Create a referral entry on child suffix
|
||||
- rs_child = Referrals(topology_st.standalone, CHILD_SUFFIX)
|
||||
- referral_entry_child = rs_child.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 10 Check that the server detected the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Step 11 Delete the referral entry
|
||||
- referral_entry_child.delete()
|
||||
-
|
||||
- # Step 12 Check that the server detected the deletion of the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Check that if we create a referral at child level and parent level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is detected at child backend
|
||||
-
|
||||
- # Step 13 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 14 Create a referral entry on parent suffix
|
||||
- # Create a referral entry on child suffix
|
||||
- referral_entry_parent = rs_parent.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
- referral_entry_child = rs_child.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 15 Check that the server detected the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Step 16 Delete the child referral entry
|
||||
- referral_entry_child.delete()
|
||||
-
|
||||
- # Step 17 Check that the server detected the deletion of the referral on child suffix but not on parent suffix
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Step 18 Delete the parent referral entry
|
||||
- referral_entry_parent.delete()
|
||||
-
|
||||
- # Step 19 Check that the server detected the deletion of the referral parent suffix
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- def fin():
|
||||
- log.info('Deleting referral')
|
||||
- try:
|
||||
- referral_entry_parent.delete()
|
||||
- referral.entry_child.delete()
|
||||
- except:
|
||||
- pass
|
||||
-
|
||||
- request.addfinalizer(fin)
|
||||
|
||||
def test_missing_backend_suffix(topology_st, request):
|
||||
"""Test that the server does not crash if a backend has no suffix
|
||||
--
|
||||
2.48.0
|
||||
|
@ -0,0 +1,172 @@
|
||||
From 37a56f75afac2805e1ba958eebd496e77b7079e7 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Mon, 28 Jul 2025 15:35:50 -0700
Subject: [PATCH] Issue 6594 - Add test for numSubordinates replication
 consistency with tombstones (#6862)

Description: Add a comprehensive test to verify that numSubordinates and
tombstoneNumSubordinates attributes are correctly replicated between
instances when tombstone entries are present.

Fixes: https://github.com/389ds/389-ds-base/issues/6594

Reviewed by: @progier389 (Thanks!)
---
.../numsubordinates_replication_test.py | 144 ++++++++++++++++++
1 file changed, 144 insertions(+)
create mode 100644 dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py

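The consistency check that the test automates can also be done by hand: read
the two operational attributes from the same container on each replica and
compare them. A minimal python-ldap sketch follows, separate from the patch;
the hosts, credentials and container DN are placeholders.

# Minimal sketch (not part of the patch): compare subordinate counts on two replicas.
# Hosts, credentials and the container DN are placeholders.
import ldap

CONTAINER = "ou=test_container,dc=example,dc=com"
ATTRS = ["numSubordinates", "tombstoneNumSubordinates"]

def read_counts(url):
    conn = ldap.initialize(url)
    conn.simple_bind_s("cn=Directory Manager", "password")
    _dn, attrs = conn.search_s(CONTAINER, ldap.SCOPE_BASE, attrlist=ATTRS)[0]
    conn.unbind_s()
    # A missing tombstoneNumSubordinates is treated as 0, as in the test
    return {a: int(attrs.get(a, [b"0"])[0]) for a in ATTRS}

counts1 = read_counts("ldap://supplier1:389")
counts2 = read_counts("ldap://supplier2:389")
assert counts1 == counts2, f"replicas disagree: {counts1} vs {counts2}"
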
diff --git a/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py b/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py
|
||||
new file mode 100644
|
||||
index 000000000..9ba10657d
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py
|
||||
@@ -0,0 +1,144 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+
|
||||
+import os
|
||||
+import logging
|
||||
+import pytest
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
+from lib389.replica import ReplicationManager
|
||||
+from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.topologies import topology_i2 as topo_i2
|
||||
+
|
||||
+
|
||||
+pytestmark = pytest.mark.tier1
|
||||
+
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
+if DEBUGGING:
|
||||
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+else:
|
||||
+ logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+
|
||||
+def test_numsubordinates_tombstone_replication_mismatch(topo_i2):
|
||||
+ """Test that numSubordinates values match between replicas after tombstone creation
|
||||
+
|
||||
+ :id: c43ecc7a-d706-42e8-9179-1ff7d0e7163a
|
||||
+ :setup: Two standalone instances
|
||||
+ :steps:
|
||||
+ 1. Create a container (organizational unit) on the first instance
|
||||
+ 2. Create a user object in that container
|
||||
+ 3. Delete the user object (this creates a tombstone)
|
||||
+ 4. Set up replication between the two instances
|
||||
+ 5. Wait for replication to complete
|
||||
+ 6. Check numSubordinates on both instances
|
||||
+ 7. Check tombstoneNumSubordinates on both instances
|
||||
+ 8. Verify that numSubordinates values match on both instances
|
||||
+ :expectedresults:
|
||||
+ 1. Container should be created successfully
|
||||
+ 2. User object should be created successfully
|
||||
+ 3. User object should be deleted successfully
|
||||
+ 4. Replication should be set up successfully
|
||||
+ 5. Replication should complete successfully
|
||||
+ 6. numSubordinates should be accessible on both instances
|
||||
+ 7. tombstoneNumSubordinates should be accessible on both instances
|
||||
+ 8. numSubordinates values should match on both instances
|
||||
+ """
|
||||
+
|
||||
+ instance1 = topo_i2.ins["standalone1"]
|
||||
+ instance2 = topo_i2.ins["standalone2"]
|
||||
+
|
||||
+ log.info("Create a container (organizational unit) on the first instance")
|
||||
+ ous1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX)
|
||||
+ container = ous1.create(properties={
|
||||
+ 'ou': 'test_container',
|
||||
+ 'description': 'Test container for numSubordinates replication test'
|
||||
+ })
|
||||
+ container_rdn = container.rdn
|
||||
+ log.info(f"Created container: {container_rdn}")
|
||||
+
|
||||
+ log.info("Create a user object in that container")
|
||||
+ users1 = UserAccounts(instance1, DEFAULT_SUFFIX, rdn=f"ou={container_rdn}")
|
||||
+ test_user = users1.create_test_user(uid=1001)
|
||||
+ log.info(f"Created user: {test_user.dn}")
|
||||
+
|
||||
+ log.info("Checking initial numSubordinates on container")
|
||||
+ container_obj1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX).get(container_rdn)
|
||||
+ initial_numsubordinates = container_obj1.get_attr_val_int('numSubordinates')
|
||||
+ log.info(f"Initial numSubordinates: {initial_numsubordinates}")
|
||||
+ assert initial_numsubordinates == 1
|
||||
+
|
||||
+ log.info("Delete the user object (this creates a tombstone)")
|
||||
+ test_user.delete()
|
||||
+
|
||||
+ log.info("Checking numSubordinates after deletion")
|
||||
+ after_delete_numsubordinates = container_obj1.get_attr_val_int('numSubordinates')
|
||||
+ log.info(f"numSubordinates after deletion: {after_delete_numsubordinates}")
|
||||
+
|
||||
+ log.info("Checking tombstoneNumSubordinates after deletion")
|
||||
+ try:
|
||||
+ tombstone_numsubordinates = container_obj1.get_attr_val_int('tombstoneNumSubordinates')
|
||||
+ log.info(f"tombstoneNumSubordinates: {tombstone_numsubordinates}")
|
||||
+ except Exception as e:
|
||||
+ log.info(f"tombstoneNumSubordinates not found or error: {e}")
|
||||
+ tombstone_numsubordinates = 0
|
||||
+
|
||||
+ log.info("Set up replication between the two instances")
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+ repl.create_first_supplier(instance1)
|
||||
+ repl.join_supplier(instance1, instance2)
|
||||
+
|
||||
+ log.info("Wait for replication to complete")
|
||||
+ repl.wait_for_replication(instance1, instance2)
|
||||
+
|
||||
+ log.info("Check numSubordinates on both instances")
|
||||
+ container_obj1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX).get(container_rdn)
|
||||
+ numsubordinates_instance1 = container_obj1.get_attr_val_int('numSubordinates')
|
||||
+ log.info(f"numSubordinates on instance1: {numsubordinates_instance1}")
|
||||
+
|
||||
+ container_obj2 = OrganizationalUnits(instance2, DEFAULT_SUFFIX).get(container_rdn)
|
||||
+ numsubordinates_instance2 = container_obj2.get_attr_val_int('numSubordinates')
|
||||
+ log.info(f"numSubordinates on instance2: {numsubordinates_instance2}")
|
||||
+
|
||||
+ log.info("Check tombstoneNumSubordinates on both instances")
|
||||
+ try:
|
||||
+ tombstone_numsubordinates_instance1 = container_obj1.get_attr_val_int('tombstoneNumSubordinates')
|
||||
+ log.info(f"tombstoneNumSubordinates on instance1: {tombstone_numsubordinates_instance1}")
|
||||
+ except Exception as e:
|
||||
+ log.info(f"tombstoneNumSubordinates not found on instance1: {e}")
|
||||
+ tombstone_numsubordinates_instance1 = 0
|
||||
+
|
||||
+ try:
|
||||
+ tombstone_numsubordinates_instance2 = container_obj2.get_attr_val_int('tombstoneNumSubordinates')
|
||||
+ log.info(f"tombstoneNumSubordinates on instance2: {tombstone_numsubordinates_instance2}")
|
||||
+ except Exception as e:
|
||||
+ log.info(f"tombstoneNumSubordinates not found on instance2: {e}")
|
||||
+ tombstone_numsubordinates_instance2 = 0
|
||||
+
|
||||
+ log.info("Verify that numSubordinates values match on both instances")
|
||||
+ log.info(f"Comparison: instance1 numSubordinates={numsubordinates_instance1}, "
|
||||
+ f"instance2 numSubordinates={numsubordinates_instance2}")
|
||||
+ log.info(f"Comparison: instance1 tombstoneNumSubordinates={tombstone_numsubordinates_instance1}, "
|
||||
+ f"instance2 tombstoneNumSubordinates={tombstone_numsubordinates_instance2}")
|
||||
+
|
||||
+ assert numsubordinates_instance1 == numsubordinates_instance2, (
|
||||
+ f"numSubordinates mismatch: instance1 has {numsubordinates_instance1}, "
|
||||
+ f"instance2 has {numsubordinates_instance2}. "
|
||||
+ )
|
||||
+ assert tombstone_numsubordinates_instance1 == tombstone_numsubordinates_instance2, (
|
||||
+ f"tombstoneNumSubordinates mismatch: instance1 has {tombstone_numsubordinates_instance1}, "
|
||||
+ f"instance2 has {tombstone_numsubordinates_instance2}. "
|
||||
+ )
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main("-s %s" % CURRENT_FILE)
|
||||
\ No newline at end of file
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,90 +0,0 @@
|
||||
From 4121ffe7a44fbacf513758661e71e483eb11ee3c Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 6 Jan 2025 14:00:39 +0100
Subject: [PATCH] Issue 6417 - (2nd) If an entry RDN is identical to the
 suffix, then Entryrdn gets broken during a reindex (#6460)

Bug description:
The primary fix has a flaw: it assumes that the suffix ID is '1'.
If the RUV entry is the first entry of the database, the server
loops indefinitely.

Fix description:
Read the suffix ID from the entryrdn index.

fixes: #6417

Reviewed by: Pierre Rogier (also reviewed the first fix)
---
.../suites/replication/regression_m2_test.py | 9 +++++++++
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 19 ++++++++++++++++++-
2 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
index abac46ada..72d4b9f89 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
@@ -1010,6 +1010,15 @@ def test_online_reinit_may_hang(topo_with_sigkill):
|
||||
"""
|
||||
M1 = topo_with_sigkill.ms["supplier1"]
|
||||
M2 = topo_with_sigkill.ms["supplier2"]
|
||||
+
|
||||
+ # The RFE 5367 (when enabled) retrieves the DN
|
||||
+ # from the dncache. This hides an issue
|
||||
+ # with primary fix for 6417.
|
||||
+ # We need to disable the RFE to verify that the primary
|
||||
+ # fix is properly fixed.
|
||||
+ if ds_is_newer('2.3.1'):
|
||||
+ M1.config.replace('nsslapd-return-original-entrydn', 'off')
|
||||
+
|
||||
M1.stop()
|
||||
ldif_file = '%s/supplier1.ldif' % M1.get_ldif_dir()
|
||||
M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 83b041192..1bbb6252a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1115,6 +1115,7 @@ entryrdn_lookup_dn(backend *be,
|
||||
rdn_elem *elem = NULL;
|
||||
int maybesuffix = 0;
|
||||
int db_retry = 0;
|
||||
+ ID suffix_id = 1;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "entryrdn_lookup_dn",
|
||||
"--> entryrdn_lookup_dn\n");
|
||||
@@ -1175,6 +1176,22 @@ entryrdn_lookup_dn(backend *be,
|
||||
/* Setting the bulk fetch buffer */
|
||||
data.flags = DB_DBT_MALLOC;
|
||||
|
||||
+ /* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
+ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
+ dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
|
||||
+ rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
|
||||
+ if (rc) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
+ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
+ slapi_sdn_get_ndn(be->be_suffix),
|
||||
+ suffix_id);
|
||||
+ } else {
|
||||
+ elem = (rdn_elem *)data.data;
|
||||
+ suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ }
|
||||
+ dblayer_value_free(be, &data);
|
||||
+ dblayer_value_free(be, &key);
|
||||
+
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
slapi_ch_free_string(&keybuf);
|
||||
@@ -1224,7 +1241,7 @@ entryrdn_lookup_dn(backend *be,
|
||||
}
|
||||
goto bail;
|
||||
}
|
||||
- if (workid == 1) {
|
||||
+ if (workid == suffix_id) {
|
||||
/* The loop (workid) iterates from the starting 'id'
|
||||
* up to the suffix ID (i.e. '1').
|
||||
* A corner case (#6417) is if an entry, on the path
|
||||
--
|
||||
2.48.0
|
||||
|
@ -0,0 +1,814 @@
|
||||
From e05653cbff500c47b89e43e4a1c85b7cb30321ff Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Mon, 28 Jul 2025 15:41:29 -0700
Subject: [PATCH] Issue 6884 - Mask password hashes in audit logs (#6885)

Description: Fix the audit log functionality to mask password hash values for
userPassword, nsslapd-rootpw, nsmultiplexorcredentials, nsds5ReplicaCredentials,
and nsds5ReplicaBootstrapCredentials attributes in ADD and MODIFY operations.
Update auditlog.c to detect password attributes and replace their values with
asterisks (**********************) in both LDIF and JSON audit log formats.
Add a comprehensive test suite audit_password_masking_test.py to verify
password masking works correctly across all log formats and operation types.

Fixes: https://github.com/389ds/389-ds-base/issues/6884

Reviewed by: @mreynolds389, @vashirov (Thanks!!)
---
.../logging/audit_password_masking_test.py | 501 ++++++++++++++++++
ldap/servers/slapd/auditlog.c | 170 +++++-
ldap/servers/slapd/slapi-private.h | 1 +
src/lib389/lib389/chaining.py | 3 +-
4 files changed, 652 insertions(+), 23 deletions(-)
create mode 100644 dirsrvtests/tests/suites/logging/audit_password_masking_test.py

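The masking rule described above is easy to picture with a small helper:
whenever the attribute of an LDIF-style "attr: value" audit line is one of the
credential attributes listed in the description, its value is replaced by the
fixed asterisk string. The following is only an illustrative Python sketch of
that idea; the real change lives in the C code of auditlog.c shown in the diff
below.

# Illustrative sketch only; the actual implementation is in auditlog.c.
MASKED_VALUE = "**********************"

PASSWORD_ATTRS = {
    "userpassword",
    "nsslapd-rootpw",
    "nsmultiplexorcredentials",
    "nsds5replicacredentials",
    "nsds5replicabootstrapcredentials",
}

def mask_audit_line(line):
    """Mask the value of a credential attribute in an 'attr: value' audit line."""
    attr, sep, _value = line.partition(":")
    if sep and attr.strip().lower() in PASSWORD_ATTRS:
        return f"{attr}: {MASKED_VALUE}"
    return line

# A hashed userPassword value never reaches the audit log verbatim:
print(mask_audit_line("userPassword: {PBKDF2-SHA512}AAAgAJ..."))   # masked
print(mask_audit_line("description: unaffected attribute"))        # unchanged
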
diff --git a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
new file mode 100644
|
||||
index 000000000..3b6a54849
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
@@ -0,0 +1,501 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import logging
|
||||
+import pytest
|
||||
+import os
|
||||
+import re
|
||||
+import time
|
||||
+import ldap
|
||||
+from lib389._constants import DEFAULT_SUFFIX, DN_DM, PW_DM
|
||||
+from lib389.topologies import topology_m2 as topo
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.dirsrv_log import DirsrvAuditJSONLog
|
||||
+from lib389.plugins import ChainingBackendPlugin
|
||||
+from lib389.chaining import ChainingLinks
|
||||
+from lib389.agreement import Agreements
|
||||
+from lib389.replica import ReplicationManager, Replicas
|
||||
+from lib389.idm.directorymanager import DirectoryManager
|
||||
+
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+MASKED_PASSWORD = "**********************"
|
||||
+TEST_PASSWORD = "MySecret123"
|
||||
+TEST_PASSWORD_2 = "NewPassword789"
|
||||
+TEST_PASSWORD_3 = "NewPassword101"
|
||||
+
|
||||
+
|
||||
+def setup_audit_logging(inst, log_format='default', display_attrs=None):
|
||||
+ """Configure audit logging settings"""
|
||||
+ inst.config.replace('nsslapd-auditlog-logbuffering', 'off')
|
||||
+ inst.config.replace('nsslapd-auditlog-logging-enabled', 'on')
|
||||
+ inst.config.replace('nsslapd-auditlog-log-format', log_format)
|
||||
+
|
||||
+ if display_attrs is not None:
|
||||
+ inst.config.replace('nsslapd-auditlog-display-attrs', display_attrs)
|
||||
+
|
||||
+ inst.deleteAuditLogs()
|
||||
+
|
||||
+
|
||||
+def check_password_masked(inst, log_format, expected_password, actual_password):
|
||||
+ """Helper function to check password masking in audit logs"""
|
||||
+
|
||||
+ time.sleep(1) # Allow log to flush
|
||||
+
|
||||
+ # List of all password/credential attributes that should be masked
|
||||
+ password_attributes = [
|
||||
+ 'userPassword',
|
||||
+ 'nsslapd-rootpw',
|
||||
+ 'nsmultiplexorcredentials',
|
||||
+ 'nsDS5ReplicaCredentials',
|
||||
+ 'nsDS5ReplicaBootstrapCredentials'
|
||||
+ ]
|
||||
+
|
||||
+ # Get password schemes to check for hash leakage
|
||||
+ user_password_scheme = inst.config.get_attr_val_utf8('passwordStorageScheme')
|
||||
+ root_password_scheme = inst.config.get_attr_val_utf8('nsslapd-rootpwstoragescheme')
|
||||
+
|
||||
+ if log_format == 'json':
|
||||
+ # Check JSON format logs
|
||||
+ audit_log = DirsrvAuditJSONLog(inst)
|
||||
+ log_lines = audit_log.readlines()
|
||||
+
|
||||
+ found_masked = False
|
||||
+ found_actual = False
|
||||
+ found_hashed = False
|
||||
+
|
||||
+ for line in log_lines:
|
||||
+ # Check if any password attribute is present in the line
|
||||
+ for attr in password_attributes:
|
||||
+ if attr in line:
|
||||
+ if expected_password in line:
|
||||
+ found_masked = True
|
||||
+ if actual_password in line:
|
||||
+ found_actual = True
|
||||
+ # Check for password scheme indicators (hashed passwords)
|
||||
+ if user_password_scheme and f'{{{user_password_scheme}}}' in line:
|
||||
+ found_hashed = True
|
||||
+ if root_password_scheme and f'{{{root_password_scheme}}}' in line:
|
||||
+ found_hashed = True
|
||||
+ break # Found a password attribute, no need to check others for this line
|
||||
+
|
||||
+ else:
|
||||
+ # Check LDIF format logs
|
||||
+ found_masked = False
|
||||
+ found_actual = False
|
||||
+ found_hashed = False
|
||||
+
|
||||
+ # Check each password attribute for masked password
|
||||
+ for attr in password_attributes:
|
||||
+ if inst.ds_audit_log.match(f"{attr}: {re.escape(expected_password)}"):
|
||||
+ found_masked = True
|
||||
+ if inst.ds_audit_log.match(f"{attr}: {actual_password}"):
|
||||
+ found_actual = True
|
||||
+
|
||||
+ # Check for hashed passwords in LDIF format
|
||||
+ if user_password_scheme:
|
||||
+ if inst.ds_audit_log.match(f"userPassword: {{{user_password_scheme}}}"):
|
||||
+ found_hashed = True
|
||||
+ if root_password_scheme:
|
||||
+ if inst.ds_audit_log.match(f"nsslapd-rootpw: {{{root_password_scheme}}}"):
|
||||
+ found_hashed = True
|
||||
+
|
||||
+ # Delete audit logs to avoid interference with other tests
|
||||
+ # We need to reset the root password to default as deleteAuditLogs()
|
||||
+ # opens a new connection with the default password
|
||||
+ dm = DirectoryManager(inst)
|
||||
+ dm.change_password(PW_DM)
|
||||
+ inst.deleteAuditLogs()
|
||||
+
|
||||
+ return found_masked, found_actual, found_hashed
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "userPassword"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "userPassword")
|
||||
+])
|
||||
+def test_password_masking_add_operation(topo, log_format, display_attrs):
|
||||
+ """Test password masking in ADD operations
|
||||
+
|
||||
+ :id: 4358bd75-bcc7-401c-b492-d3209b10412d
|
||||
+ :parametrized: yes
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Configure audit logging format
|
||||
+ 2. Add user with password
|
||||
+ 3. Check that password is masked in audit log
|
||||
+ 4. Verify actual password does not appear in log
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Password should be masked with asterisks
|
||||
+ 4. Actual password should not be found in log
|
||||
+ """
|
||||
+ inst = topo.ms['supplier1']
|
||||
+ setup_audit_logging(inst, log_format, display_attrs)
|
||||
+
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ user = None
|
||||
+
|
||||
+ try:
|
||||
+ user = users.create(properties={
|
||||
+ 'uid': 'test_add_pwd_mask',
|
||||
+ 'cn': 'Test Add User',
|
||||
+ 'sn': 'User',
|
||||
+ 'uidNumber': '1000',
|
||||
+ 'gidNumber': '1000',
|
||||
+ 'homeDirectory': '/home/test_add',
|
||||
+ 'userPassword': TEST_PASSWORD
|
||||
+ })
|
||||
+
|
||||
+ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
|
||||
+
|
||||
+ assert found_masked, f"Masked password not found in {log_format} ADD operation"
|
||||
+ assert not found_actual, f"Actual password found in {log_format} ADD log (should be masked)"
|
||||
+ assert not found_hashed, f"Hashed password found in {log_format} ADD log (should be masked)"
|
||||
+
|
||||
+ finally:
|
||||
+ if user is not None:
|
||||
+ try:
|
||||
+ user.delete()
|
||||
+ except:
|
||||
+ pass
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "userPassword"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "userPassword")
|
||||
+])
|
||||
+def test_password_masking_modify_operation(topo, log_format, display_attrs):
|
||||
+ """Test password masking in MODIFY operations
|
||||
+
|
||||
+ :id: e6963aa9-7609-419c-aae2-1d517aa434bd
|
||||
+ :parametrized: yes
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Configure audit logging format
|
||||
+ 2. Add user without password
|
||||
+ 3. Add password via MODIFY operation
|
||||
+ 4. Check that password is masked in audit log
|
||||
+ 5. Modify password to new value
|
||||
+ 6. Check that new password is also masked
|
||||
+ 7. Verify actual passwords do not appear in log
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Password should be masked with asterisks
|
||||
+ 5. Success
|
||||
+ 6. New password should be masked with asterisks
|
||||
+ 7. No actual password values should be found in log
|
||||
+ """
|
||||
+ inst = topo.ms['supplier1']
|
||||
+ setup_audit_logging(inst, log_format, display_attrs)
|
||||
+
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ user = None
|
||||
+
|
||||
+ try:
|
||||
+ user = users.create(properties={
|
||||
+ 'uid': 'test_modify_pwd_mask',
|
||||
+ 'cn': 'Test Modify User',
|
||||
+ 'sn': 'User',
|
||||
+ 'uidNumber': '2000',
|
||||
+ 'gidNumber': '2000',
|
||||
+ 'homeDirectory': '/home/test_modify'
|
||||
+ })
|
||||
+
|
||||
+ user.replace('userPassword', TEST_PASSWORD)
|
||||
+
|
||||
+ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
|
||||
+ assert found_masked, f"Masked password not found in {log_format} MODIFY operation (first password)"
|
||||
+ assert not found_actual, f"Actual password found in {log_format} MODIFY log (should be masked)"
|
||||
+ assert not found_hashed, f"Hashed password found in {log_format} MODIFY log (should be masked)"
|
||||
+
|
||||
+ user.replace('userPassword', TEST_PASSWORD_2)
|
||||
+
|
||||
+ found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
|
||||
+ assert found_masked_2, f"Masked password not found in {log_format} MODIFY operation (second password)"
|
||||
+ assert not found_actual_2, f"Second actual password found in {log_format} MODIFY log (should be masked)"
|
||||
+ assert not found_hashed_2, f"Second hashed password found in {log_format} MODIFY log (should be masked)"
|
||||
+
|
||||
+ finally:
|
||||
+ if user is not None:
|
||||
+ try:
|
||||
+ user.delete()
|
||||
+ except:
|
||||
+ pass
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "nsslapd-rootpw"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "nsslapd-rootpw")
|
||||
+])
|
||||
+def test_password_masking_rootpw_modify_operation(topo, log_format, display_attrs):
|
||||
+ """Test password masking for nsslapd-rootpw MODIFY operations
|
||||
+
|
||||
+ :id: ec8c9fd4-56ba-4663-ab65-58efb3b445e4
|
||||
+ :parametrized: yes
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Configure audit logging format
|
||||
+ 2. Modify nsslapd-rootpw in configuration
|
||||
+ 3. Check that root password is masked in audit log
|
||||
+ 4. Modify root password to new value
|
||||
+ 5. Check that new root password is also masked
|
||||
+ 6. Verify actual root passwords do not appear in log
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Root password should be masked with asterisks
|
||||
+ 4. Success
|
||||
+ 5. New root password should be masked with asterisks
|
||||
+ 6. No actual root password values should be found in log
|
||||
+ """
|
||||
+ inst = topo.ms['supplier1']
|
||||
+ setup_audit_logging(inst, log_format, display_attrs)
|
||||
+ dm = DirectoryManager(inst)
|
||||
+
|
||||
+ try:
|
||||
+ dm.change_password(TEST_PASSWORD)
|
||||
+ dm.rebind(TEST_PASSWORD)
|
||||
+
|
||||
+ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
|
||||
+ assert found_masked, f"Masked root password not found in {log_format} MODIFY operation (first root password)"
|
||||
+ assert not found_actual, f"Actual root password found in {log_format} MODIFY log (should be masked)"
|
||||
+ assert not found_hashed, f"Hashed root password found in {log_format} MODIFY log (should be masked)"
|
||||
+
|
||||
+ dm.change_password(TEST_PASSWORD_2)
|
||||
+ dm.rebind(TEST_PASSWORD_2)
|
||||
+
|
||||
+ found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
|
||||
+ assert found_masked_2, f"Masked root password not found in {log_format} MODIFY operation (second root password)"
|
||||
+ assert not found_actual_2, f"Second actual root password found in {log_format} MODIFY log (should be masked)"
|
||||
+ assert not found_hashed_2, f"Second hashed root password found in {log_format} MODIFY log (should be masked)"
|
||||
+
|
||||
+ finally:
|
||||
+ dm.change_password(PW_DM)
|
||||
+ dm.rebind(PW_DM)
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "nsmultiplexorcredentials"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "nsmultiplexorcredentials")
|
||||
+])
|
||||
+def test_password_masking_multiplexor_credentials(topo, log_format, display_attrs):
|
||||
+ """Test password masking for nsmultiplexorcredentials in chaining/multiplexor configurations
|
||||
+
|
||||
+ :id: 161a9498-b248-4926-90be-a696a36ed36e
|
||||
+ :parametrized: yes
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Configure audit logging format
|
||||
+ 2. Create a chaining backend configuration entry with nsmultiplexorcredentials
|
||||
+ 3. Check that multiplexor credentials are masked in audit log
|
||||
+ 4. Modify the credentials
|
||||
+ 5. Check that updated credentials are also masked
|
||||
+ 6. Verify actual credentials do not appear in log
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Multiplexor credentials should be masked with asterisks
|
||||
+ 4. Success
|
||||
+ 5. Updated credentials should be masked with asterisks
|
||||
+ 6. No actual credential values should be found in log
|
||||
+ """
|
||||
+ inst = topo.ms['supplier1']
|
||||
+ setup_audit_logging(inst, log_format, display_attrs)
|
||||
+
|
||||
+ # Enable chaining plugin and create chaining link
|
||||
+ chain_plugin = ChainingBackendPlugin(inst)
|
||||
+ chain_plugin.enable()
|
||||
+
|
||||
+ chains = ChainingLinks(inst)
|
||||
+ chain = None
|
||||
+
|
||||
+ try:
|
||||
+ # Create chaining link with multiplexor credentials
|
||||
+ chain = chains.create(properties={
|
||||
+ 'cn': 'testchain',
|
||||
+ 'nsfarmserverurl': 'ldap://localhost:389/',
|
||||
+ 'nsslapd-suffix': 'dc=example,dc=com',
|
||||
+ 'nsmultiplexorbinddn': 'cn=manager',
|
||||
+ 'nsmultiplexorcredentials': TEST_PASSWORD,
|
||||
+ 'nsCheckLocalACI': 'on',
|
||||
+ 'nsConnectionLife': '30',
|
||||
+ })
|
||||
+
|
||||
+ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
|
||||
+ assert found_masked, f"Masked multiplexor credentials not found in {log_format} ADD operation"
|
||||
+ assert not found_actual, f"Actual multiplexor credentials found in {log_format} ADD log (should be masked)"
|
||||
+ assert not found_hashed, f"Hashed multiplexor credentials found in {log_format} ADD log (should be masked)"
|
||||
+
|
||||
+ # Modify the credentials
|
||||
+ chain.replace('nsmultiplexorcredentials', TEST_PASSWORD_2)
|
||||
+
|
||||
+ found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
|
||||
+ assert found_masked_2, f"Masked multiplexor credentials not found in {log_format} MODIFY operation"
|
||||
+ assert not found_actual_2, f"Actual multiplexor credentials found in {log_format} MODIFY log (should be masked)"
|
||||
+ assert not found_hashed_2, f"Hashed multiplexor credentials found in {log_format} MODIFY log (should be masked)"
|
||||
+
|
||||
+ finally:
|
||||
+ chain_plugin.disable()
|
||||
+ if chain is not None:
|
||||
+ inst.delete_branch_s(chain.dn, ldap.SCOPE_ONELEVEL)
|
||||
+ chain.delete()
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "nsDS5ReplicaCredentials"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "nsDS5ReplicaCredentials")
|
||||
+])
|
||||
+def test_password_masking_replica_credentials(topo, log_format, display_attrs):
|
||||
+ """Test password masking for nsDS5ReplicaCredentials in replication agreements
|
||||
+
|
||||
+ :id: 7bf9e612-1b7c-49af-9fc0-de4c7df84b2a
|
||||
+ :parametrized: yes
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Configure audit logging format
|
||||
+ 2. Create a replication agreement entry with nsDS5ReplicaCredentials
|
||||
+ 3. Check that replica credentials are masked in audit log
|
||||
+ 4. Modify the credentials
|
||||
+ 5. Check that updated credentials are also masked
|
||||
+ 6. Verify actual credentials do not appear in log
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Replica credentials should be masked with asterisks
|
||||
+ 4. Success
|
||||
+ 5. Updated credentials should be masked with asterisks
|
||||
+ 6. No actual credential values should be found in log
|
||||
+ """
|
||||
+ inst = topo.ms['supplier2']
|
||||
+ setup_audit_logging(inst, log_format, display_attrs)
|
||||
+ agmt = None
|
||||
+
|
||||
+ try:
|
||||
+ replicas = Replicas(inst)
|
||||
+ replica = replicas.get(DEFAULT_SUFFIX)
|
||||
+ agmts = replica.get_agreements()
|
||||
+ agmt = agmts.create(properties={
|
||||
+ 'cn': 'testagmt',
|
||||
+ 'nsDS5ReplicaHost': 'localhost',
|
||||
+ 'nsDS5ReplicaPort': '389',
|
||||
+ 'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config',
|
||||
+ 'nsDS5ReplicaCredentials': TEST_PASSWORD,
|
||||
+ 'nsDS5ReplicaRoot': DEFAULT_SUFFIX
|
||||
+ })
|
||||
+
|
||||
+ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
|
||||
+ assert found_masked, f"Masked replica credentials not found in {log_format} ADD operation"
|
||||
+ assert not found_actual, f"Actual replica credentials found in {log_format} ADD log (should be masked)"
|
||||
+ assert not found_hashed, f"Hashed replica credentials found in {log_format} ADD log (should be masked)"
|
||||
+
|
||||
+ # Modify the credentials
|
||||
+ agmt.replace('nsDS5ReplicaCredentials', TEST_PASSWORD_2)
|
||||
+
|
||||
+ found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
|
||||
+ assert found_masked_2, f"Masked replica credentials not found in {log_format} MODIFY operation"
|
||||
+ assert not found_actual_2, f"Actual replica credentials found in {log_format} MODIFY log (should be masked)"
|
||||
+ assert not found_hashed_2, f"Hashed replica credentials found in {log_format} MODIFY log (should be masked)"
|
||||
+
|
||||
+ finally:
|
||||
+ if agmt is not None:
|
||||
+ agmt.delete()
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "nsDS5ReplicaBootstrapCredentials"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "nsDS5ReplicaBootstrapCredentials")
|
||||
+])
|
||||
+def test_password_masking_bootstrap_credentials(topo, log_format, display_attrs):
|
||||
+ """Test password masking for nsDS5ReplicaCredentials and nsDS5ReplicaBootstrapCredentials in replication agreements
|
||||
+
|
||||
+ :id: 248bd418-ffa4-4733-963d-2314c60b7c5b
|
||||
+ :parametrized: yes
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Configure audit logging format
|
||||
+ 2. Create a replication agreement entry with both nsDS5ReplicaCredentials and nsDS5ReplicaBootstrapCredentials
|
||||
+ 3. Check that both credentials are masked in audit log
|
||||
+ 4. Modify both credentials
|
||||
+ 5. Check that both updated credentials are also masked
|
||||
+ 6. Verify actual credentials do not appear in log
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Both credentials should be masked with asterisks
|
||||
+ 4. Success
|
||||
+ 5. Both updated credentials should be masked with asterisks
|
||||
+ 6. No actual credential values should be found in log
|
||||
+ """
|
||||
+ inst = topo.ms['supplier2']
|
||||
+ setup_audit_logging(inst, log_format, display_attrs)
|
||||
+ agmt = None
|
||||
+
|
||||
+ try:
|
||||
+ replicas = Replicas(inst)
|
||||
+ replica = replicas.get(DEFAULT_SUFFIX)
|
||||
+ agmts = replica.get_agreements()
|
||||
+ agmt = agmts.create(properties={
|
||||
+ 'cn': 'testbootstrapagmt',
|
||||
+ 'nsDS5ReplicaHost': 'localhost',
|
||||
+ 'nsDS5ReplicaPort': '389',
|
||||
+ 'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config',
|
||||
+ 'nsDS5ReplicaCredentials': TEST_PASSWORD,
|
||||
+ 'nsDS5replicabootstrapbinddn': 'cn=bootstrap manager,cn=config',
|
||||
+ 'nsDS5ReplicaBootstrapCredentials': TEST_PASSWORD_2,
|
||||
+ 'nsDS5ReplicaRoot': DEFAULT_SUFFIX
|
||||
+ })
|
||||
+
|
||||
+ found_masked_bootstrap, found_actual_bootstrap, found_hashed_bootstrap = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
|
||||
+ assert found_masked_bootstrap, f"Masked bootstrap credentials not found in {log_format} ADD operation"
|
||||
+ assert not found_actual_bootstrap, f"Actual bootstrap credentials found in {log_format} ADD log (should be masked)"
|
||||
+ assert not found_hashed_bootstrap, f"Hashed bootstrap credentials found in {log_format} ADD log (should be masked)"
|
||||
+
|
||||
+ agmt.replace('nsDS5ReplicaBootstrapCredentials', TEST_PASSWORD_3)
|
||||
+
|
||||
+ found_masked_bootstrap_2, found_actual_bootstrap_2, found_hashed_bootstrap_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_3)
|
||||
+ assert found_masked_bootstrap_2, f"Masked bootstrap credentials not found in {log_format} MODIFY operation"
|
||||
+ assert not found_actual_bootstrap_2, f"Actual bootstrap credentials found in {log_format} MODIFY log (should be masked)"
|
||||
+ assert not found_hashed_bootstrap_2, f"Hashed bootstrap credentials found in {log_format} MODIFY log (should be masked)"
|
||||
+
|
||||
+ finally:
|
||||
+ if agmt is not None:
|
||||
+ agmt.delete()
|
||||
+
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main(["-s", CURRENT_FILE])
|
||||
\ No newline at end of file
|
||||
diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
|
||||
index 3945b0533..3a34959f6 100644
|
||||
--- a/ldap/servers/slapd/auditlog.c
|
||||
+++ b/ldap/servers/slapd/auditlog.c
|
||||
@@ -39,6 +39,89 @@ static void write_audit_file(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
|
||||
static const char *modrdn_changes[4];
|
||||
|
||||
+/* Helper function to check if an attribute is a password that needs masking */
|
||||
+static int
|
||||
+is_password_attribute(const char *attr_name)
|
||||
+{
|
||||
+ return (strcasecmp(attr_name, SLAPI_USERPWD_ATTR) == 0 ||
|
||||
+ strcasecmp(attr_name, CONFIG_ROOTPW_ATTRIBUTE) == 0 ||
|
||||
+ strcasecmp(attr_name, SLAPI_MB_CREDENTIALS) == 0 ||
|
||||
+ strcasecmp(attr_name, SLAPI_REP_CREDENTIALS) == 0 ||
|
||||
+ strcasecmp(attr_name, SLAPI_REP_BOOTSTRAP_CREDENTIALS) == 0);
|
||||
+}
|
||||
+
|
||||
+/* Helper function to create a masked string representation of an entry */
|
||||
+static char *
|
||||
+create_masked_entry_string(Slapi_Entry *original_entry, int *len)
|
||||
+{
|
||||
+ Slapi_Attr *attr = NULL;
|
||||
+ char *entry_str = NULL;
|
||||
+ char *current_pos = NULL;
|
||||
+ char *line_start = NULL;
|
||||
+ char *next_line = NULL;
|
||||
+ char *colon_pos = NULL;
|
||||
+ int has_password_attrs = 0;
|
||||
+
|
||||
+ if (original_entry == NULL) {
|
||||
+ return NULL;
|
||||
+ }
|
||||
+
|
||||
+ /* Single pass through attributes to check for password attributes */
|
||||
+ for (slapi_entry_first_attr(original_entry, &attr); attr != NULL;
|
||||
+ slapi_entry_next_attr(original_entry, attr, &attr)) {
|
||||
+
|
||||
+ char *attr_name = NULL;
|
||||
+ slapi_attr_get_type(attr, &attr_name);
|
||||
+
|
||||
+ if (is_password_attribute(attr_name)) {
|
||||
+ has_password_attrs = 1;
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ /* If no password attributes, return original string - no masking needed */
|
||||
+ entry_str = slapi_entry2str(original_entry, len);
|
||||
+ if (!has_password_attrs) {
|
||||
+ return entry_str;
|
||||
+ }
|
||||
+
|
||||
+ /* Process the string in-place, replacing password values */
|
||||
+ current_pos = entry_str;
|
||||
+ while ((line_start = current_pos) != NULL && *line_start != '\0') {
|
||||
+ /* Find the end of current line */
|
||||
+ next_line = strchr(line_start, '\n');
|
||||
+ if (next_line != NULL) {
|
||||
+ *next_line = '\0'; /* Temporarily terminate line */
|
||||
+ current_pos = next_line + 1;
|
||||
+ } else {
|
||||
+ current_pos = NULL; /* Last line */
|
||||
+ }
|
||||
+
|
||||
+ /* Find the colon that separates attribute name from value */
|
||||
+ colon_pos = strchr(line_start, ':');
|
||||
+ if (colon_pos != NULL) {
|
||||
+ char saved_colon = *colon_pos;
|
||||
+ *colon_pos = '\0'; /* Temporarily null-terminate attribute name */
|
||||
+
|
||||
+ /* Check if this is a password attribute that needs masking */
|
||||
+ if (is_password_attribute(line_start)) {
|
||||
+ strcpy(colon_pos + 1, " **********************");
|
||||
+ }
|
||||
+
|
||||
+ *colon_pos = saved_colon; /* Restore colon */
|
||||
+ }
|
||||
+
|
||||
+ /* Restore newline if it was there */
|
||||
+ if (next_line != NULL) {
|
||||
+ *next_line = '\n';
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ /* Update length since we may have shortened the string */
|
||||
+ *len = strlen(entry_str);
|
||||
+ return entry_str; /* Return the modified original string */
|
||||
+}
|
||||
+
|
||||
void
|
||||
write_audit_log_entry(Slapi_PBlock *pb)
|
||||
{
|
||||
@@ -279,10 +362,31 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
|
||||
{
|
||||
slapi_entry_attr_find(entry, req_attr, &entry_attr);
|
||||
if (entry_attr) {
|
||||
- if (use_json) {
|
||||
- log_entry_attr_json(entry_attr, req_attr, id_list);
|
||||
+ if (strcmp(req_attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) {
|
||||
+ /* Do not write the unhashed clear-text password */
|
||||
+ continue;
|
||||
+ }
|
||||
+
|
||||
+ /* Check if this is a password attribute that needs masking */
|
||||
+ if (is_password_attribute(req_attr)) {
|
||||
+ /* userpassword/rootdn password - mask the value */
|
||||
+ if (use_json) {
|
||||
+ json_object *secret_obj = json_object_new_object();
|
||||
+ json_object_object_add(secret_obj, req_attr,
|
||||
+ json_object_new_string("**********************"));
|
||||
+ json_object_array_add(id_list, secret_obj);
|
||||
+ } else {
|
||||
+ addlenstr(l, "#");
|
||||
+ addlenstr(l, req_attr);
|
||||
+ addlenstr(l, ": **********************\n");
|
||||
+ }
|
||||
} else {
|
||||
- log_entry_attr(entry_attr, req_attr, l);
|
||||
+ /* Regular attribute - log normally */
|
||||
+ if (use_json) {
|
||||
+ log_entry_attr_json(entry_attr, req_attr, id_list);
|
||||
+ } else {
|
||||
+ log_entry_attr(entry_attr, req_attr, l);
|
||||
+ }
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -297,9 +401,7 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
|
||||
continue;
|
||||
}
|
||||
|
||||
- if (strcasecmp(attr, SLAPI_USERPWD_ATTR) == 0 ||
|
||||
- strcasecmp(attr, CONFIG_ROOTPW_ATTRIBUTE) == 0)
|
||||
- {
|
||||
+ if (is_password_attribute(attr)) {
|
||||
/* userpassword/rootdn password - mask the value */
|
||||
if (use_json) {
|
||||
json_object *secret_obj = json_object_new_object();
|
||||
@@ -309,7 +411,7 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
|
||||
} else {
|
||||
addlenstr(l, "#");
|
||||
addlenstr(l, attr);
|
||||
- addlenstr(l, ": ****************************\n");
|
||||
+ addlenstr(l, ": **********************\n");
|
||||
}
|
||||
continue;
|
||||
}
|
||||
@@ -478,6 +580,9 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
}
|
||||
}
|
||||
|
||||
+ /* Check if this is a password attribute that needs masking */
|
||||
+ int is_password_attr = is_password_attribute(mods[j]->mod_type);
|
||||
+
|
||||
mod = json_object_new_object();
|
||||
switch (operationtype) {
|
||||
case LDAP_MOD_ADD:
|
||||
@@ -502,7 +607,12 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
json_object *val_list = NULL;
|
||||
val_list = json_object_new_array();
|
||||
for (size_t i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) {
|
||||
- json_object_array_add(val_list, json_object_new_string(mods[j]->mod_bvalues[i]->bv_val));
|
||||
+ if (is_password_attr) {
|
||||
+ /* Mask password values */
|
||||
+ json_object_array_add(val_list, json_object_new_string("**********************"));
|
||||
+ } else {
|
||||
+ json_object_array_add(val_list, json_object_new_string(mods[j]->mod_bvalues[i]->bv_val));
|
||||
+ }
|
||||
}
|
||||
json_object_object_add(mod, "values", val_list);
|
||||
}
|
||||
@@ -514,8 +624,11 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
|
||||
case SLAPI_OPERATION_ADD:
|
||||
int len;
|
||||
+
|
||||
e = change;
|
||||
- tmp = slapi_entry2str(e, &len);
|
||||
+
|
||||
+ /* Create a masked string representation for password attributes */
|
||||
+ tmp = create_masked_entry_string(e, &len);
|
||||
tmpsave = tmp;
|
||||
while ((tmp = strchr(tmp, '\n')) != NULL) {
|
||||
tmp++;
|
||||
@@ -662,6 +775,10 @@ write_audit_file(
|
||||
break;
|
||||
}
|
||||
}
|
||||
+
|
||||
+ /* Check if this is a password attribute that needs masking */
|
||||
+ int is_password_attr = is_password_attribute(mods[j]->mod_type);
|
||||
+
|
||||
switch (operationtype) {
|
||||
case LDAP_MOD_ADD:
|
||||
addlenstr(l, "add: ");
|
||||
@@ -686,18 +803,27 @@ write_audit_file(
|
||||
break;
|
||||
}
|
||||
if (operationtype != LDAP_MOD_IGNORE) {
|
||||
- for (i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) {
|
||||
- char *buf, *bufp;
|
||||
- len = strlen(mods[j]->mod_type);
|
||||
- len = LDIF_SIZE_NEEDED(len, mods[j]->mod_bvalues[i]->bv_len) + 1;
|
||||
- buf = slapi_ch_malloc(len);
|
||||
- bufp = buf;
|
||||
- slapi_ldif_put_type_and_value_with_options(&bufp, mods[j]->mod_type,
|
||||
- mods[j]->mod_bvalues[i]->bv_val,
|
||||
- mods[j]->mod_bvalues[i]->bv_len, 0);
|
||||
- *bufp = '\0';
|
||||
- addlenstr(l, buf);
|
||||
- slapi_ch_free((void **)&buf);
|
||||
+ if (is_password_attr) {
|
||||
+ /* Add masked password */
|
||||
+ for (i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) {
|
||||
+ addlenstr(l, mods[j]->mod_type);
|
||||
+ addlenstr(l, ": **********************\n");
|
||||
+ }
|
||||
+ } else {
|
||||
+ /* Add actual values for non-password attributes */
|
||||
+ for (i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) {
|
||||
+ char *buf, *bufp;
|
||||
+ len = strlen(mods[j]->mod_type);
|
||||
+ len = LDIF_SIZE_NEEDED(len, mods[j]->mod_bvalues[i]->bv_len) + 1;
|
||||
+ buf = slapi_ch_malloc(len);
|
||||
+ bufp = buf;
|
||||
+ slapi_ldif_put_type_and_value_with_options(&bufp, mods[j]->mod_type,
|
||||
+ mods[j]->mod_bvalues[i]->bv_val,
|
||||
+ mods[j]->mod_bvalues[i]->bv_len, 0);
|
||||
+ *bufp = '\0';
|
||||
+ addlenstr(l, buf);
|
||||
+ slapi_ch_free((void **)&buf);
|
||||
+ }
|
||||
}
|
||||
}
|
||||
addlenstr(l, "-\n");
|
||||
@@ -708,7 +834,7 @@ write_audit_file(
|
||||
e = change;
|
||||
addlenstr(l, attr_changetype);
|
||||
addlenstr(l, ": add\n");
|
||||
- tmp = slapi_entry2str(e, &len);
|
||||
+ tmp = create_masked_entry_string(e, &len);
|
||||
tmpsave = tmp;
|
||||
while ((tmp = strchr(tmp, '\n')) != NULL) {
|
||||
tmp++;
|
||||
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
|
||||
index 7a3eb3fdf..fb88488b1 100644
|
||||
--- a/ldap/servers/slapd/slapi-private.h
|
||||
+++ b/ldap/servers/slapd/slapi-private.h
|
||||
@@ -848,6 +848,7 @@ void task_cleanup(void);
|
||||
/* for reversible encyrption */
|
||||
#define SLAPI_MB_CREDENTIALS "nsmultiplexorcredentials"
|
||||
#define SLAPI_REP_CREDENTIALS "nsds5ReplicaCredentials"
|
||||
+#define SLAPI_REP_BOOTSTRAP_CREDENTIALS "nsds5ReplicaBootstrapCredentials"
|
||||
int pw_rever_encode(Slapi_Value **vals, char *attr_name);
|
||||
int pw_rever_decode(char *cipher, char **plain, const char *attr_name);
|
||||
|
||||
diff --git a/src/lib389/lib389/chaining.py b/src/lib389/lib389/chaining.py
|
||||
index 533b83ebf..33ae78c8b 100644
|
||||
--- a/src/lib389/lib389/chaining.py
|
||||
+++ b/src/lib389/lib389/chaining.py
|
||||
@@ -134,7 +134,7 @@ class ChainingLink(DSLdapObject):
|
||||
"""
|
||||
|
||||
# Create chaining entry
|
||||
- super(ChainingLink, self).create(rdn, properties, basedn)
|
||||
+ link = super(ChainingLink, self).create(rdn, properties, basedn)
|
||||
|
||||
# Create mapping tree entry
|
||||
dn_comps = ldap.explode_dn(properties['nsslapd-suffix'][0])
|
||||
@@ -149,6 +149,7 @@ class ChainingLink(DSLdapObject):
|
||||
self._mts.ensure_state(properties=mt_properties)
|
||||
except ldap.ALREADY_EXISTS:
|
||||
pass
|
||||
+ return link
|
||||
|
||||
|
||||
class ChainingLinks(DSLdapObjects):
|
||||
--
|
||||
2.49.0
|
||||
|
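A minimal Python sketch (not part of the patch above) of the masking rule the auditlog.c hunks implement: values of credential attributes are replaced with a fixed run of asterisks before the line is written to the audit log. The attribute list and mask string mirror the patch; the helper name is illustrative only.

    # Sketch only: mirrors the masking rule from the auditlog.c change above.
    MASK = "**********************"
    PASSWORD_ATTRS = {
        "userpassword",
        "nsslapd-rootpw",
        "nsmultiplexorcredentials",
        "nsds5replicacredentials",
        "nsds5replicabootstrapcredentials",
    }

    def mask_ldif_line(line):
        """Return an LDIF 'attr: value' line with the value masked for credential attrs."""
        attr, sep, _value = line.partition(":")
        if sep and attr.strip().lower() in PASSWORD_ATTRS:
            return f"{attr}: {MASK}"
        return line

    print(mask_ldif_line("userPassword: MySecret123"))  # userPassword: **********************
    print(mask_ldif_line("cn: Test Add User"))          # unchanged
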
@ -1,40 +0,0 @@
|
||||
From 1ffcc9aa9a397180fe35283ee61b164471d073fb Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Tue, 7 Jan 2025 10:01:51 +0100
|
||||
Subject: [PATCH] Issue 6417 - (2nd) fix typo
|
||||
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 10 ++++++----
|
||||
1 file changed, 6 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 1bbb6252a..e2b8273a2 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1178,8 +1178,10 @@ entryrdn_lookup_dn(backend *be,
|
||||
|
||||
/* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
- dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
|
||||
- rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
|
||||
+ key.data = keybuf;
|
||||
+ key.size = key.ulen = strlen(keybuf) + 1;
|
||||
+ key.flags = DB_DBT_USERMEM;
|
||||
+ rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
"Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
@@ -1189,8 +1191,8 @@ entryrdn_lookup_dn(backend *be,
|
||||
elem = (rdn_elem *)data.data;
|
||||
suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
}
|
||||
- dblayer_value_free(be, &data);
|
||||
- dblayer_value_free(be, &key);
|
||||
+ slapi_ch_free(&data.data);
|
||||
+ slapi_ch_free_string(&keybuf);
|
||||
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
--
|
||||
2.48.0
|
||||
|
File diff suppressed because it is too large
@ -0,0 +1,63 @@
|
||||
From 574a5295e13cf01c34226d676104057468198616 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Fri, 4 Oct 2024 08:55:11 -0700
|
||||
Subject: [PATCH] Issue 6339 - Address Coverity scan issues in memberof and
|
||||
bdb_layer (#6353)
|
||||
|
||||
Description: Add null check for memberof attribute in memberof.c
|
||||
Fix memory leak by freeing 'cookie' in memberof.c
|
||||
Add null check for database environment in bdb_layer.c
|
||||
Fix race condition by adding mutex lock/unlock in bdb_layer.c
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6339
|
||||
|
||||
Reviewed by: @progier389, @tbordaz (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c | 17 ++++++++++++++---
|
||||
1 file changed, 14 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
index b04cd68e2..4f069197e 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
@@ -6987,6 +6987,7 @@ bdb_public_private_open(backend *be, const char *db_filename, int rw, dbi_env_t
|
||||
bdb_config *conf = (bdb_config *)li->li_dblayer_config;
|
||||
bdb_db_env **ppEnv = (bdb_db_env**)&priv->dblayer_env;
|
||||
char dbhome[MAXPATHLEN];
|
||||
+ bdb_db_env *pEnv = NULL;
|
||||
DB_ENV *bdb_env = NULL;
|
||||
DB *bdb_db = NULL;
|
||||
struct stat st = {0};
|
||||
@@ -7036,7 +7037,13 @@ bdb_public_private_open(backend *be, const char *db_filename, int rw, dbi_env_t
|
||||
conf->bdb_tx_max = 50;
|
||||
rc = bdb_start(li, DBLAYER_NORMAL_MODE);
|
||||
if (rc == 0) {
|
||||
- bdb_env = ((struct bdb_db_env*)(priv->dblayer_env))->bdb_DB_ENV;
|
||||
+ pEnv = (bdb_db_env *)priv->dblayer_env;
|
||||
+ if (pEnv == NULL) {
|
||||
+ fprintf(stderr, "bdb_public_private_open: dbenv is not available (0x%p) for database %s\n",
|
||||
+ (void *)pEnv, db_filename ? db_filename : "unknown");
|
||||
+ return EINVAL;
|
||||
+ }
|
||||
+ bdb_env = pEnv->bdb_DB_ENV;
|
||||
}
|
||||
} else {
|
||||
/* Setup minimal environment */
|
||||
@@ -7080,8 +7087,12 @@ bdb_public_private_close(struct ldbminfo *li, dbi_env_t **env, dbi_db_t **db)
|
||||
if (priv) {
|
||||
/* Detect if db is fully set up in read write mode */
|
||||
bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
|
||||
- if (pEnv && pEnv->bdb_thread_count>0) {
|
||||
- rw = 1;
|
||||
+ if (pEnv) {
|
||||
+ pthread_mutex_lock(&pEnv->bdb_thread_count_lock);
|
||||
+ if (pEnv->bdb_thread_count > 0) {
|
||||
+ rw = 1;
|
||||
+ }
|
||||
+ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock);
|
||||
}
|
||||
}
|
||||
if (rw == 0) {
|
||||
--
|
||||
2.49.0
|
||||
|
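The second hunk above closes a plain unsynchronized read of a shared counter. As a hedged illustration of the same discipline, in Python rather than the server's C, assuming a toy environment object:

    import threading

    class Env:
        """Toy stand-in for bdb_db_env: a thread count guarded by its own lock."""
        def __init__(self):
            self.bdb_thread_count = 0
            self.bdb_thread_count_lock = threading.Lock()

    def is_read_write(env):
        # Null check first (first hunk), then read the counter only under its lock (second hunk).
        if env is None:
            return False
        with env.bdb_thread_count_lock:
            return env.bdb_thread_count > 0
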
@ -1,75 +0,0 @@
|
||||
From 9e1284122a929fe14633a2aa6e2de4d72891f98f Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 13 Jan 2025 17:41:18 +0100
|
||||
Subject: [PATCH] Issue 6417 - (3rd) If an entry RDN is identical to the
|
||||
suffix, then Entryrdn gets broken during a reindex (#6480)
|
||||
|
||||
Bug description:
|
||||
The previous fix had a flaw.
|
||||
In case entryrdn_lookup_dn is called with an undefined suffix
|
||||
the lookup of the suffix triggers a crash.
|
||||
For example, it can occur during an internal search of a
|
||||
nonexistent map (view plugin).
|
||||
The issue exists in all releases but is hidden since 2.3.
|
||||
|
||||
Fix description:
|
||||
testing that the suffix is defined
|
||||
|
||||
fixes: #6417
|
||||
|
||||
Reviewed by: Pierre Rogier (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 36 +++++++++++---------
|
||||
1 file changed, 20 insertions(+), 16 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index e2b8273a2..01c77156f 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1176,23 +1176,27 @@ entryrdn_lookup_dn(backend *be,
|
||||
/* Setting the bulk fetch buffer */
|
||||
data.flags = DB_DBT_MALLOC;
|
||||
|
||||
- /* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
- keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
- key.data = keybuf;
|
||||
- key.size = key.ulen = strlen(keybuf) + 1;
|
||||
- key.flags = DB_DBT_USERMEM;
|
||||
- rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
- if (rc) {
|
||||
- slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
- "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
- slapi_sdn_get_ndn(be->be_suffix),
|
||||
- suffix_id);
|
||||
- } else {
|
||||
- elem = (rdn_elem *)data.data;
|
||||
- suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ /* Just in case the suffix ID is not '1' retrieve it from the database
|
||||
+ * if the suffix is not defined suffix_id remains '1'
|
||||
+ */
|
||||
+ if (be->be_suffix) {
|
||||
+ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
+ key.data = keybuf;
|
||||
+ key.size = key.ulen = strlen(keybuf) + 1;
|
||||
+ key.flags = DB_DBT_USERMEM;
|
||||
+ rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
+ if (rc) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
+ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
+ slapi_sdn_get_ndn(be->be_suffix),
|
||||
+ suffix_id);
|
||||
+ } else {
|
||||
+ elem = (rdn_elem *) data.data;
|
||||
+ suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ }
|
||||
+ slapi_ch_free(&data.data);
|
||||
+ slapi_ch_free_string(&keybuf);
|
||||
}
|
||||
- slapi_ch_free(&data.data);
|
||||
- slapi_ch_free_string(&keybuf);
|
||||
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
--
|
||||
2.48.0
|
||||
|
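Restating the fix as a hedged Python sketch: keep the default suffix ID of 1, attempt the database lookup only when the backend actually has a suffix, and fall back to the default on any lookup failure instead of dereferencing a missing suffix. All names here are illustrative, not lib389 or slapd API.

    DEFAULT_SUFFIX_ID = 1

    def resolve_suffix_id(suffix_ndn, lookup):
        """Return the suffix ID, or the default when the suffix is undefined or not found."""
        if not suffix_ndn:                 # the 3rd fix: an undefined suffix must not be looked up
            return DEFAULT_SUFFIX_ID
        try:
            return lookup(suffix_ndn)      # e.g. a DB_SET cursor get on the normalized suffix DN
        except LookupError:
            return DEFAULT_SUFFIX_ID       # matches the WARNING path: keep the default value
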
@ -0,0 +1,31 @@
|
||||
From 972ddeed2029975d5d89e165db1db554f2e8bc28 Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Tue, 29 Jul 2025 08:00:00 +0200
|
||||
Subject: [PATCH] Issue 6468 - CLI - Fix default error log level
|
||||
|
||||
Description:
|
||||
Default error log level is 16384
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6468
|
||||
|
||||
Reviewed by: @droideck (Thanks!)
|
||||
---
|
||||
src/lib389/lib389/cli_conf/logging.py | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/cli_conf/logging.py b/src/lib389/lib389/cli_conf/logging.py
|
||||
index d1e32822c..c48c75faa 100644
|
||||
--- a/src/lib389/lib389/cli_conf/logging.py
|
||||
+++ b/src/lib389/lib389/cli_conf/logging.py
|
||||
@@ -44,7 +44,7 @@ ERROR_LEVELS = {
|
||||
+ "methods used for a SASL bind"
|
||||
},
|
||||
"default": {
|
||||
- "level": 6384,
|
||||
+ "level": 16384,
|
||||
"desc": "Default logging level"
|
||||
},
|
||||
"filter": {
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,297 +0,0 @@
|
||||
From d2f9dd82e3610ee9b73feea981c680c03bb21394 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Thu, 16 Jan 2025 08:42:53 -0500
|
||||
Subject: [PATCH] Issue 6509 - Race condition with Paged Result searches
|
||||
|
||||
Description:
|
||||
|
||||
There is a race condition with Paged Result searches when a new operation comes
|
||||
in while a paged search is finishing. This triggers an invalid time out error
|
||||
and closes the connection with a T3 code.
|
||||
|
||||
The problem is that we do not use the "PagedResult lock" when checking the
|
||||
connection's paged result data for a timeout event. This causes the paged
|
||||
result timeout value to change unexpectedly and trigger a false timeout when a
|
||||
new operation arrives.
|
||||
|
||||
Now we check the timeout without the conn lock; if it's expired it could
|
||||
be a race condition and a false positive. Try the lock and re-test the
|
||||
timeout. This also prevents blocking non-paged result searches from
|
||||
getting held up by the lock when it's not necessary.
|
||||
|
||||
This also fixes some memory leaks that occur when an error happens.
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6509
|
||||
|
||||
Reviewed by: tbordaz & proger (Thanks!!)
|
||||
---
|
||||
ldap/servers/slapd/daemon.c | 61 ++++++++++++++++++-------------
|
||||
ldap/servers/slapd/opshared.c | 58 ++++++++++++++---------------
|
||||
ldap/servers/slapd/pagedresults.c | 9 +++++
|
||||
ldap/servers/slapd/slap.h | 2 +-
|
||||
4 files changed, 75 insertions(+), 55 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index bb80dae36..13dfe250d 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1578,7 +1578,29 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
if (c->c_state == CONN_STATE_FREE) {
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else {
|
||||
- /* we try to acquire the connection mutex, if it is already
|
||||
+ /* Check for a timeout for PAGED RESULTS */
|
||||
+ if (pagedresults_is_timedout_nolock(c)) {
|
||||
+ /*
|
||||
+ * There could be a race condition so lets try again with the
|
||||
+ * right lock
|
||||
+ */
|
||||
+ pthread_mutex_t *pr_mutex = pageresult_lock_get_addr(c);
|
||||
+ if (pthread_mutex_trylock(pr_mutex) == EBUSY) {
|
||||
+ c = next;
|
||||
+ continue;
|
||||
+ }
|
||||
+ if (pagedresults_is_timedout_nolock(c)) {
|
||||
+ pthread_mutex_unlock(pr_mutex);
|
||||
+ disconnect_server(c, c->c_connid, -1,
|
||||
+ SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
+ 0);
|
||||
+ } else {
|
||||
+ pthread_mutex_unlock(pr_mutex);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ /*
|
||||
+ * we try to acquire the connection mutex, if it is already
|
||||
* acquired by another thread, don't wait
|
||||
*/
|
||||
if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
|
||||
@@ -1586,35 +1608,24 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
continue;
|
||||
}
|
||||
if (c->c_flags & CONN_FLAG_CLOSING) {
|
||||
- /* A worker thread has marked that this connection
|
||||
- * should be closed by calling disconnect_server.
|
||||
- * move this connection out of the active list
|
||||
- * the last thread to use the connection will close it
|
||||
+ /*
|
||||
+ * A worker thread, or paged result timeout, has marked that
|
||||
+ * this connection should be closed by calling
|
||||
+ * disconnect_server(). Move this connection out of the active
|
||||
+ * list then the last thread to use the connection will close
|
||||
+ * it.
|
||||
*/
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_sd == SLAPD_INVALID_SOCKET) {
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_prfd != NULL) {
|
||||
if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
|
||||
- int add_fd = 1;
|
||||
- /* check timeout for PAGED RESULTS */
|
||||
- if (pagedresults_is_timedout_nolock(c)) {
|
||||
- /* Exceeded the paged search timelimit; disconnect the client */
|
||||
- disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
- SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
- 0);
|
||||
- connection_table_move_connection_out_of_active_list(ct,
|
||||
- c);
|
||||
- add_fd = 0; /* do not poll on this fd */
|
||||
- }
|
||||
- if (add_fd) {
|
||||
- ct->fd[count].fd = c->c_prfd;
|
||||
- ct->fd[count].in_flags = SLAPD_POLL_FLAGS;
|
||||
- /* slot i of the connection table is mapped to slot
|
||||
- * count of the fds array */
|
||||
- c->c_fdi = count;
|
||||
- count++;
|
||||
- }
|
||||
+ ct->fd[listnum][count].fd = c->c_prfd;
|
||||
+ ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
|
||||
+ /* slot i of the connection table is mapped to slot
|
||||
+ * count of the fds array */
|
||||
+ c->c_fdi = count;
|
||||
+ count++;
|
||||
} else {
|
||||
if (c->c_threadnumber >= c->c_max_threads_per_conn) {
|
||||
c->c_maxthreadsblocked++;
|
||||
@@ -1675,7 +1686,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused
|
||||
continue;
|
||||
}
|
||||
|
||||
- /* Try to get connection mutex, if not available just skip the connection and
|
||||
+ /* Try to get connection mutex, if not available just skip the connection and
|
||||
* process other connections events. May generates cpu load for listening thread
|
||||
* if connection mutex is held for a long time
|
||||
*/
|
||||
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
|
||||
index 7ab4117cd..a29eed052 100644
|
||||
--- a/ldap/servers/slapd/opshared.c
|
||||
+++ b/ldap/servers/slapd/opshared.c
|
||||
@@ -250,7 +250,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
char *errtext = NULL;
|
||||
int nentries, pnentries;
|
||||
int flag_search_base_found = 0;
|
||||
- int flag_no_such_object = 0;
|
||||
+ bool flag_no_such_object = false;
|
||||
int flag_referral = 0;
|
||||
int flag_psearch = 0;
|
||||
int err_code = LDAP_SUCCESS;
|
||||
@@ -315,7 +315,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
rc = -1;
|
||||
goto free_and_return_nolock;
|
||||
}
|
||||
-
|
||||
+
|
||||
/* Set the time we actually started the operation */
|
||||
slapi_operation_set_time_started(operation);
|
||||
|
||||
@@ -798,11 +798,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
}
|
||||
|
||||
/* subtree searches :
|
||||
- * if the search was started above the backend suffix
|
||||
- * - temporarily set the SLAPI_SEARCH_TARGET_SDN to the
|
||||
- * base of the node so that we don't get a NO SUCH OBJECT error
|
||||
- * - do not change the scope
|
||||
- */
|
||||
+ * if the search was started above the backend suffix
|
||||
+ * - temporarily set the SLAPI_SEARCH_TARGET_SDN to the
|
||||
+ * base of the node so that we don't get a NO SUCH OBJECT error
|
||||
+ * - do not change the scope
|
||||
+ */
|
||||
if (scope == LDAP_SCOPE_SUBTREE) {
|
||||
if (slapi_sdn_issuffix(be_suffix, basesdn)) {
|
||||
if (free_sdn) {
|
||||
@@ -825,53 +825,53 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
switch (rc) {
|
||||
case 1:
|
||||
/* if the backend returned LDAP_NO_SUCH_OBJECT for a SEARCH request,
|
||||
- * it will not have sent back a result - otherwise, it will have
|
||||
- * sent a result */
|
||||
+ * it will not have sent back a result - otherwise, it will have
|
||||
+ * sent a result */
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
if (err == LDAP_NO_SUCH_OBJECT) {
|
||||
/* may be the object exist somewhere else
|
||||
- * wait the end of the loop to send back this error
|
||||
- */
|
||||
- flag_no_such_object = 1;
|
||||
+ * wait the end of the loop to send back this error
|
||||
+ */
|
||||
+ flag_no_such_object = true;
|
||||
} else {
|
||||
/* err something other than LDAP_NO_SUCH_OBJECT, so the backend will
|
||||
- * have sent the result -
|
||||
- * Set a flag here so we don't return another result. */
|
||||
+ * have sent the result -
|
||||
+ * Set a flag here so we don't return another result. */
|
||||
sent_result = 1;
|
||||
}
|
||||
- /* fall through */
|
||||
+ /* fall through */
|
||||
|
||||
case -1: /* an error occurred */
|
||||
+ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
/* PAGED RESULTS */
|
||||
if (op_is_pagedresults(operation)) {
|
||||
/* cleanup the slot */
|
||||
pthread_mutex_lock(pagedresults_mutex);
|
||||
+ if (err != LDAP_NO_SUCH_OBJECT && !flag_no_such_object) {
|
||||
+ /* Free the results if not "no_such_object" */
|
||||
+ void *sr = NULL;
|
||||
+ slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
|
||||
+ be->be_search_results_release(&sr);
|
||||
+ }
|
||||
pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
|
||||
rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1);
|
||||
pthread_mutex_unlock(pagedresults_mutex);
|
||||
}
|
||||
- if (1 == flag_no_such_object) {
|
||||
- break;
|
||||
- }
|
||||
- slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
- if (err == LDAP_NO_SUCH_OBJECT) {
|
||||
- /* may be the object exist somewhere else
|
||||
- * wait the end of the loop to send back this error
|
||||
- */
|
||||
- flag_no_such_object = 1;
|
||||
+
|
||||
+ if (err == LDAP_NO_SUCH_OBJECT || flag_no_such_object) {
|
||||
+ /* Maybe the object exists somewhere else, wait to the end
|
||||
+ * of the loop to send back this error */
|
||||
+ flag_no_such_object = true;
|
||||
break;
|
||||
} else {
|
||||
- /* for error other than LDAP_NO_SUCH_OBJECT
|
||||
- * the error has already been sent
|
||||
- * stop the search here
|
||||
- */
|
||||
+ /* For error other than LDAP_NO_SUCH_OBJECT the error has
|
||||
+ * already been sent stop the search here */
|
||||
cache_return_target_entry(pb, be, operation);
|
||||
goto free_and_return;
|
||||
}
|
||||
|
||||
/* when rc == SLAPI_FAIL_DISKFULL this case is executed */
|
||||
-
|
||||
case SLAPI_FAIL_DISKFULL:
|
||||
operation_out_of_disk_space();
|
||||
cache_return_target_entry(pb, be, operation);
|
||||
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
|
||||
index db87e486e..4aa1fa3e5 100644
|
||||
--- a/ldap/servers/slapd/pagedresults.c
|
||||
+++ b/ldap/servers/slapd/pagedresults.c
|
||||
@@ -121,12 +121,15 @@ pagedresults_parse_control_value(Slapi_PBlock *pb,
|
||||
if (ber_scanf(ber, "{io}", pagesize, &cookie) == LBER_ERROR) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
|
||||
"<= corrupted control value\n");
|
||||
+ ber_free(ber, 1);
|
||||
return LDAP_PROTOCOL_ERROR;
|
||||
}
|
||||
if (!maxreqs) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
|
||||
"Simple paged results requests per conn exceeded the limit: %d\n",
|
||||
maxreqs);
|
||||
+ ber_free(ber, 1);
|
||||
+ slapi_ch_free_string(&cookie.bv_val);
|
||||
return LDAP_UNWILLING_TO_PERFORM;
|
||||
}
|
||||
|
||||
@@ -376,6 +379,10 @@ pagedresults_free_one_msgid(Connection *conn, ber_int_t msgid, pthread_mutex_t *
|
||||
}
|
||||
prp->pr_flags |= CONN_FLAG_PAGEDRESULTS_ABANDONED;
|
||||
prp->pr_flags &= ~CONN_FLAG_PAGEDRESULTS_PROCESSING;
|
||||
+ if (conn->c_pagedresults.prl_count > 0) {
|
||||
+ _pr_cleanup_one_slot(prp);
|
||||
+ conn->c_pagedresults.prl_count--;
|
||||
+ }
|
||||
rc = 0;
|
||||
break;
|
||||
}
|
||||
@@ -940,7 +947,9 @@ pagedresults_is_timedout_nolock(Connection *conn)
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "<-- pagedresults_is_timedout", "<= false 2\n");
|
||||
+
|
||||
return 0;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
|
||||
index 072f6f962..469874fd1 100644
|
||||
--- a/ldap/servers/slapd/slap.h
|
||||
+++ b/ldap/servers/slapd/slap.h
|
||||
@@ -74,7 +74,7 @@ static char ptokPBE[34] = "Internal (Software) Token ";
|
||||
#include <sys/stat.h>
|
||||
#include <sys/socket.h>
|
||||
#include <netinet/in.h>
|
||||
-
|
||||
+#include <stdbool.h>
|
||||
#include <time.h> /* For timespec definitions */
|
||||
|
||||
/* Provides our int types and platform specific requirements. */
|
||||
--
|
||||
2.48.0
|
||||
|
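The daemon.c hunk above is a double-checked test: the timeout is first read cheaply without the paged-results lock, and only when it looks expired is the lock tried (non-blocking) and the timeout re-tested before the client is disconnected. A hedged Python sketch of the same shape, assuming a connection object with a lock, a timeout check and a disconnect method (all names illustrative):

    def maybe_disconnect_on_timeout(conn):
        """Disconnect only when the paged-results timeout is confirmed under the lock."""
        if not conn.is_timed_out():
            return False                       # fast path: no lock taken at all
        # The unlocked read may be a false positive; confirm while holding the PR lock.
        if not conn.pr_lock.acquire(blocking=False):
            return False                       # lock busy: skip and retry on the next poll pass
        try:
            if conn.is_timed_out():
                conn.disconnect("SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT")
                return True
            return False
        finally:
            conn.pr_lock.release()
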
@ -1,29 +0,0 @@
|
||||
From 27cd055197bc3cae458a1f86621aa5410c66dd2c Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 20 Jan 2025 15:51:24 -0500
|
||||
Subject: [PATCH] Issue 6509 - Fix cherry pick issue (race condition in Paged
|
||||
results)
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6509
|
||||
---
|
||||
ldap/servers/slapd/daemon.c | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 13dfe250d..57e07e5f5 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1620,8 +1620,8 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_prfd != NULL) {
|
||||
if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
|
||||
- ct->fd[listnum][count].fd = c->c_prfd;
|
||||
- ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
|
||||
+ ct->fd[count].fd = c->c_prfd;
|
||||
+ ct->fd[count].in_flags = SLAPD_POLL_FLAGS;
|
||||
/* slot i of the connection table is mapped to slot
|
||||
* count of the fds array */
|
||||
c->c_fdi = count;
|
||||
--
|
||||
2.48.0
|
||||
|
222
SOURCES/0022-Issues-6913-6886-6250-Adjust-xfail-marks-6914.patch
Normal file
222
SOURCES/0022-Issues-6913-6886-6250-Adjust-xfail-marks-6914.patch
Normal file
@ -0,0 +1,222 @@
|
||||
From f28deac93c552a9c4dc9dd9c18f449fcd5cc7731 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Fri, 1 Aug 2025 09:28:39 -0700
|
||||
Subject: [PATCH] Issues 6913, 6886, 6250 - Adjust xfail marks (#6914)
|
||||
|
||||
Description: Some of the ACI invalid syntax issues were fixed,
|
||||
so we need to remove xfail marks.
|
||||
The disk space issue should have a 'skipif' mark.
|
||||
Display all attrs (nsslapd-auditlog-display-attrs: *) fails because of a bug.
|
||||
EntryUSN inconsistency and overflow bugs were exposed with the tests.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/6913
|
||||
Related: https://github.com/389ds/389-ds-base/issues/6886
|
||||
Related: https://github.com/389ds/389-ds-base/issues/6250
|
||||
|
||||
Reviewed by: @vashirov (Thanks!)
|
||||
---
|
||||
dirsrvtests/tests/suites/acl/syntax_test.py | 13 ++++++++--
|
||||
.../tests/suites/import/regression_test.py | 18 +++++++-------
|
||||
.../logging/audit_password_masking_test.py | 24 +++++++++----------
|
||||
.../suites/plugins/entryusn_overflow_test.py | 2 ++
|
||||
4 files changed, 34 insertions(+), 23 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/acl/syntax_test.py b/dirsrvtests/tests/suites/acl/syntax_test.py
|
||||
index 4edc7fa4b..ed9919ba3 100644
|
||||
--- a/dirsrvtests/tests/suites/acl/syntax_test.py
|
||||
+++ b/dirsrvtests/tests/suites/acl/syntax_test.py
|
||||
@@ -190,10 +190,9 @@ FAILED = [('test_targattrfilters_18',
|
||||
f'(all)userdn="ldap:///anyone";)'), ]
|
||||
|
||||
|
||||
-@pytest.mark.xfail(reason='https://bugzilla.redhat.com/show_bug.cgi?id=1691473')
|
||||
@pytest.mark.parametrize("real_value", [a[1] for a in FAILED],
|
||||
ids=[a[0] for a in FAILED])
|
||||
-def test_aci_invalid_syntax_fail(topo, real_value):
|
||||
+def test_aci_invalid_syntax_fail(topo, real_value, request):
|
||||
"""Try to set wrong ACI syntax.
|
||||
|
||||
:id: 83c40784-fff5-49c8-9535-7064c9c19e7e
|
||||
@@ -206,6 +205,16 @@ def test_aci_invalid_syntax_fail(topo, real_value):
|
||||
1. It should pass
|
||||
2. It should not pass
|
||||
"""
|
||||
+ # Mark specific test cases as xfail
|
||||
+ xfail_cases = [
|
||||
+ 'test_targattrfilters_18',
|
||||
+ 'test_targattrfilters_20',
|
||||
+ 'test_bind_rule_set_with_more_than_three'
|
||||
+ ]
|
||||
+
|
||||
+ if request.node.callspec.id in xfail_cases:
|
||||
+ pytest.xfail("DS6913 - This test case is expected to fail")
|
||||
+
|
||||
domain = Domain(topo.standalone, DEFAULT_SUFFIX)
|
||||
with pytest.raises(ldap.INVALID_SYNTAX):
|
||||
domain.add("aci", real_value)
|
||||
diff --git a/dirsrvtests/tests/suites/import/regression_test.py b/dirsrvtests/tests/suites/import/regression_test.py
|
||||
index 2f850a19a..18611de35 100644
|
||||
--- a/dirsrvtests/tests/suites/import/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/import/regression_test.py
|
||||
@@ -323,7 +323,7 @@ ou: myDups00001
|
||||
|
||||
@pytest.mark.bz1749595
|
||||
@pytest.mark.tier2
|
||||
-@pytest.mark.xfail(not _check_disk_space(), reason="not enough disk space for lmdb map")
|
||||
+@pytest.mark.skipif(not _check_disk_space(), reason="not enough disk space for lmdb map")
|
||||
@pytest.mark.xfail(ds_is_older("1.3.10.1"), reason="bz1749595 not fixed on versions older than 1.3.10.1")
|
||||
def test_large_ldif2db_ancestorid_index_creation(topo, _set_mdb_map_size):
|
||||
"""Import with ldif2db a large file - check that the ancestorid index creation phase has a correct performance
|
||||
@@ -399,39 +399,39 @@ def test_large_ldif2db_ancestorid_index_creation(topo, _set_mdb_map_size):
|
||||
log.info('Starting the server')
|
||||
topo.standalone.start()
|
||||
|
||||
- # With lmdb there is no more any special phase for ancestorid
|
||||
+ # With lmdb there is no more any special phase for ancestorid
|
||||
# because ancestorsid get updated on the fly while processing the
|
||||
# entryrdn (by up the parents chain to compute the parentid
|
||||
- #
|
||||
+ #
|
||||
# But there is still a numSubordinates generation phase
|
||||
if get_default_db_lib() == "mdb":
|
||||
log.info('parse the errors logs to check lines with "Generating numSubordinates complete." are present')
|
||||
end_numsubordinates = str(topo.standalone.ds_error_log.match(r'.*Generating numSubordinates complete.*'))[1:-1]
|
||||
assert len(end_numsubordinates) > 0
|
||||
-
|
||||
+
|
||||
else:
|
||||
log.info('parse the errors logs to check lines with "Starting sort of ancestorid" are present')
|
||||
start_sort_str = str(topo.standalone.ds_error_log.match(r'.*Starting sort of ancestorid non-leaf IDs*'))[1:-1]
|
||||
assert len(start_sort_str) > 0
|
||||
-
|
||||
+
|
||||
log.info('parse the errors logs to check lines with "Finished sort of ancestorid" are present')
|
||||
end_sort_str = str(topo.standalone.ds_error_log.match(r'.*Finished sort of ancestorid non-leaf IDs*'))[1:-1]
|
||||
assert len(end_sort_str) > 0
|
||||
-
|
||||
+
|
||||
log.info('parse the error logs for the line with "Gathering ancestorid non-leaf IDs"')
|
||||
start_ancestorid_indexing_op_str = str(topo.standalone.ds_error_log.match(r'.*Gathering ancestorid non-leaf IDs*'))[1:-1]
|
||||
assert len(start_ancestorid_indexing_op_str) > 0
|
||||
-
|
||||
+
|
||||
log.info('parse the error logs for the line with "Created ancestorid index"')
|
||||
end_ancestorid_indexing_op_str = str(topo.standalone.ds_error_log.match(r'.*Created ancestorid index*'))[1:-1]
|
||||
assert len(end_ancestorid_indexing_op_str) > 0
|
||||
-
|
||||
+
|
||||
log.info('get the ancestorid non-leaf IDs indexing start and end time from the collected strings')
|
||||
# Collected lines look like : '[15/May/2020:05:30:27.245967313 -0400] - INFO - bdb_get_nonleaf_ids - import userRoot: Gathering ancestorid non-leaf IDs...'
|
||||
# We are getting the sec.nanosec part of the date, '27.245967313' in the above example
|
||||
start_time = (start_ancestorid_indexing_op_str.split()[0]).split(':')[3]
|
||||
end_time = (end_ancestorid_indexing_op_str.split()[0]).split(':')[3]
|
||||
-
|
||||
+
|
||||
log.info('Calculate the elapsed time for the ancestorid non-leaf IDs index creation')
|
||||
etime = (Decimal(end_time) - Decimal(start_time))
|
||||
# The time for the ancestorid index creation should be less than 10s for an offline import of an ldif file with 100000 entries / 5 entries per node
|
||||
diff --git a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
index 3b6a54849..69a36cb5d 100644
|
||||
--- a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
+++ b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
@@ -117,10 +117,10 @@ def check_password_masked(inst, log_format, expected_password, actual_password):
|
||||
|
||||
@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
("default", None),
|
||||
- ("default", "*"),
|
||||
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("default", "userPassword"),
|
||||
("json", None),
|
||||
- ("json", "*"),
|
||||
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("json", "userPassword")
|
||||
])
|
||||
def test_password_masking_add_operation(topo, log_format, display_attrs):
|
||||
@@ -173,10 +173,10 @@ def test_password_masking_add_operation(topo, log_format, display_attrs):
|
||||
|
||||
@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
("default", None),
|
||||
- ("default", "*"),
|
||||
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("default", "userPassword"),
|
||||
("json", None),
|
||||
- ("json", "*"),
|
||||
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("json", "userPassword")
|
||||
])
|
||||
def test_password_masking_modify_operation(topo, log_format, display_attrs):
|
||||
@@ -242,10 +242,10 @@ def test_password_masking_modify_operation(topo, log_format, display_attrs):
|
||||
|
||||
@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
("default", None),
|
||||
- ("default", "*"),
|
||||
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("default", "nsslapd-rootpw"),
|
||||
("json", None),
|
||||
- ("json", "*"),
|
||||
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("json", "nsslapd-rootpw")
|
||||
])
|
||||
def test_password_masking_rootpw_modify_operation(topo, log_format, display_attrs):
|
||||
@@ -297,10 +297,10 @@ def test_password_masking_rootpw_modify_operation(topo, log_format, display_attr
|
||||
|
||||
@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
("default", None),
|
||||
- ("default", "*"),
|
||||
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("default", "nsmultiplexorcredentials"),
|
||||
("json", None),
|
||||
- ("json", "*"),
|
||||
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("json", "nsmultiplexorcredentials")
|
||||
])
|
||||
def test_password_masking_multiplexor_credentials(topo, log_format, display_attrs):
|
||||
@@ -368,10 +368,10 @@ def test_password_masking_multiplexor_credentials(topo, log_format, display_attr
|
||||
|
||||
@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
("default", None),
|
||||
- ("default", "*"),
|
||||
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("default", "nsDS5ReplicaCredentials"),
|
||||
("json", None),
|
||||
- ("json", "*"),
|
||||
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("json", "nsDS5ReplicaCredentials")
|
||||
])
|
||||
def test_password_masking_replica_credentials(topo, log_format, display_attrs):
|
||||
@@ -432,10 +432,10 @@ def test_password_masking_replica_credentials(topo, log_format, display_attrs):
|
||||
|
||||
@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
("default", None),
|
||||
- ("default", "*"),
|
||||
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("default", "nsDS5ReplicaBootstrapCredentials"),
|
||||
("json", None),
|
||||
- ("json", "*"),
|
||||
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("json", "nsDS5ReplicaBootstrapCredentials")
|
||||
])
|
||||
def test_password_masking_bootstrap_credentials(topo, log_format, display_attrs):
|
||||
diff --git a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
|
||||
index a23d734ca..8c3a537ab 100644
|
||||
--- a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
|
||||
+++ b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
|
||||
@@ -81,6 +81,7 @@ def setup_usn_test(topology_st, request):
|
||||
return created_users
|
||||
|
||||
|
||||
+@pytest.mark.xfail(reason="DS6250")
|
||||
def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test):
|
||||
"""Test that reproduces entryUSN overflow when adding existing entries
|
||||
|
||||
@@ -232,6 +233,7 @@ def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test):
|
||||
log.info("EntryUSN overflow test completed successfully")
|
||||
|
||||
|
||||
+@pytest.mark.xfail(reason="DS6250")
|
||||
def test_entryusn_consistency_after_failed_adds(topology_st, setup_usn_test):
|
||||
"""Test that entryUSN remains consistent after failed add operations
|
||||
|
||||
--
|
||||
2.49.0
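The patch above relies on two pytest marking patterns: attaching xfail to a single parametrize entry with pytest.param, and calling pytest.xfail() at runtime for selected case ids via request.node.callspec.id. A minimal, self-contained sketch of both patterns (test names, ids and reasons here are invented for illustration, not taken from the suite):

import pytest

@pytest.mark.parametrize("value", ["ok", "broken"], ids=["case_ok", "case_broken"])
def test_runtime_xfail(value, request):
    # Mark only selected parametrized cases as expected failures at runtime.
    if request.node.callspec.id in ("case_broken",):
        pytest.xfail("known issue, tracked upstream")
    assert value == "ok"

@pytest.mark.parametrize("fmt,attrs", [
    ("default", None),
    # Attach the xfail mark to this one parametrize entry only.
    pytest.param("default", "*", marks=pytest.mark.xfail(reason="known bug")),
])
def test_param_level_xfail(fmt, attrs):
    assert attrs is None

The skipif change in the same patch follows the usual rule of thumb: skipif when a precondition (here, disk space) makes the test meaningless, xfail when the test is valid but hits a known product bug.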
|
||||
|
@ -0,0 +1,32 @@
|
||||
From 58a9e1083865e75bba3cf9867a3df109031d7810 Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Mon, 28 Jul 2025 13:18:26 +0200
|
||||
Subject: [PATCH] Issue 6181 - RFE - Allow system to manage uid/gid at startup
|
||||
|
||||
Description:
|
||||
Expand CapabilityBoundingSet to include CAP_FOWNER
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6181
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6906
|
||||
|
||||
Reviewed by: @progier389 (Thanks!)
|
||||
---
|
||||
wrappers/systemd.template.service.in | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in
|
||||
index fa05c9f60..6db1f6f8f 100644
|
||||
--- a/wrappers/systemd.template.service.in
|
||||
+++ b/wrappers/systemd.template.service.in
|
||||
@@ -25,7 +25,7 @@ MemoryAccounting=yes
|
||||
|
||||
# Allow non-root instances to bind to low ports.
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
-CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETUID CAP_SETGID CAP_DAC_OVERRIDE CAP_CHOWN
|
||||
+CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETUID CAP_SETGID CAP_DAC_OVERRIDE CAP_CHOWN CAP_FOWNER
|
||||
|
||||
PrivateTmp=on
|
||||
# https://en.opensuse.org/openSUSE:Security_Features#Systemd_hardening_effort
|
||||
--
|
||||
2.49.0
|
||||
|
File diff suppressed because it is too large
@ -1,236 +0,0 @@
|
||||
From 1845aed98becaba6b975342229cb5e0de79d208d Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 29 Jan 2025 17:41:55 +0000
|
||||
Subject: [PATCH] Issue 6436 - MOD on a large group slow if substring index is
|
||||
present (#6437)
|
||||
|
||||
Bug Description: If the substring index is configured for the group
|
||||
membership attribute ( member or uniqueMember ), the removal of a
|
||||
member from a large static group is pretty slow.
|
||||
|
||||
Fix Description: A solution to this issue would be to introduce
|
||||
a new index to track a membership attribute index. In the interim,
|
||||
we add a check to healthcheck to inform the user of the implications
|
||||
of this configuration.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6436
|
||||
|
||||
Reviewed by: @Firstyear, @tbordaz, @droideck (Thanks)
|
||||
---
|
||||
.../suites/healthcheck/health_config_test.py | 89 ++++++++++++++++++-
|
||||
src/lib389/lib389/lint.py | 15 ++++
|
||||
src/lib389/lib389/plugins.py | 37 +++++++-
|
||||
3 files changed, 137 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
index 6d3d08bfa..747699486 100644
|
||||
--- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
+++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
@@ -212,6 +212,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
|
||||
MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
|
||||
|
||||
standalone = topology_st.standalone
|
||||
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
|
||||
|
||||
log.info('Enable RI plugin')
|
||||
plugin = ReferentialIntegrityPlugin(standalone)
|
||||
@@ -233,7 +234,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
|
||||
|
||||
|
||||
def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
- """Check if HealthCheck returns DSMOLE0002 code
|
||||
+ """Check if HealthCheck returns DSMOLE0001 code
|
||||
|
||||
:id: 236b0ec2-13da-48fb-b65a-db7406d56d5d
|
||||
:setup: Standalone instance
|
||||
@@ -248,8 +249,8 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
:expectedresults:
|
||||
1. Success
|
||||
2. Success
|
||||
- 3. Healthcheck reports DSMOLE0002 code and related details
|
||||
- 4. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 3. Healthcheck reports DSMOLE0001 code and related details
|
||||
+ 4. Healthcheck reports DSMOLE0001 code and related details
|
||||
5. Success
|
||||
6. Healthcheck reports no issue found
|
||||
7. Healthcheck reports no issue found
|
||||
@@ -259,6 +260,7 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
MO_GROUP_ATTR = 'creatorsname'
|
||||
|
||||
standalone = topology_st.standalone
|
||||
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
|
||||
|
||||
log.info('Enable MO plugin')
|
||||
plugin = MemberOfPlugin(standalone)
|
||||
@@ -279,6 +281,87 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
|
||||
|
||||
|
||||
+def test_healthcheck_MO_plugin_substring_index(topology_st):
|
||||
+ """Check if HealthCheck returns DSMOLE0002 code when the
|
||||
+ member, uniquemember attribute contains a substring index type
|
||||
+
|
||||
+ :id: 10954811-24ac-4886-8183-e30892f8e02d
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Create DS instance
|
||||
+ 2. Configure the instance with MO Plugin
|
||||
+ 3. Change index type to substring for member attribute
|
||||
+ 4. Use HealthCheck without --json option
|
||||
+ 5. Use HealthCheck with --json option
|
||||
+ 6. Change index type back to equality for member attribute
|
||||
+ 7. Use HealthCheck without --json option
|
||||
+ 8. Use HealthCheck with --json option
|
||||
+ 9. Change index type to substring for uniquemember attribute
|
||||
+ 10. Use HealthCheck without --json option
|
||||
+ 11. Use HealthCheck with --json option
|
||||
+ 12. Change index type back to equality for uniquemember attribute
|
||||
+ 13. Use HealthCheck without --json option
|
||||
+ 14. Use HealthCheck with --json option
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 5. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 6. Success
|
||||
+ 7. Healthcheck reports no issue found
|
||||
+ 8. Healthcheck reports no issue found
|
||||
+ 9. Success
|
||||
+ 10. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 11. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 12. Success
|
||||
+ 13. Healthcheck reports no issue found
|
||||
+ 14. Healthcheck reports no issue found
|
||||
+ """
|
||||
+
|
||||
+ RET_CODE = 'DSMOLE0002'
|
||||
+ MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
|
||||
+ UNIQUE_MEMBER_DN = 'cn=uniquemember,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
|
||||
+
|
||||
+ log.info('Enable MO plugin')
|
||||
+ plugin = MemberOfPlugin(standalone)
|
||||
+ plugin.disable()
|
||||
+ plugin.enable()
|
||||
+
|
||||
+ log.info('Change the index type of the member attribute index to substring')
|
||||
+ index = Index(topology_st.standalone, MEMBER_DN)
|
||||
+ index.replace('nsIndexType', 'sub')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE)
|
||||
+
|
||||
+ log.info('Set the index type of the member attribute index back to eq')
|
||||
+ index.replace('nsIndexType', 'eq')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
|
||||
+
|
||||
+ log.info('Change the index type of the uniquemember attribute index to substring')
|
||||
+ index = Index(topology_st.standalone, UNIQUE_MEMBER_DN)
|
||||
+ index.replace('nsIndexType', 'sub')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE)
|
||||
+
|
||||
+ log.info('Set the index type of the uniquemember attribute index back to eq')
|
||||
+ index.replace('nsIndexType', 'eq')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
|
||||
+
|
||||
+ # Restart the instance after changing the plugin to avoid breaking the other tests
|
||||
+ standalone.restart()
|
||||
+
|
||||
+
|
||||
@pytest.mark.ds50873
|
||||
@pytest.mark.bz1685160
|
||||
@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented")
|
||||
diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
|
||||
index 4d9cbb666..3d3c79ea3 100644
|
||||
--- a/src/lib389/lib389/lint.py
|
||||
+++ b/src/lib389/lib389/lint.py
|
||||
@@ -231,6 +231,21 @@ database after adding the missing index type. Here is an example using dsconf:
|
||||
"""
|
||||
}
|
||||
|
||||
+DSMOLE0002 = {
|
||||
+ 'dsle': 'DSMOLE0002',
|
||||
+ 'severity': 'LOW',
|
||||
+ 'description': 'Removal of a member can be slow ',
|
||||
+ 'items': ['cn=memberof plugin,cn=plugins,cn=config', ],
|
||||
+ 'detail': """If the substring index is configured for a membership attribute. The removal of a member
|
||||
+from the large group can be slow.
|
||||
+
|
||||
+""",
|
||||
+ 'fix': """If not required, you can remove the substring index type using dsconf:
|
||||
+
|
||||
+ # dsconf slapd-YOUR_INSTANCE backend index set --attr=ATTR BACKEND --del-type=sub
|
||||
+"""
|
||||
+}
|
||||
+
|
||||
# Disk Space check. Note - PARTITION is replaced by the calling function
|
||||
DSDSLE0001 = {
|
||||
'dsle': 'DSDSLE0001',
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 6bf1843ad..185398e5b 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -12,7 +12,7 @@ import copy
|
||||
import os.path
|
||||
from lib389 import tasks
|
||||
from lib389._mapped_object import DSLdapObjects, DSLdapObject
|
||||
-from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001
|
||||
+from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001, DSMOLE0002
|
||||
from lib389.utils import ensure_str, ensure_list_bytes
|
||||
from lib389.schema import Schema
|
||||
from lib389._constants import (
|
||||
@@ -827,6 +827,41 @@ class MemberOfPlugin(Plugin):
|
||||
report['check'] = f'memberof:attr_indexes'
|
||||
yield report
|
||||
|
||||
+ def _lint_member_substring_index(self):
|
||||
+ if self.status():
|
||||
+ from lib389.backend import Backends
|
||||
+ backends = Backends(self._instance).list()
|
||||
+ membership_attrs = ['member', 'uniquemember']
|
||||
+ container = self.get_attr_val_utf8_l("nsslapd-plugincontainerscope")
|
||||
+ for backend in backends:
|
||||
+ suffix = backend.get_attr_val_utf8_l('nsslapd-suffix')
|
||||
+ if suffix == "cn=changelog":
|
||||
+ # Always skip retro changelog
|
||||
+ continue
|
||||
+ if container is not None:
|
||||
+ # Check if this backend is in the scope
|
||||
+ if not container.endswith(suffix):
|
||||
+ # skip this backend that is not in the scope
|
||||
+ continue
|
||||
+ indexes = backend.get_indexes()
|
||||
+ for attr in membership_attrs:
|
||||
+ report = copy.deepcopy(DSMOLE0002)
|
||||
+ try:
|
||||
+ index = indexes.get(attr)
|
||||
+ types = index.get_attr_vals_utf8_l("nsIndexType")
|
||||
+ if "sub" in types:
|
||||
+ report['detail'] = report['detail'].replace('ATTR', attr)
|
||||
+ report['detail'] = report['detail'].replace('BACKEND', suffix)
|
||||
+ report['fix'] = report['fix'].replace('ATTR', attr)
|
||||
+ report['fix'] = report['fix'].replace('BACKEND', suffix)
|
||||
+ report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
|
||||
+ report['items'].append(suffix)
|
||||
+ report['items'].append(attr)
|
||||
+ report['check'] = f'attr:substring_index'
|
||||
+ yield report
|
||||
+ except KeyError:
|
||||
+ continue
|
||||
+
|
||||
def get_attr(self):
|
||||
"""Get memberofattr attribute"""
|
||||
|
||||
--
|
||||
2.48.1
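The lint rule deleted above amounts to a scan of each backend's index entries for a substring ("sub") type on the membership attributes. A rough standalone sketch of that check with lib389, adapted from the _lint_member_substring_index helper in the patch (instance setup, plugin scope handling and the retro changelog exclusion are omitted):

from lib389.backend import Backends

def find_sub_indexed_membership_attrs(inst, attrs=('member', 'uniquemember')):
    """Return (suffix, attr) pairs whose index carries a substring type."""
    hits = []
    for backend in Backends(inst).list():
        suffix = backend.get_attr_val_utf8_l('nsslapd-suffix')
        indexes = backend.get_indexes()
        for attr in attrs:
            try:
                index = indexes.get(attr)
            except KeyError:
                # No index entry for this attribute (mirrors the lint helper)
                continue
            if 'sub' in index.get_attr_vals_utf8_l('nsIndexType'):
                hits.append((suffix, attr))
    return hits

Anything reported this way corresponds to the DSMOLE0002 fix text: dropping the sub index type with dsconf avoids maintaining a substring index on very large membership attributes, which is what makes member removal slow.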
|
||||
|
@ -0,0 +1,92 @@
|
||||
From e03af0aa7e041fc2ca20caf3bcb5810e968043dc Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Tue, 13 May 2025 13:53:05 +0200
|
||||
Subject: [PATCH] Issue 6778 - Memory leak in
|
||||
roles_cache_create_object_from_entry
|
||||
|
||||
Bug Description:
|
||||
`this_role` has internal allocations (`dn`, `rolescopedn`, etc.)
|
||||
that are not freed.
|
||||
|
||||
Fix Description:
|
||||
Use `roles_cache_role_object_free` to free `this_role` and all its
|
||||
internal structures.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6778
|
||||
|
||||
Reviewed by: @mreynolds389 (Thanks!)
|
||||
---
|
||||
ldap/servers/plugins/roles/roles_cache.c | 15 ++++++++-------
|
||||
1 file changed, 8 insertions(+), 7 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c
|
||||
index bbed11802..60d7182e2 100644
|
||||
--- a/ldap/servers/plugins/roles/roles_cache.c
|
||||
+++ b/ldap/servers/plugins/roles/roles_cache.c
|
||||
@@ -1098,7 +1098,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
/* We determine the role type by reading the objectclass */
|
||||
if (roles_cache_is_role_entry(role_entry) == 0) {
|
||||
/* Bad type */
|
||||
- slapi_ch_free((void **)&this_role);
|
||||
+ roles_cache_role_object_free((caddr_t)this_role);
|
||||
return SLAPI_ROLE_DEFINITION_ERROR;
|
||||
}
|
||||
|
||||
@@ -1108,7 +1108,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
this_role->type = type;
|
||||
} else {
|
||||
/* Bad type */
|
||||
- slapi_ch_free((void **)&this_role);
|
||||
+ roles_cache_role_object_free((caddr_t)this_role);
|
||||
return SLAPI_ROLE_DEFINITION_ERROR;
|
||||
}
|
||||
|
||||
@@ -1166,7 +1166,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
filter_attr_value = (char *)slapi_entry_attr_get_charptr(role_entry, ROLE_FILTER_ATTR_NAME);
|
||||
if (filter_attr_value == NULL) {
|
||||
/* Means probably no attribute or no value there */
|
||||
- slapi_ch_free((void **)&this_role);
|
||||
+ roles_cache_role_object_free((caddr_t)this_role);
|
||||
return SLAPI_ROLE_ERROR_NO_FILTER_SPECIFIED;
|
||||
}
|
||||
|
||||
@@ -1205,7 +1205,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
(char *)slapi_sdn_get_ndn(this_role->dn),
|
||||
ROLE_FILTER_ATTR_NAME, filter_attr_value,
|
||||
ROLE_FILTER_ATTR_NAME);
|
||||
- slapi_ch_free((void **)&this_role);
|
||||
+ roles_cache_role_object_free((caddr_t)this_role);
|
||||
slapi_ch_free_string(&filter_attr_value);
|
||||
return SLAPI_ROLE_ERROR_FILTER_BAD;
|
||||
}
|
||||
@@ -1217,7 +1217,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
filter = slapi_str2filter(filter_attr_value);
|
||||
if (filter == NULL) {
|
||||
/* An error has occured */
|
||||
- slapi_ch_free((void **)&this_role);
|
||||
+ roles_cache_role_object_free((caddr_t)this_role);
|
||||
slapi_ch_free_string(&filter_attr_value);
|
||||
return SLAPI_ROLE_ERROR_FILTER_BAD;
|
||||
}
|
||||
@@ -1228,7 +1228,8 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
(char *)slapi_sdn_get_ndn(this_role->dn),
|
||||
filter_attr_value,
|
||||
ROLE_FILTER_ATTR_NAME);
|
||||
- slapi_ch_free((void **)&this_role);
|
||||
+ roles_cache_role_object_free((caddr_t)this_role);
|
||||
+ slapi_filter_free(filter, 1);
|
||||
slapi_ch_free_string(&filter_attr_value);
|
||||
return SLAPI_ROLE_ERROR_FILTER_BAD;
|
||||
}
|
||||
@@ -1285,7 +1286,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
if (rc == 0) {
|
||||
*result = this_role;
|
||||
} else {
|
||||
- slapi_ch_free((void **)&this_role);
|
||||
+ roles_cache_role_object_free((caddr_t)this_role);
|
||||
}
|
||||
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM,
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,651 +0,0 @@
|
||||
From dba27e56161943fbcf54ecbc28337e2c81b07979 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Mon, 13 Jan 2025 18:03:07 +0100
|
||||
Subject: [PATCH] Issue 6494 - Various errors when using extended matching rule
|
||||
on vlv sort filter (#6495)
|
||||
|
||||
* Issue 6494 - Various errors when using extended matching rule on vlv sort filter
|
||||
|
||||
Various issues when configuring and using extended matching rule within a vlv sort filter:
|
||||
|
||||
Race condition about the keys storage while indexing leading to various heap and data corruption. (lmdb only)
|
||||
Crash while indexing if vlv are misconfigured because NULL key is not checked.
|
||||
Read after block because of data type mismatch between SlapiValue and berval
|
||||
Memory leaks
|
||||
Solution:
|
||||
|
||||
Serialize the vlv index key generation if vlv filter has an extended matching rule.
|
||||
Check null keys
|
||||
Always provide SlapiValue even if we want to get keys as bervals
|
||||
Properly free the resources
|
||||
Issue: #6494
|
||||
|
||||
Reviewed by: @mreynolds389 (Thanks!)
|
||||
|
||||
(cherry picked from commit 4bd27ecc4e1d21c8af5ab8cad795d70477179a98)
|
||||
(cherry picked from commit 223a20250cbf29a546dcb398cfc76024d2f91347)
|
||||
(cherry picked from commit 280043740a525eaf0438129fd8b99ca251c62366)
|
||||
---
|
||||
.../tests/suites/indexes/regression_test.py | 29 +++
|
||||
.../tests/suites/vlv/regression_test.py | 183 ++++++++++++++++++
|
||||
ldap/servers/slapd/back-ldbm/cleanup.c | 8 +
|
||||
ldap/servers/slapd/back-ldbm/dblayer.c | 22 ++-
|
||||
ldap/servers/slapd/back-ldbm/ldbm_attr.c | 2 +-
|
||||
ldap/servers/slapd/back-ldbm/matchrule.c | 8 +-
|
||||
.../servers/slapd/back-ldbm/proto-back-ldbm.h | 3 +-
|
||||
ldap/servers/slapd/back-ldbm/sort.c | 37 ++--
|
||||
ldap/servers/slapd/back-ldbm/vlv.c | 26 +--
|
||||
ldap/servers/slapd/back-ldbm/vlv_srch.c | 4 +-
|
||||
ldap/servers/slapd/generation.c | 5 +
|
||||
ldap/servers/slapd/plugin_mr.c | 12 +-
|
||||
src/lib389/lib389/backend.py | 10 +
|
||||
13 files changed, 292 insertions(+), 57 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
index fc6db727f..2196fb2ed 100644
|
||||
--- a/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
@@ -227,6 +227,35 @@ def test_reject_virtual_attr_for_indexing(topo):
|
||||
break
|
||||
|
||||
|
||||
+def test_reindex_extended_matching_rule(topo, add_backend_and_ldif_50K_users):
|
||||
+ """Check that index with extended matching rule are reindexed properly.
|
||||
+
|
||||
+ :id: 8a3198e8-cc5a-11ef-a3e7-482ae39447e5
|
||||
+ :setup: Standalone instance + a second backend with 50K users
|
||||
+ :steps:
|
||||
+ 1. Configure uid with 2.5.13.2 matching rule
|
||||
+ 1. Configure cn with 2.5.13.2 matching rule
|
||||
+ 2. Reindex
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topo.standalone
|
||||
+ tasks = Tasks(inst)
|
||||
+ be2 = Backends(topo.standalone).get_backend(SUFFIX2)
|
||||
+ index = be2.get_index('uid')
|
||||
+ index.replace('nsMatchingRule', '2.5.13.2')
|
||||
+ index = be2.get_index('cn')
|
||||
+ index.replace('nsMatchingRule', '2.5.13.2')
|
||||
+
|
||||
+ assert tasks.reindex(
|
||||
+ suffix=SUFFIX2,
|
||||
+ args={TASK_WAIT: True}
|
||||
+ ) == 0
|
||||
+
|
||||
+
|
||||
+
|
||||
if __name__ == "__main__":
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
index 3b66de8b5..6ab709bd3 100644
|
||||
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
@@ -22,6 +22,146 @@ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
+class BackendHandler:
|
||||
+ def __init__(self, inst, bedict, scope=ldap.SCOPE_ONELEVEL):
|
||||
+ self.inst = inst
|
||||
+ self.bedict = bedict
|
||||
+ self.bes = Backends(inst)
|
||||
+ self.scope = scope
|
||||
+ self.data = {}
|
||||
+
|
||||
+ def find_backend(self, bename):
|
||||
+ for be in self.bes.list():
|
||||
+ if be.get_attr_val_utf8_l('cn') == bename:
|
||||
+ return be
|
||||
+ return None
|
||||
+
|
||||
+ def cleanup(self):
|
||||
+ benames = list(self.bedict.keys())
|
||||
+ benames.reverse()
|
||||
+ for bename in benames:
|
||||
+ be = self.find_backend(bename)
|
||||
+ if be:
|
||||
+ be.delete()
|
||||
+
|
||||
+ def setup(self):
|
||||
+ # Create backends, add vlv index and populate the backends.
|
||||
+ for bename,suffix in self.bedict.items():
|
||||
+ be = self.bes.create(properties={
|
||||
+ 'cn': bename,
|
||||
+ 'nsslapd-suffix': suffix,
|
||||
+ })
|
||||
+ # Add suffix entry
|
||||
+ Organization(self.inst, dn=suffix).create(properties={ 'o': bename, })
|
||||
+ # Configure vlv
|
||||
+ vlv_search, vlv_index = create_vlv_search_and_index(
|
||||
+ self.inst, basedn=suffix,
|
||||
+ bename=bename, scope=self.scope,
|
||||
+ prefix=f'vlv_1lvl_{bename}')
|
||||
+ # Reindex
|
||||
+ reindex_task = Tasks(self.inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=suffix,
|
||||
+ attrname=vlv_index.rdn,
|
||||
+ args={TASK_WAIT: True},
|
||||
+ vlv=True
|
||||
+ ) == 0
|
||||
+ # Add ou=People entry
|
||||
+ OrganizationalUnits(self.inst, suffix).create(properties={'ou': 'People'})
|
||||
+ # Add another ou that will be deleted before the export
|
||||
+ # so that import will change the vlv search basedn entryid
|
||||
+ ou2 = OrganizationalUnits(self.inst, suffix).create(properties={'ou': 'dummy ou'})
|
||||
+ # Add a demo user so that vlv_check is happy
|
||||
+ dn = f'uid=demo_user,ou=people,{suffix}'
|
||||
+ UserAccount(self.inst, dn=dn).create( properties= {
|
||||
+ 'uid': 'demo_user',
|
||||
+ 'cn': 'Demo user',
|
||||
+ 'sn': 'Demo user',
|
||||
+ 'uidNumber': '99998',
|
||||
+ 'gidNumber': '99998',
|
||||
+ 'homeDirectory': '/var/empty',
|
||||
+ 'loginShell': '/bin/false',
|
||||
+ 'userpassword': DEMO_PW })
|
||||
+ # Add regular user
|
||||
+ add_users(self.inst, 10, suffix=suffix)
|
||||
+ # Removing ou2
|
||||
+ ou2.delete()
|
||||
+ # And export
|
||||
+ tasks = Tasks(self.inst)
|
||||
+ ldif = f'{self.inst.get_ldif_dir()}/db-{bename}.ldif'
|
||||
+ assert tasks.exportLDIF(suffix=suffix,
|
||||
+ output_file=ldif,
|
||||
+ args={TASK_WAIT: True}) == 0
|
||||
+ # Add the various parameters in topology_st.belist
|
||||
+ self.data[bename] = { 'be': be,
|
||||
+ 'suffix': suffix,
|
||||
+ 'ldif': ldif,
|
||||
+ 'vlv_search' : vlv_search,
|
||||
+ 'vlv_index' : vlv_index,
|
||||
+ 'dn' : dn}
|
||||
+
|
||||
+
|
||||
+def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
|
||||
+ scope=ldap.SCOPE_SUBTREE, prefix="vlv", vlvsort="cn"):
|
||||
+ vlv_searches = VLVSearch(inst)
|
||||
+ vlv_search_properties = {
|
||||
+ "objectclass": ["top", "vlvSearch"],
|
||||
+ "cn": f"{prefix}Srch",
|
||||
+ "vlvbase": basedn,
|
||||
+ "vlvfilter": "(uid=*)",
|
||||
+ "vlvscope": str(scope),
|
||||
+ }
|
||||
+ vlv_searches.create(
|
||||
+ basedn=f"cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
+ properties=vlv_search_properties
|
||||
+ )
|
||||
+
|
||||
+ vlv_index = VLVIndex(inst)
|
||||
+ vlv_index_properties = {
|
||||
+ "objectclass": ["top", "vlvIndex"],
|
||||
+ "cn": f"{prefix}Idx",
|
||||
+ "vlvsort": vlvsort,
|
||||
+ }
|
||||
+ vlv_index.create(
|
||||
+ basedn=f"cn={prefix}Srch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
+ properties=vlv_index_properties
|
||||
+ )
|
||||
+ return vlv_searches, vlv_index
|
||||
+
|
||||
+
|
||||
+@pytest.fixture
|
||||
+def vlv_setup_with_uid_mr(topology_st, request):
|
||||
+ inst = topology_st.standalone
|
||||
+ bename = 'be1'
|
||||
+ besuffix = f'o={bename}'
|
||||
+ beh = BackendHandler(inst, { bename: besuffix })
|
||||
+
|
||||
+ def fin():
|
||||
+ # Cleanup function
|
||||
+ if not DEBUGGING and inst.exists() and inst.status():
|
||||
+ beh.cleanup()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Make sure that our backend are not already present.
|
||||
+ beh.cleanup()
|
||||
+
|
||||
+ # Then add the new backend
|
||||
+ beh.setup()
|
||||
+
|
||||
+ index = Index(inst, f'cn=uid,cn=index,cn={bename},cn=ldbm database,cn=plugins,cn=config')
|
||||
+ index.add('nsMatchingRule', '2.5.13.2')
|
||||
+ reindex_task = Tasks(inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=besuffix,
|
||||
+ attrname='uid',
|
||||
+ args={TASK_WAIT: True}
|
||||
+ ) == 0
|
||||
+
|
||||
+ topology_st.beh = beh
|
||||
+ return topology_st
|
||||
+
|
||||
+
|
||||
@pytest.mark.DS47966
|
||||
def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
|
||||
"""
|
||||
@@ -105,6 +245,49 @@ def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
|
||||
entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")
|
||||
|
||||
|
||||
+def test_vlv_with_mr(vlv_setup_with_uid_mr):
|
||||
+ """
|
||||
+ Testing vlv having specific matching rule
|
||||
+
|
||||
+ :id: 5e04afe2-beec-11ef-aa84-482ae39447e5
|
||||
+ :setup: Standalone with uid have a matching rule index
|
||||
+ :steps:
|
||||
+ 1. Append vlvIndex entries then vlvSearch entry in the dse.ldif
|
||||
+ 2. Restart the server
|
||||
+ :expectedresults:
|
||||
+ 1. Should Success.
|
||||
+ 2. Should Success.
|
||||
+ """
|
||||
+ inst = vlv_setup_with_uid_mr.standalone
|
||||
+ beh = vlv_setup_with_uid_mr.beh
|
||||
+ bename, besuffix = next(iter(beh.bedict.items()))
|
||||
+ vlv_searches, vlv_index = create_vlv_search_and_index(
|
||||
+ inst, basedn=besuffix, bename=bename,
|
||||
+ vlvsort="uid:2.5.13.2")
|
||||
+ # Reindex the vlv
|
||||
+ reindex_task = Tasks(inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=besuffix,
|
||||
+ attrname=vlv_index.rdn,
|
||||
+ args={TASK_WAIT: True},
|
||||
+ vlv=True
|
||||
+ ) == 0
|
||||
+
|
||||
+ inst.restart()
|
||||
+ users = UserAccounts(inst, besuffix)
|
||||
+ user_properties = {
|
||||
+ 'uid': f'a new testuser',
|
||||
+ 'cn': f'a new testuser',
|
||||
+ 'sn': 'user',
|
||||
+ 'uidNumber': '0',
|
||||
+ 'gidNumber': '0',
|
||||
+ 'homeDirectory': 'foo'
|
||||
+ }
|
||||
+ user = users.create(properties=user_properties)
|
||||
+ user.delete()
|
||||
+ assert inst.status()
|
||||
+
|
||||
+
|
||||
if __name__ == "__main__":
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/cleanup.c b/ldap/servers/slapd/back-ldbm/cleanup.c
|
||||
index 6b2e9faef..939d8bc4f 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/cleanup.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/cleanup.c
|
||||
@@ -15,12 +15,14 @@
|
||||
|
||||
#include "back-ldbm.h"
|
||||
#include "dblayer.h"
|
||||
+#include "vlv_srch.h"
|
||||
|
||||
int
|
||||
ldbm_back_cleanup(Slapi_PBlock *pb)
|
||||
{
|
||||
struct ldbminfo *li;
|
||||
Slapi_Backend *be;
|
||||
+ struct vlvSearch *nextp;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_cleanup", "ldbm backend cleaning up\n");
|
||||
slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
|
||||
@@ -45,6 +47,12 @@ ldbm_back_cleanup(Slapi_PBlock *pb)
|
||||
return 0;
|
||||
}
|
||||
|
||||
+ /* Release the vlv list */
|
||||
+ for (struct vlvSearch *p=be->vlvSearchList; p; p=nextp) {
|
||||
+ nextp = p->vlv_next;
|
||||
+ vlvSearch_delete(&p);
|
||||
+ }
|
||||
+
|
||||
/*
|
||||
* We check if li is NULL. Because of an issue in how we create backends
|
||||
* we share the li and plugin info between many unique backends. This causes
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
|
||||
index 05cc5b891..6b8ce0016 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
|
||||
@@ -494,8 +494,12 @@ int
|
||||
dblayer_close(struct ldbminfo *li, int dbmode)
|
||||
{
|
||||
dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
|
||||
-
|
||||
- return priv->dblayer_close_fn(li, dbmode);
|
||||
+ int rc = priv->dblayer_close_fn(li, dbmode);
|
||||
+ if (rc == 0) {
|
||||
+ /* Clean thread specific data */
|
||||
+ dblayer_destroy_txn_stack();
|
||||
+ }
|
||||
+ return rc;
|
||||
}
|
||||
|
||||
/* Routines for opening and closing random files in the DB_ENV.
|
||||
@@ -621,6 +625,9 @@ dblayer_erase_index_file(backend *be, struct attrinfo *a, PRBool use_lock, int n
|
||||
return 0;
|
||||
}
|
||||
struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
|
||||
+ if (NULL == li) {
|
||||
+ return 0;
|
||||
+ }
|
||||
dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
|
||||
|
||||
return priv->dblayer_rm_db_file_fn(be, a, use_lock, no_force_chkpt);
|
||||
@@ -1382,3 +1389,14 @@ dblayer_pop_pvt_txn(void)
|
||||
}
|
||||
return;
|
||||
}
|
||||
+
|
||||
+void
|
||||
+dblayer_destroy_txn_stack(void)
|
||||
+{
|
||||
+ /*
|
||||
+ * Cleanup for the main thread to avoid false/positive leaks from libasan
|
||||
+ * Note: data is freed because PR_SetThreadPrivate calls the
|
||||
+ * dblayer_cleanup_txn_stack callback
|
||||
+ */
|
||||
+ PR_SetThreadPrivate(thread_private_txn_stack, NULL);
|
||||
+}
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
|
||||
index 708756d3e..70700ca1d 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
|
||||
@@ -54,7 +54,7 @@ attrinfo_delete(struct attrinfo **pp)
|
||||
idl_release_private(*pp);
|
||||
(*pp)->ai_key_cmp_fn = NULL;
|
||||
slapi_ch_free((void **)&((*pp)->ai_type));
|
||||
- slapi_ch_free((void **)(*pp)->ai_index_rules);
|
||||
+ charray_free((*pp)->ai_index_rules);
|
||||
slapi_ch_free((void **)&((*pp)->ai_attrcrypt));
|
||||
attr_done(&((*pp)->ai_sattr));
|
||||
attrinfo_delete_idlistinfo(&(*pp)->ai_idlistinfo);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/matchrule.c b/ldap/servers/slapd/back-ldbm/matchrule.c
|
||||
index 5d516b9f8..5365e8acf 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/matchrule.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/matchrule.c
|
||||
@@ -107,7 +107,7 @@ destroy_matchrule_indexer(Slapi_PBlock *pb)
|
||||
* is destroyed
|
||||
*/
|
||||
int
|
||||
-matchrule_values_to_keys(Slapi_PBlock *pb, struct berval **input_values, struct berval ***output_values)
|
||||
+matchrule_values_to_keys(Slapi_PBlock *pb, Slapi_Value **input_values, struct berval ***output_values)
|
||||
{
|
||||
IFP mrINDEX = NULL;
|
||||
|
||||
@@ -135,10 +135,8 @@ matchrule_values_to_keys_sv(Slapi_PBlock *pb, Slapi_Value **input_values, Slapi_
|
||||
slapi_pblock_get(pb, SLAPI_PLUGIN_MR_INDEX_SV_FN, &mrINDEX);
|
||||
if (NULL == mrINDEX) { /* old school - does not have SV function */
|
||||
int rc;
|
||||
- struct berval **bvi = NULL, **bvo = NULL;
|
||||
- valuearray_get_bervalarray(input_values, &bvi);
|
||||
- rc = matchrule_values_to_keys(pb, bvi, &bvo);
|
||||
- ber_bvecfree(bvi);
|
||||
+ struct berval **bvo = NULL;
|
||||
+ rc = matchrule_values_to_keys(pb, input_values, &bvo);
|
||||
/* note - the indexer owns bvo and will free it when destroyed */
|
||||
valuearray_init_bervalarray(bvo, output_values);
|
||||
/* store output values in SV form - caller expects SLAPI_PLUGIN_MR_KEYS is Slapi_Value** */
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
|
||||
index d93ff9239..157788fa4 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
|
||||
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
|
||||
@@ -84,6 +84,7 @@ int dblayer_release_index_file(backend *be, struct attrinfo *a, DB *pDB);
|
||||
int dblayer_erase_index_file(backend *be, struct attrinfo *a, PRBool use_lock, int no_force_chkpt);
|
||||
int dblayer_get_id2entry(backend *be, DB **ppDB);
|
||||
int dblayer_release_id2entry(backend *be, DB *pDB);
|
||||
+void dblayer_destroy_txn_stack(void);
|
||||
int dblayer_txn_init(struct ldbminfo *li, back_txn *txn);
|
||||
int dblayer_txn_begin(backend *be, back_txnid parent_txn, back_txn *txn);
|
||||
int dblayer_txn_begin_ext(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock);
|
||||
@@ -560,7 +561,7 @@ int compute_allids_limit(Slapi_PBlock *pb, struct ldbminfo *li);
|
||||
*/
|
||||
int create_matchrule_indexer(Slapi_PBlock **pb, char *matchrule, char *type);
|
||||
int destroy_matchrule_indexer(Slapi_PBlock *pb);
|
||||
-int matchrule_values_to_keys(Slapi_PBlock *pb, struct berval **input_values, struct berval ***output_values);
|
||||
+int matchrule_values_to_keys(Slapi_PBlock *pb, Slapi_Value **input_values, struct berval ***output_values);
|
||||
int matchrule_values_to_keys_sv(Slapi_PBlock *pb, Slapi_Value **input_values, Slapi_Value ***output_values);
|
||||
|
||||
/*
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/sort.c b/ldap/servers/slapd/back-ldbm/sort.c
|
||||
index 70ac60803..196af753f 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/sort.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/sort.c
|
||||
@@ -536,30 +536,18 @@ compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s, baggage_carrier *bc, int *e
|
||||
valuearray_get_bervalarray(valueset_get_valuearray(&attr_b->a_present_values), &value_b);
|
||||
} else {
|
||||
/* Match rule case */
|
||||
- struct berval **actual_value_a = NULL;
|
||||
- struct berval **actual_value_b = NULL;
|
||||
- struct berval **temp_value = NULL;
|
||||
-
|
||||
- valuearray_get_bervalarray(valueset_get_valuearray(&attr_a->a_present_values), &actual_value_a);
|
||||
- valuearray_get_bervalarray(valueset_get_valuearray(&attr_b->a_present_values), &actual_value_b);
|
||||
- matchrule_values_to_keys(this_one->mr_pb, actual_value_a, &temp_value);
|
||||
- /* Now copy it, so the second call doesn't crap on it */
|
||||
- value_a = slapi_ch_bvecdup(temp_value); /* Really, we'd prefer to not call the chXXX variant...*/
|
||||
- matchrule_values_to_keys(this_one->mr_pb, actual_value_b, &value_b);
|
||||
-
|
||||
- if ((actual_value_a && !value_a) ||
|
||||
- (actual_value_b && !value_b)) {
|
||||
- ber_bvecfree(actual_value_a);
|
||||
- ber_bvecfree(actual_value_b);
|
||||
- CACHE_RETURN(&inst->inst_cache, &a);
|
||||
- CACHE_RETURN(&inst->inst_cache, &b);
|
||||
- *error = 1;
|
||||
- return 0;
|
||||
+ Slapi_Value **va_a = valueset_get_valuearray(&attr_a->a_present_values);
|
||||
+ Slapi_Value **va_b = valueset_get_valuearray(&attr_b->a_present_values);
|
||||
+
|
||||
+ matchrule_values_to_keys(this_one->mr_pb, va_a, &value_a);
|
||||
+ /* Plugin owns the memory ==> duplicate the key before next call garble it */
|
||||
+ value_a = slapi_ch_bvecdup(value_a);
|
||||
+ matchrule_values_to_keys(this_one->mr_pb, va_b, &value_b);
|
||||
+
|
||||
+ if ((va_a && !value_a) || (va_b && !value_b)) {
|
||||
+ result = 0;
|
||||
+ goto bail;
|
||||
}
|
||||
- if (actual_value_a)
|
||||
- ber_bvecfree(actual_value_a);
|
||||
- if (actual_value_b)
|
||||
- ber_bvecfree(actual_value_b);
|
||||
}
|
||||
/* Compare them */
|
||||
if (!order) {
|
||||
@@ -582,9 +570,10 @@ compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s, baggage_carrier *bc, int *e
|
||||
}
|
||||
/* If so, proceed to the next attribute for comparison */
|
||||
}
|
||||
+ *error = 0;
|
||||
+bail:
|
||||
CACHE_RETURN(&inst->inst_cache, &a);
|
||||
CACHE_RETURN(&inst->inst_cache, &b);
|
||||
- *error = 0;
|
||||
return result;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c
|
||||
index 121fb3667..70e0bac85 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/vlv.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/vlv.c
|
||||
@@ -605,7 +605,7 @@ vlv_getindices(IFP callback_fn, void *param, backend *be)
|
||||
* generate the same composite key, so we append the EntryID
|
||||
* to ensure the uniqueness of the key.
|
||||
*
|
||||
- * Always creates a key. Never returns NULL.
|
||||
+ * May return NULL in case of errors (typically in some configuration error cases)
|
||||
*/
|
||||
static struct vlv_key *
|
||||
vlv_create_key(struct vlvIndex *p, struct backentry *e)
|
||||
@@ -659,10 +659,8 @@ vlv_create_key(struct vlvIndex *p, struct backentry *e)
|
||||
/* Matching rule. Do the magic mangling. Plugin owns the memory. */
|
||||
if (p->vlv_mrpb[sortattr] != NULL) {
|
||||
/* xxxPINAKI */
|
||||
- struct berval **bval = NULL;
|
||||
Slapi_Value **va = valueset_get_valuearray(&attr->a_present_values);
|
||||
- valuearray_get_bervalarray(va, &bval);
|
||||
- matchrule_values_to_keys(p->vlv_mrpb[sortattr], bval, &value);
|
||||
+ matchrule_values_to_keys(p->vlv_mrpb[sortattr], va, &value);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -779,6 +777,13 @@ do_vlv_update_index(back_txn *txn, struct ldbminfo *li __attribute__((unused)),
|
||||
}
|
||||
|
||||
key = vlv_create_key(pIndex, entry);
|
||||
+ if (key == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "vlv_create_key", "Unable to generate vlv %s index key."
|
||||
+ " There may be a configuration issue.\n", pIndex->vlv_name);
|
||||
+ dblayer_release_index_file(be, pIndex->vlv_attrinfo, db);
|
||||
+ return rc;
|
||||
+ }
|
||||
+
|
||||
if (NULL != txn) {
|
||||
db_txn = txn->back_txn_txn;
|
||||
} else {
|
||||
@@ -949,11 +954,11 @@ vlv_create_matching_rule_value(Slapi_PBlock *pb, struct berval *original_value)
|
||||
struct berval **value = NULL;
|
||||
if (pb != NULL) {
|
||||
struct berval **outvalue = NULL;
|
||||
- struct berval *invalue[2];
|
||||
- invalue[0] = original_value; /* jcm: cast away const */
|
||||
- invalue[1] = NULL;
|
||||
+ Slapi_Value v_in = {0};
|
||||
+ Slapi_Value *va_in[2] = { &v_in, NULL };
|
||||
+ slapi_value_init_berval(&v_in, original_value);
|
||||
/* The plugin owns the memory it returns in outvalue */
|
||||
- matchrule_values_to_keys(pb, invalue, &outvalue);
|
||||
+ matchrule_values_to_keys(pb, va_in, &outvalue);
|
||||
if (outvalue != NULL) {
|
||||
value = slapi_ch_bvecdup(outvalue);
|
||||
}
|
||||
@@ -1610,11 +1615,8 @@ retry:
|
||||
PRBool needFree = PR_FALSE;
|
||||
|
||||
if (sort_control->mr_pb != NULL) {
|
||||
- struct berval **tmp_entry_value = NULL;
|
||||
-
|
||||
- valuearray_get_bervalarray(csn_value, &tmp_entry_value);
|
||||
/* Matching rule. Do the magic mangling. Plugin owns the memory. */
|
||||
- matchrule_values_to_keys(sort_control->mr_pb, /* xxxPINAKI needs modification attr->a_vals */ tmp_entry_value, &entry_value);
|
||||
+ matchrule_values_to_keys(sort_control->mr_pb, csn_value, &entry_value);
|
||||
} else {
|
||||
valuearray_get_bervalarray(csn_value, &entry_value);
|
||||
needFree = PR_TRUE; /* entry_value is a copy */
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/vlv_srch.c b/ldap/servers/slapd/back-ldbm/vlv_srch.c
|
||||
index fe1208d59..11d1c715b 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/vlv_srch.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/vlv_srch.c
|
||||
@@ -203,6 +203,9 @@ vlvSearch_delete(struct vlvSearch **ppvs)
|
||||
{
|
||||
if (ppvs != NULL && *ppvs != NULL) {
|
||||
struct vlvIndex *pi, *ni;
|
||||
+ if ((*ppvs)->vlv_e) {
|
||||
+ slapi_entry_free((struct slapi_entry *)((*ppvs)->vlv_e));
|
||||
+ }
|
||||
slapi_sdn_free(&((*ppvs)->vlv_dn));
|
||||
slapi_ch_free((void **)&((*ppvs)->vlv_name));
|
||||
slapi_sdn_free(&((*ppvs)->vlv_base));
|
||||
@@ -217,7 +220,6 @@ vlvSearch_delete(struct vlvSearch **ppvs)
|
||||
pi = ni;
|
||||
}
|
||||
slapi_ch_free((void **)ppvs);
|
||||
- *ppvs = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/generation.c b/ldap/servers/slapd/generation.c
|
||||
index c4f20f793..89f097322 100644
|
||||
--- a/ldap/servers/slapd/generation.c
|
||||
+++ b/ldap/servers/slapd/generation.c
|
||||
@@ -93,9 +93,13 @@ get_server_dataversion()
|
||||
lenstr *l = NULL;
|
||||
Slapi_Backend *be;
|
||||
char *cookie;
|
||||
+ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
+ /* Serialize to avoid race condition */
|
||||
+ pthread_mutex_lock(&mutex);
|
||||
/* we already cached the copy - just return it */
|
||||
if (server_dataversion_id != NULL) {
|
||||
+ pthread_mutex_unlock(&mutex);
|
||||
return server_dataversion_id;
|
||||
}
|
||||
|
||||
@@ -130,5 +134,6 @@ get_server_dataversion()
|
||||
server_dataversion_id = slapi_ch_strdup(l->ls_buf);
|
||||
}
|
||||
lenstr_free(&l);
|
||||
+ pthread_mutex_unlock(&mutex);
|
||||
return server_dataversion_id;
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c
|
||||
index 13f76fe52..6cf88b7de 100644
|
||||
--- a/ldap/servers/slapd/plugin_mr.c
|
||||
+++ b/ldap/servers/slapd/plugin_mr.c
|
||||
@@ -391,28 +391,18 @@ mr_wrap_mr_index_sv_fn(Slapi_PBlock *pb)
|
||||
return rc;
|
||||
}
|
||||
|
||||
-/* this function takes SLAPI_PLUGIN_MR_VALUES as struct berval ** and
|
||||
+/* this function takes SLAPI_PLUGIN_MR_VALUES as Slapi_Value ** and
|
||||
returns SLAPI_PLUGIN_MR_KEYS as struct berval **
|
||||
*/
|
||||
static int
|
||||
mr_wrap_mr_index_fn(Slapi_PBlock *pb)
|
||||
{
|
||||
int rc = -1;
|
||||
- struct berval **in_vals = NULL;
|
||||
struct berval **out_vals = NULL;
|
||||
struct mr_private *mrpriv = NULL;
|
||||
- Slapi_Value **in_vals_sv = NULL;
|
||||
Slapi_Value **out_vals_sv = NULL;
|
||||
|
||||
- slapi_pblock_get(pb, SLAPI_PLUGIN_MR_VALUES, &in_vals); /* get bervals */
|
||||
- /* convert bervals to sv ary */
|
||||
- valuearray_init_bervalarray(in_vals, &in_vals_sv);
|
||||
- slapi_pblock_set(pb, SLAPI_PLUGIN_MR_VALUES, in_vals_sv); /* use sv */
|
||||
rc = mr_wrap_mr_index_sv_fn(pb);
|
||||
- /* clean up in_vals_sv */
|
||||
- valuearray_free(&in_vals_sv);
|
||||
- /* restore old in_vals */
|
||||
- slapi_pblock_set(pb, SLAPI_PLUGIN_MR_VALUES, in_vals);
|
||||
/* get result sv keys */
|
||||
slapi_pblock_get(pb, SLAPI_PLUGIN_MR_KEYS, &out_vals_sv);
|
||||
/* convert to bvec */
|
||||
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
|
||||
index 9acced205..cee073ea7 100644
|
||||
--- a/src/lib389/lib389/backend.py
|
||||
+++ b/src/lib389/lib389/backend.py
|
||||
@@ -1029,6 +1029,16 @@ class Backends(DSLdapObjects):
|
||||
for be in sorted(self.list(), key=lambda be: len(be.get_suffix()), reverse=True):
|
||||
be.delete()
|
||||
|
||||
+ def get_backend(self, suffix):
|
||||
+ """
|
||||
+ Return the backend associated with the provided suffix.
|
||||
+ """
|
||||
+ suffix_l = suffix.lower()
|
||||
+ for be in self.list():
|
||||
+ if be.get_attr_val_utf8_l('nsslapd-suffix') == suffix_l:
|
||||
+ return be
|
||||
+ return None
|
||||
+
|
||||
|
||||
class DatabaseConfig(DSLdapObject):
|
||||
"""Backend Database configuration
|
||||
--
|
||||
2.48.1
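Besides the server-side fixes, the patch above drives the interesting configuration from lib389: put an extended matching rule OID on an ordinary attribute index, reindex, and only then build the vlv index that sorts on "uid:2.5.13.2". A condensed sketch of the first two steps, adapted from the vlv_setup_with_uid_mr fixture (backend name, suffix, attribute and the TASK_WAIT import path are assumptions based on the test suites):

from lib389.index import Index
from lib389.tasks import Tasks
from lib389.properties import TASK_WAIT

def add_matching_rule_and_reindex(inst, bename='be1', suffix='o=be1',
                                  attr='uid', mr_oid='2.5.13.2'):
    # Attach the extended matching rule to the existing attribute index.
    dn = f'cn={attr},cn=index,cn={bename},cn=ldbm database,cn=plugins,cn=config'
    Index(inst, dn).replace('nsMatchingRule', mr_oid)
    # Rebuild the index so existing entries get keys generated with that rule.
    assert Tasks(inst).reindex(suffix=suffix, attrname=attr,
                               args={TASK_WAIT: True}) == 0

The same patch also adds Backends.get_backend(suffix) to lib389, so callers can resolve a backend by suffix instead of filtering Backends.list() by hand.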
|
||||
|
@ -0,0 +1,262 @@
|
||||
From c8c9d8814bd328d9772b6a248aa142b72430cba1 Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Wed, 16 Jul 2025 11:22:30 +0200
|
||||
Subject: [PATCH] Issue 6778 - Memory leak in
|
||||
roles_cache_create_object_from_entry part 2
|
||||
|
||||
Bug Description:
|
||||
Every time a role with scope DN is processed, we leak rolescopeDN.
|
||||
|
||||
Fix Description:
|
||||
* Initialize all pointer variables to NULL
|
||||
* Add additional NULL checks
|
||||
* Free rolescopeDN
|
||||
* Move test_rewriter_with_invalid_filter before the DB contains 90k entries
|
||||
* Use task.wait() for import task completion instead of parsing logs,
|
||||
increase the timeout
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6778
|
||||
|
||||
Reviewed by: @progier389 (Thanks!)
|
||||
---
|
||||
dirsrvtests/tests/suites/roles/basic_test.py | 164 +++++++++----------
|
||||
ldap/servers/plugins/roles/roles_cache.c | 10 +-
|
||||
2 files changed, 82 insertions(+), 92 deletions(-)
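For the import side of the fix, the tests now wait on the task entry itself instead of polling the error log for a completion message. A minimal sketch of that pattern (suffix and path are placeholders):

from lib389.tasks import ImportTask
from lib389._constants import DEFAULT_SUFFIX

def run_online_import(inst, ldif_path, suffix=DEFAULT_SUFFIX):
    task = ImportTask(inst)
    task.import_suffix_from_ldif(ldiffile=ldif_path, suffix=suffix)
    task.wait(timeout=400)            # block until the task entry finishes
    assert task.get_exit_code() == 0  # non-zero means the import failed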
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
index d92d6f0c3..ec208bae9 100644
|
||||
--- a/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
@@ -510,6 +510,76 @@ def test_vattr_on_managed_role(topo, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+def test_rewriter_with_invalid_filter(topo, request):
|
||||
+ """Test that server does not crash when having
|
||||
+ invalid filter in filtered role
|
||||
+
|
||||
+ :id: 5013b0b2-0af6-11f0-8684-482ae39447e5
|
||||
+ :setup: standalone server
|
||||
+ :steps:
|
||||
+ 1. Setup filtered role with good filter
|
||||
+ 2. Setup nsrole rewriter
|
||||
+ 3. Restart the server
|
||||
+ 4. Search for entries
|
||||
+ 5. Setup filtered role with bad filter
|
||||
+ 6. Search for entries
|
||||
+ :expectedresults:
|
||||
+ 1. Operation should succeed
|
||||
+ 2. Operation should succeed
|
||||
+ 3. Operation should succeed
|
||||
+ 4. Operation should succeed
|
||||
+ 5. Operation should succeed
|
||||
+ 6. Operation should succeed
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ entries = []
|
||||
+
|
||||
+ def fin():
|
||||
+ inst.start()
|
||||
+ for entry in entries:
|
||||
+ entry.delete()
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Setup filtered role
|
||||
+ roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
|
||||
+ filter_ko = '(&((objectClass=top)(objectClass=nsPerson))'
|
||||
+ filter_ok = '(&(objectClass=top)(objectClass=nsPerson))'
|
||||
+ role_properties = {
|
||||
+ 'cn': 'TestFilteredRole',
|
||||
+ 'nsRoleFilter': filter_ok,
|
||||
+ 'description': 'Test good filter',
|
||||
+ }
|
||||
+ role = roles.create(properties=role_properties)
|
||||
+ entries.append(role)
|
||||
+
|
||||
+ # Setup nsrole rewriter
|
||||
+ rewriters = Rewriters(inst)
|
||||
+ rewriter_properties = {
|
||||
+ "cn": "nsrole",
|
||||
+ "nsslapd-libpath": 'libroles-plugin',
|
||||
+ "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter',
|
||||
+ }
|
||||
+ rewriter = rewriters.ensure_state(properties=rewriter_properties)
|
||||
+ entries.append(rewriter)
|
||||
+
|
||||
+    # Restart the instance
|
||||
+ inst.restart()
|
||||
+
|
||||
+ # Search for entries
|
||||
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
+
|
||||
+ # Set bad filter
|
||||
+ role_properties = {
|
||||
+ 'cn': 'TestFilteredRole',
|
||||
+ 'nsRoleFilter': filter_ko,
|
||||
+ 'description': 'Test bad filter',
|
||||
+ }
|
||||
+ role.ensure_state(properties=role_properties)
|
||||
+
|
||||
+ # Search for entries
|
||||
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
+
|
||||
+
|
||||
def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
"""Test that filter components containing 'nsrole=xxx'
|
||||
are reworked if xxx is either a filtered role or a managed
|
||||
@@ -581,17 +651,11 @@ def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
PARENT="ou=people,%s" % DEFAULT_SUFFIX
|
||||
dbgen_users(topo.standalone, 90000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT)
|
||||
|
||||
- # online import
|
||||
+ # Online import
|
||||
import_task = ImportTask(topo.standalone)
|
||||
import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
|
||||
- # Check for up to 200sec that the completion
|
||||
- for i in range(1, 20):
|
||||
- if len(topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9000.*')) > 0:
|
||||
- break
|
||||
- time.sleep(10)
|
||||
- import_complete = topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9000.*')
|
||||
- assert (len(import_complete) == 1)
|
||||
-
|
||||
+ import_task.wait(timeout=400)
|
||||
+ assert import_task.get_exit_code() == 0
|
||||
# Restart server
|
||||
topo.standalone.restart()
|
||||
|
||||
@@ -715,17 +779,11 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
PARENT="ou=people,%s" % DEFAULT_SUFFIX
|
||||
dbgen_users(topo.standalone, 91000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT)
|
||||
|
||||
- # online import
|
||||
+ # Online import
|
||||
import_task = ImportTask(topo.standalone)
|
||||
import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
|
||||
- # Check for up to 200sec that the completion
|
||||
- for i in range(1, 20):
|
||||
- if len(topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9100.*')) > 0:
|
||||
- break
|
||||
- time.sleep(10)
|
||||
- import_complete = topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9100.*')
|
||||
- assert (len(import_complete) == 1)
|
||||
-
|
||||
+ import_task.wait(timeout=400)
|
||||
+ assert import_task.get_exit_code() == 0
|
||||
# Restart server
|
||||
topo.standalone.restart()
|
||||
|
||||
@@ -769,76 +827,6 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
request.addfinalizer(fin)
|
||||
|
||||
|
||||
-def test_rewriter_with_invalid_filter(topo, request):
|
||||
- """Test that server does not crash when having
|
||||
- invalid filter in filtered role
|
||||
-
|
||||
- :id: 5013b0b2-0af6-11f0-8684-482ae39447e5
|
||||
- :setup: standalone server
|
||||
- :steps:
|
||||
- 1. Setup filtered role with good filter
|
||||
- 2. Setup nsrole rewriter
|
||||
- 3. Restart the server
|
||||
- 4. Search for entries
|
||||
- 5. Setup filtered role with bad filter
|
||||
- 6. Search for entries
|
||||
- :expectedresults:
|
||||
- 1. Operation should succeed
|
||||
- 2. Operation should succeed
|
||||
- 3. Operation should succeed
|
||||
- 4. Operation should succeed
|
||||
- 5. Operation should succeed
|
||||
- 6. Operation should succeed
|
||||
- """
|
||||
- inst = topo.standalone
|
||||
- entries = []
|
||||
-
|
||||
- def fin():
|
||||
- inst.start()
|
||||
- for entry in entries:
|
||||
- entry.delete()
|
||||
- request.addfinalizer(fin)
|
||||
-
|
||||
- # Setup filtered role
|
||||
- roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
|
||||
- filter_ko = '(&((objectClass=top)(objectClass=nsPerson))'
|
||||
- filter_ok = '(&(objectClass=top)(objectClass=nsPerson))'
|
||||
- role_properties = {
|
||||
- 'cn': 'TestFilteredRole',
|
||||
- 'nsRoleFilter': filter_ok,
|
||||
- 'description': 'Test good filter',
|
||||
- }
|
||||
- role = roles.create(properties=role_properties)
|
||||
- entries.append(role)
|
||||
-
|
||||
- # Setup nsrole rewriter
|
||||
- rewriters = Rewriters(inst)
|
||||
- rewriter_properties = {
|
||||
- "cn": "nsrole",
|
||||
- "nsslapd-libpath": 'libroles-plugin',
|
||||
- "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter',
|
||||
- }
|
||||
- rewriter = rewriters.ensure_state(properties=rewriter_properties)
|
||||
- entries.append(rewriter)
|
||||
-
|
||||
-    # Restart the instance
|
||||
- inst.restart()
|
||||
-
|
||||
- # Search for entries
|
||||
- entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
-
|
||||
- # Set bad filter
|
||||
- role_properties = {
|
||||
- 'cn': 'TestFilteredRole',
|
||||
- 'nsRoleFilter': filter_ko,
|
||||
- 'description': 'Test bad filter',
|
||||
- }
|
||||
- role.ensure_state(properties=role_properties)
|
||||
-
|
||||
- # Search for entries
|
||||
- entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
-
|
||||
-
|
||||
if __name__ == "__main__":
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main("-s -v %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c
|
||||
index 60d7182e2..60f5a919a 100644
|
||||
--- a/ldap/servers/plugins/roles/roles_cache.c
|
||||
+++ b/ldap/servers/plugins/roles/roles_cache.c
|
||||
@@ -1117,16 +1117,17 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
|
||||
rolescopeDN = slapi_entry_attr_get_charptr(role_entry, ROLE_SCOPE_DN);
|
||||
if (rolescopeDN) {
|
||||
- Slapi_DN *rolescopeSDN;
|
||||
- Slapi_DN *top_rolescopeSDN, *top_this_roleSDN;
|
||||
+ Slapi_DN *rolescopeSDN = NULL;
|
||||
+ Slapi_DN *top_rolescopeSDN = NULL;
|
||||
+ Slapi_DN *top_this_roleSDN = NULL;
|
||||
|
||||
/* Before accepting to use this scope, first check if it belongs to the same suffix */
|
||||
rolescopeSDN = slapi_sdn_new_dn_byref(rolescopeDN);
|
||||
- if ((strlen((char *)slapi_sdn_get_ndn(rolescopeSDN)) > 0) &&
|
||||
+ if (rolescopeSDN && (strlen((char *)slapi_sdn_get_ndn(rolescopeSDN)) > 0) &&
|
||||
(slapi_dn_syntax_check(NULL, (char *)slapi_sdn_get_ndn(rolescopeSDN), 1) == 0)) {
|
||||
top_rolescopeSDN = roles_cache_get_top_suffix(rolescopeSDN);
|
||||
top_this_roleSDN = roles_cache_get_top_suffix(this_role->dn);
|
||||
- if (slapi_sdn_compare(top_rolescopeSDN, top_this_roleSDN) == 0) {
|
||||
+ if (top_rolescopeSDN && top_this_roleSDN && slapi_sdn_compare(top_rolescopeSDN, top_this_roleSDN) == 0) {
|
||||
/* rolescopeDN belongs to the same suffix as the role, we can use this scope */
|
||||
this_role->rolescopedn = rolescopeSDN;
|
||||
} else {
|
||||
@@ -1148,6 +1149,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
rolescopeDN);
|
||||
slapi_sdn_free(&rolescopeSDN);
|
||||
}
|
||||
+ slapi_ch_free_string(&rolescopeDN);
|
||||
}
|
||||
|
||||
/* Depending upon role type, pull out the remaining information we need */
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,230 +0,0 @@
|
||||
From bd2829d04491556c35a0b36b591c09a69baf6546 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Mon, 11 Dec 2023 11:58:40 +0100
|
||||
Subject: [PATCH] Issue 6004 - idletimeout may be ignored (#6005)
|
||||
|
||||
* Issue 6004 - idletimeout may be ignored
|
||||
|
||||
Problem: idletimeout is still not handled when binding as non-root (unless there is some activity
|
||||
on another connection)
|
||||
Fix:
|
||||
Add a slapi_eq_repeat_rel handler that walks all active connections every second and checks whether the timeout has expired.
|
||||
Note about CI test:
|
||||
Notice that idletimeout is never enforced for connections bound as root (i.e. cn=directory manager).
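For reference, a minimal sketch of how such a timeout can be exercised with python-ldap; the URI, bind DN, password and timeout value are illustrative assumptions, not taken from this patch verbatim:

```python
import time
import ldap

IDLETIMEOUT = 5  # assumed to match nsslapd-idletimeout on the instance

conn = ldap.initialize('ldap://localhost:389')           # illustrative URI
conn.simple_bind_s('uid=testuser,ou=People,dc=example,dc=com', 'password')
time.sleep(IDLETIMEOUT + 1)                               # let the connection go idle
try:
    conn.whoami_s()                                       # any operation on the idle connection
    print('still connected (expected when bound as cn=Directory Manager)')
except ldap.SERVER_DOWN:
    print('connection closed by idletimeout (expected for a regular user)')
```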
|
||||
|
||||
Issue #6004
|
||||
|
||||
Reviewed by: @droideck, @tbordaz (Thanks!)
|
||||
|
||||
(cherry picked from commit 86b5969acbe124eec8c89bcf1ab2156b2b140c17)
|
||||
(cherry picked from commit bdb0a72b4953678e5418406b3c202dfa2c7469a2)
|
||||
(cherry picked from commit 61cebc191cd4090072dda691b9956dbde4cf7c48)
|
||||
---
|
||||
.../tests/suites/config/regression_test.py | 82 ++++++++++++++++++-
|
||||
ldap/servers/slapd/daemon.c | 52 +++++++++++-
|
||||
2 files changed, 128 insertions(+), 6 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/config/regression_test.py b/dirsrvtests/tests/suites/config/regression_test.py
|
||||
index 0000dd82d..8dbba8cd2 100644
|
||||
--- a/dirsrvtests/tests/suites/config/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/config/regression_test.py
|
||||
@@ -6,20 +6,49 @@
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
#
|
||||
+import os
|
||||
import logging
|
||||
import pytest
|
||||
+import time
|
||||
from lib389.utils import *
|
||||
from lib389.dseldif import DSEldif
|
||||
-from lib389.config import LDBMConfig
|
||||
+from lib389.config import BDB_LDBMConfig, LDBMConfig, Config
|
||||
from lib389.backend import Backends
|
||||
from lib389.topologies import topology_st as topo
|
||||
+from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
|
||||
+from lib389._constants import DEFAULT_SUFFIX, PASSWORD, DN_DM
|
||||
|
||||
pytestmark = pytest.mark.tier0
|
||||
|
||||
logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
CUSTOM_MEM = '9100100100'
|
||||
+IDLETIMEOUT = 5
|
||||
+DN_TEST_USER = f'uid={TEST_USER_PROPERTIES["uid"]},ou=People,{DEFAULT_SUFFIX}'
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="module")
|
||||
+def idletimeout_topo(topo, request):
|
||||
+ """Create an instance with a test user and set idletimeout"""
|
||||
+ inst = topo.standalone
|
||||
+ config = Config(inst)
|
||||
+
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ user = users.create(properties={
|
||||
+ **TEST_USER_PROPERTIES,
|
||||
+ 'userpassword' : PASSWORD,
|
||||
+ })
|
||||
+ config.replace('nsslapd-idletimeout', str(IDLETIMEOUT))
|
||||
+
|
||||
+ def fin():
|
||||
+ if not DEBUGGING:
|
||||
+ config.reset('nsslapd-idletimeout')
|
||||
+ user.delete()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+ return topo
|
||||
|
||||
|
||||
# Function to return value of available memory in kb
|
||||
@@ -79,7 +108,7 @@ def test_maxbersize_repl(topo):
|
||||
nsslapd-errorlog-logmaxdiskspace are set in certain order
|
||||
|
||||
:id: 743e912c-2be4-4f5f-9c2a-93dcb18f51a0
|
||||
- :setup: MMR with two suppliers
|
||||
+ :setup: Standalone Instance
|
||||
:steps:
|
||||
1. Stop the instance
|
||||
2. Set nsslapd-errorlog-maxlogsize before/after
|
||||
@@ -112,3 +141,52 @@ def test_maxbersize_repl(topo):
|
||||
log.info("Assert no init_dse_file errors in the error log")
|
||||
assert not inst.ds_error_log.match('.*ERR - init_dse_file.*')
|
||||
|
||||
+
|
||||
+def test_bdb_config(topo):
|
||||
+ """Check that bdb config entry exists
|
||||
+
|
||||
+ :id: edbc6f54-7c98-11ee-b1c0-482ae39447e5
|
||||
+ :setup: standalone
|
||||
+ :steps:
|
||||
+ 1. Check that bdb config instance exists.
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topo.standalone
|
||||
+ assert BDB_LDBMConfig(inst).exists()
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("dn,expected_result", [(DN_TEST_USER, True), (DN_DM, False)])
|
||||
+def test_idletimeout(idletimeout_topo, dn, expected_result):
|
||||
+ """Check that bdb config entry exists
|
||||
+
|
||||
+ :id: b20f2826-942a-11ee-827b-482ae39447e5
|
||||
+ :parametrized: yes
|
||||
+ :setup: Standalone Instance with test user and idletimeout
|
||||
+ :steps:
|
||||
+ 1. Open new ldap connection
|
||||
+ 2. Bind with the provided dn
|
||||
+ 3. Wait longer than idletimeout
|
||||
+ 4. Try to bind again the provided dn and check if
|
||||
+ connection is closed or not.
|
||||
+ 5. Check if result is the expected one.
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = idletimeout_topo.standalone
|
||||
+
|
||||
+ l = ldap.initialize(f'ldap://localhost:{inst.port}')
|
||||
+ l.bind_s(dn, PASSWORD)
|
||||
+ time.sleep(IDLETIMEOUT+1)
|
||||
+ try:
|
||||
+ l.bind_s(dn, PASSWORD)
|
||||
+ result = False
|
||||
+ except ldap.SERVER_DOWN:
|
||||
+ result = True
|
||||
+ assert expected_result == result
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 57e07e5f5..6df109760 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -68,6 +68,8 @@
|
||||
#define SLAPD_ACCEPT_WAKEUP_TIMER 250
|
||||
#endif
|
||||
|
||||
+#define MILLISECONDS_PER_SECOND 1000
|
||||
+
|
||||
int slapd_wakeup_timer = SLAPD_WAKEUP_TIMER; /* time in ms to wakeup */
|
||||
int slapd_accept_wakeup_timer = SLAPD_ACCEPT_WAKEUP_TIMER; /* time in ms to wakeup */
|
||||
#ifdef notdef /* GGOODREPL */
|
||||
@@ -1045,6 +1047,48 @@ slapd_sockets_ports_free(daemon_ports_t *ports_info)
|
||||
#endif
|
||||
}
|
||||
|
||||
+/*
|
||||
+ * Tells if idle timeout has expired
|
||||
+ */
|
||||
+static inline int __attribute__((always_inline))
|
||||
+has_idletimeout_expired(Connection *c, time_t curtime)
|
||||
+{
|
||||
+ return (c->c_state != CONN_STATE_FREE && !c->c_gettingber &&
|
||||
+ c->c_idletimeout > 0 && NULL == c->c_ops &&
|
||||
+ curtime - c->c_idlesince >= c->c_idletimeout);
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * slapi_eq_repeat_rel callback that checks that idletimeout has not expired.
|
||||
+ */
|
||||
+void
|
||||
+check_idletimeout(time_t when __attribute__((unused)), void *arg __attribute__((unused)) )
|
||||
+{
|
||||
+ Connection_Table *ct = the_connection_table;
|
||||
+ time_t curtime = slapi_current_rel_time_t();
|
||||
+ /* Walk all active connections of all connection listeners */
|
||||
+ for (int list_num = 0; list_num < ct->list_num; list_num++) {
|
||||
+ for (Connection *c = connection_table_get_first_active_connection(ct, list_num);
|
||||
+ c != NULL; c = connection_table_get_next_active_connection(ct, c)) {
|
||||
+ if (!has_idletimeout_expired(c, curtime)) {
|
||||
+ continue;
|
||||
+ }
|
||||
+ /* Looks like idletimeout has expired, lets acquire the lock
|
||||
+ * and double check.
|
||||
+ */
|
||||
+ if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
|
||||
+ continue;
|
||||
+ }
|
||||
+ if (has_idletimeout_expired(c, curtime)) {
|
||||
+ /* idle timeout has expired */
|
||||
+ disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
+ SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
|
||||
+ }
|
||||
+ pthread_mutex_unlock(&(c->c_mutex));
|
||||
+ }
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
void
|
||||
slapd_daemon(daemon_ports_t *ports)
|
||||
{
|
||||
@@ -1258,7 +1302,9 @@ slapd_daemon(daemon_ports_t *ports)
|
||||
"MAINPID=%lu",
|
||||
(unsigned long)getpid());
|
||||
#endif
|
||||
-
|
||||
+ slapi_eq_repeat_rel(check_idletimeout, NULL,
|
||||
+ slapi_current_rel_time_t(),
|
||||
+ MILLISECONDS_PER_SECOND);
|
||||
/* The meat of the operation is in a loop on a call to select */
|
||||
while (!g_get_shutdown()) {
|
||||
int select_return = 0;
|
||||
@@ -1734,9 +1780,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused
|
||||
disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
SLAPD_DISCONNECT_POLL, EPIPE);
|
||||
}
|
||||
- } else if (c->c_idletimeout > 0 &&
|
||||
- (curtime - c->c_idlesince) >= c->c_idletimeout &&
|
||||
- NULL == c->c_ops) {
|
||||
+ } else if (has_idletimeout_expired(c, curtime)) {
|
||||
/* idle timeout */
|
||||
disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
|
||||
--
|
||||
2.48.1
|
||||
|
@ -0,0 +1,65 @@
|
||||
From f83a1996e3438e471cec086d53fb94be0c8666aa Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Mon, 7 Jul 2025 23:11:17 +0200
|
||||
Subject: [PATCH] Issue 6850 - AddressSanitizer: memory leak in mdb_init
|
||||
|
||||
Bug Description:
|
||||
`dbmdb_componentid` can be allocated multiple times. To avoid a memory
|
||||
leak, allocate it only once and free it during cleanup.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6850
|
||||
|
||||
Reviewed by: @mreynolds389, @tbordaz (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c | 4 +++-
|
||||
ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c | 2 +-
|
||||
ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c | 5 +++++
|
||||
3 files changed, 9 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
|
||||
index 1f7b71442..bebc83b76 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
|
||||
@@ -146,7 +146,9 @@ dbmdb_compute_limits(struct ldbminfo *li)
|
||||
int mdb_init(struct ldbminfo *li, config_info *config_array)
|
||||
{
|
||||
dbmdb_ctx_t *conf = (dbmdb_ctx_t *)slapi_ch_calloc(1, sizeof(dbmdb_ctx_t));
|
||||
- dbmdb_componentid = generate_componentid(NULL, "db-mdb");
|
||||
+ if (dbmdb_componentid == NULL) {
|
||||
+ dbmdb_componentid = generate_componentid(NULL, "db-mdb");
|
||||
+ }
|
||||
|
||||
li->li_dblayer_config = conf;
|
||||
strncpy(conf->home, li->li_directory, MAXPATHLEN-1);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
index 3ecc47170..c6e9f8b01 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
@@ -19,7 +19,7 @@
|
||||
#include <prclist.h>
|
||||
#include <glob.h>
|
||||
|
||||
-Slapi_ComponentId *dbmdb_componentid;
|
||||
+Slapi_ComponentId *dbmdb_componentid = NULL;
|
||||
|
||||
#define BULKOP_MAX_RECORDS 100 /* Max records handled by a single bulk operations */
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c
|
||||
index 2d07db9b5..ae10ac7cf 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c
|
||||
@@ -49,6 +49,11 @@ dbmdb_cleanup(struct ldbminfo *li)
|
||||
}
|
||||
slapi_ch_free((void **)&(li->li_dblayer_config));
|
||||
|
||||
+ if (dbmdb_componentid != NULL) {
|
||||
+ release_componentid(dbmdb_componentid);
|
||||
+ dbmdb_componentid = NULL;
|
||||
+ }
|
||||
+
|
||||
return 0;
|
||||
}
|
||||
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,69 +0,0 @@
|
||||
From e9fe6e074130406328b8e932a5c2efa814d190a0 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Wed, 5 Feb 2025 09:41:30 +0100
|
||||
Subject: [PATCH] Issue 6004 - (2nd) idletimeout may be ignored (#6569)
|
||||
|
||||
Problem:
|
||||
multiple listener threads were implemented in 2.x and later;
|
||||
this is missing in 1.4.3, so the cherry-pick had to be adapted
|
||||
Fix:
|
||||
skip the loop over the listeners
|
||||
|
||||
Issue #6004
|
||||
|
||||
Reviewed by: Jamie Chapman (Thanks !)
|
||||
---
|
||||
ldap/servers/slapd/daemon.c | 36 +++++++++++++++++-------------------
|
||||
1 file changed, 17 insertions(+), 19 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 6df109760..bef75e4a3 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1066,26 +1066,24 @@ check_idletimeout(time_t when __attribute__((unused)), void *arg __attribute__((
|
||||
{
|
||||
Connection_Table *ct = the_connection_table;
|
||||
time_t curtime = slapi_current_rel_time_t();
|
||||
- /* Walk all active connections of all connection listeners */
|
||||
- for (int list_num = 0; list_num < ct->list_num; list_num++) {
|
||||
- for (Connection *c = connection_table_get_first_active_connection(ct, list_num);
|
||||
- c != NULL; c = connection_table_get_next_active_connection(ct, c)) {
|
||||
- if (!has_idletimeout_expired(c, curtime)) {
|
||||
- continue;
|
||||
- }
|
||||
- /* Looks like idletimeout has expired, lets acquire the lock
|
||||
- * and double check.
|
||||
- */
|
||||
- if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
|
||||
- continue;
|
||||
- }
|
||||
- if (has_idletimeout_expired(c, curtime)) {
|
||||
- /* idle timeout has expired */
|
||||
- disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
- SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
|
||||
- }
|
||||
- pthread_mutex_unlock(&(c->c_mutex));
|
||||
+ /* Walk all active connections */
|
||||
+ for (Connection *c = connection_table_get_first_active_connection(ct);
|
||||
+ c != NULL; c = connection_table_get_next_active_connection(ct, c)) {
|
||||
+ if (!has_idletimeout_expired(c, curtime)) {
|
||||
+ continue;
|
||||
+ }
|
||||
+ /* Looks like idletimeout has expired, lets acquire the lock
|
||||
+ * and double check.
|
||||
+ */
|
||||
+ if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
|
||||
+ continue;
|
||||
+ }
|
||||
+ if (has_idletimeout_expired(c, curtime)) {
|
||||
+ /* idle timeout has expired */
|
||||
+ disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
+ SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
|
||||
}
|
||||
+ pthread_mutex_unlock(&(c->c_mutex));
|
||||
}
|
||||
}
|
||||
|
||||
--
|
||||
2.48.1
|
||||
|
@ -0,0 +1,58 @@
|
||||
From e98acc1bfe2194fcdd0e420777eb65a20d55a64b Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Mon, 7 Jul 2025 22:01:09 +0200
|
||||
Subject: [PATCH] Issue 6848 - AddressSanitizer: leak in do_search
|
||||
|
||||
Bug Description:
|
||||
When there's a BER decoding error and the function goes to
|
||||
`free_and_return`, the `attrs` variable is not being freed because it's
|
||||
only freed if `!psearch || rc != 0 || err != 0`, but `err` is still 0 at
|
||||
that point.
|
||||
|
||||
If we reach `free_and_return` from the `ber_scanf` error path, `attrs`
|
||||
was never set in the pblock with `slapi_pblock_set()`, so the
|
||||
`slapi_pblock_get()` call will not retrieve the potentially partially
|
||||
allocated `attrs` from the BER decoding.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6848
|
||||
|
||||
Reviewed by: @tbordaz, @droideck (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/search.c | 14 ++++++++++++--
|
||||
1 file changed, 12 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/search.c b/ldap/servers/slapd/search.c
|
||||
index e9b2c3670..f9d03c090 100644
|
||||
--- a/ldap/servers/slapd/search.c
|
||||
+++ b/ldap/servers/slapd/search.c
|
||||
@@ -235,6 +235,7 @@ do_search(Slapi_PBlock *pb)
|
||||
log_search_access(pb, base, scope, fstr, "decoding error");
|
||||
send_ldap_result(pb, LDAP_PROTOCOL_ERROR, NULL, NULL, 0,
|
||||
NULL);
|
||||
+ err = 1; /* Make sure we free everything */
|
||||
goto free_and_return;
|
||||
}
|
||||
|
||||
@@ -420,8 +421,17 @@ free_and_return:
|
||||
if (!psearch || rc != 0 || err != 0) {
|
||||
slapi_ch_free_string(&fstr);
|
||||
slapi_filter_free(filter, 1);
|
||||
- slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &attrs);
|
||||
- charray_free(attrs); /* passing NULL is fine */
|
||||
+
|
||||
+ /* Get attrs from pblock if it was set there, otherwise use local attrs */
|
||||
+ char **pblock_attrs = NULL;
|
||||
+ slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &pblock_attrs);
|
||||
+ if (pblock_attrs != NULL) {
|
||||
+ charray_free(pblock_attrs); /* Free attrs from pblock */
|
||||
+ slapi_pblock_set(pb, SLAPI_SEARCH_ATTRS, NULL);
|
||||
+ } else if (attrs != NULL) {
|
||||
+ /* Free attrs that were allocated but never put in pblock */
|
||||
+ charray_free(attrs);
|
||||
+ }
|
||||
charray_free(gerattrs); /* passing NULL is fine */
|
||||
/*
|
||||
* Fix for defect 526719 / 553356 : Persistent search op failed.
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,52 +0,0 @@
|
||||
From b2edc371c5ca4fd24ef469c64829c48824098e7f Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 8 Jan 2025 12:57:52 -0500
|
||||
Subject: [PATCH] Issue 6485 - Fix double free in USN cleanup task
|
||||
|
||||
Description:
|
||||
|
||||
ASAN report shows double free of bind dn in the USN cleanup task data. The bind
|
||||
dn was passed as a reference so it should never have to be freed by the cleanup
|
||||
task.
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6485
|
||||
|
||||
Reviewed by: tbordaz(Thanks!)
|
||||
---
|
||||
ldap/servers/plugins/usn/usn_cleanup.c | 6 ++----
|
||||
1 file changed, 2 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/usn/usn_cleanup.c b/ldap/servers/plugins/usn/usn_cleanup.c
|
||||
index bdb55e6b1..7eaf0f88f 100644
|
||||
--- a/ldap/servers/plugins/usn/usn_cleanup.c
|
||||
+++ b/ldap/servers/plugins/usn/usn_cleanup.c
|
||||
@@ -240,7 +240,7 @@ usn_cleanup_add(Slapi_PBlock *pb,
|
||||
char *suffix = NULL;
|
||||
char *backend = NULL;
|
||||
char *maxusn = NULL;
|
||||
- char *bind_dn;
|
||||
+ char *bind_dn = NULL;
|
||||
struct usn_cleanup_data *cleanup_data = NULL;
|
||||
int rv = SLAPI_DSE_CALLBACK_OK;
|
||||
Slapi_Task *task = NULL;
|
||||
@@ -323,8 +323,7 @@ usn_cleanup_add(Slapi_PBlock *pb,
|
||||
suffix = NULL; /* don't free in this function */
|
||||
cleanup_data->maxusn_to_delete = maxusn;
|
||||
maxusn = NULL; /* don't free in this function */
|
||||
- cleanup_data->bind_dn = bind_dn;
|
||||
- bind_dn = NULL; /* don't free in this function */
|
||||
+ cleanup_data->bind_dn = slapi_ch_strdup(bind_dn);
|
||||
slapi_task_set_data(task, cleanup_data);
|
||||
|
||||
/* start the USN tombstone cleanup task as a separate thread */
|
||||
@@ -363,7 +362,6 @@ usn_cleanup_task_destructor(Slapi_Task *task)
|
||||
slapi_ch_free_string(&mydata->suffix);
|
||||
slapi_ch_free_string(&mydata->maxusn_to_delete);
|
||||
slapi_ch_free_string(&mydata->bind_dn);
|
||||
- /* Need to cast to avoid a compiler warning */
|
||||
slapi_ch_free((void **)&mydata);
|
||||
}
|
||||
}
|
||||
--
|
||||
2.48.1
|
||||
|
@ -0,0 +1,58 @@
|
||||
From 120bc2666b682a27ffd6ace5cc238b33fab32c21 Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Fri, 11 Jul 2025 12:32:38 +0200
|
||||
Subject: [PATCH] Issue 6865 - AddressSanitizer: leak in
|
||||
agmt_update_init_status
|
||||
|
||||
Bug Description:
|
||||
We allocate an array of `LDAPMod *` pointers, but never free it:
|
||||
|
||||
```
|
||||
=================================================================
|
||||
==2748356==ERROR: LeakSanitizer: detected memory leaks
|
||||
|
||||
Direct leak of 24 byte(s) in 1 object(s) allocated from:
|
||||
#0 0x7f05e8cb4a07 in __interceptor_malloc (/lib64/libasan.so.6+0xb4a07)
|
||||
#1 0x7f05e85c0138 in slapi_ch_malloc (/usr/lib64/dirsrv/libslapd.so.0+0x1c0138)
|
||||
#2 0x7f05e109e481 in agmt_update_init_status ldap/servers/plugins/replication/repl5_agmt.c:2583
|
||||
#3 0x7f05e10a0aa5 in agmtlist_shutdown ldap/servers/plugins/replication/repl5_agmtlist.c:789
|
||||
#4 0x7f05e10ab6bc in multisupplier_stop ldap/servers/plugins/replication/repl5_init.c:844
|
||||
#5 0x7f05e10ab6bc in multisupplier_stop ldap/servers/plugins/replication/repl5_init.c:837
|
||||
#6 0x7f05e862507d in plugin_call_func ldap/servers/slapd/plugin.c:2001
|
||||
#7 0x7f05e8625be1 in plugin_call_one ldap/servers/slapd/plugin.c:1950
|
||||
#8 0x7f05e8625be1 in plugin_dependency_closeall ldap/servers/slapd/plugin.c:1844
|
||||
#9 0x55e1a7ff9815 in slapd_daemon ldap/servers/slapd/daemon.c:1275
|
||||
#10 0x55e1a7fd36ef in main (/usr/sbin/ns-slapd+0x3e6ef)
|
||||
#11 0x7f05e80295cf in __libc_start_call_main (/lib64/libc.so.6+0x295cf)
|
||||
#12 0x7f05e802967f in __libc_start_main_alias_2 (/lib64/libc.so.6+0x2967f)
|
||||
#13 0x55e1a7fd74a4 in _start (/usr/sbin/ns-slapd+0x424a4)
|
||||
|
||||
SUMMARY: AddressSanitizer: 24 byte(s) leaked in 1 allocation(s).
|
||||
```
|
||||
|
||||
Fix Description:
|
||||
Ensure `mods` is freed in the cleanup code.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6865
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6470
|
||||
|
||||
Reviewed by: @mreynolds389 (Thanks!)
|
||||
---
|
||||
ldap/servers/plugins/replication/repl5_agmt.c | 1 +
|
||||
1 file changed, 1 insertion(+)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
index 6ffb074d4..c6cfcda07 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
@@ -2653,6 +2653,7 @@ agmt_update_init_status(Repl_Agmt *ra)
|
||||
} else {
|
||||
PR_Unlock(ra->lock);
|
||||
}
|
||||
+ slapi_ch_free((void **)&mods);
|
||||
slapi_mod_done(&smod_start_time);
|
||||
slapi_mod_done(&smod_end_time);
|
||||
slapi_mod_done(&smod_status);
|
||||
--
|
||||
2.49.0
|
||||
|
@ -0,0 +1,97 @@
|
||||
From 5cc13c70dfe22d95686bec9214c53f1b4114cd90 Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Fri, 1 Aug 2025 13:27:02 +0100
|
||||
Subject: [PATCH] Issue 6768 - ns-slapd crashes when a referral is added
|
||||
(#6780)
|
||||
|
||||
Bug description: When a paged result search is successfully run on a referred
|
||||
suffix, we retrieve the search result set from the pblock and try to release
|
||||
it. In this case the search result set is NULL, which triggers a SEGV during
|
||||
the release.
|
||||
|
||||
Fix description: If the search result code is LDAP_REFERRAL, skip deletion of
|
||||
the search result set. Added a test case.
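A minimal reproduction sketch with python-ldap; the URI and credentials are illustrative, and the suffix is intentionally one the instance does not serve:

```python
import ldap
from ldap.controls import SimplePagedResultsControl

conn = ldap.initialize('ldap://localhost:389')        # illustrative URI
conn.set_option(ldap.OPT_REFERRALS, 0)                # do not chase the referral
conn.simple_bind_s('cn=Directory Manager', 'password')

page = SimplePagedResultsControl(True, size=5, cookie='')
try:
    # dc=referme,dc=com is not served locally, so the search is referred
    conn.search_ext_s('dc=referme,dc=com', ldap.SCOPE_SUBTREE, serverctrls=[page])
except ldap.REFERRAL:
    pass  # expected; before the fix the server could crash instead of referring
```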
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6768
|
||||
|
||||
Reviewed by: @tbordaz, @progier389 (Thank you)
|
||||
---
|
||||
.../paged_results/paged_results_test.py | 46 +++++++++++++++++++
|
||||
ldap/servers/slapd/opshared.c | 4 +-
|
||||
2 files changed, 49 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
|
||||
index fca48db0f..1bb94b53a 100644
|
||||
--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py
|
||||
+++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
|
||||
@@ -1271,6 +1271,52 @@ def test_search_stress_abandon(create_40k_users, create_user):
|
||||
paged_search(conn, create_40k_users.suffix, [req_ctrl], search_flt, searchreq_attrlist, abandon_rate=abandon_rate)
|
||||
|
||||
|
||||
+def test_search_referral(topology_st):
|
||||
+ """Test a paged search on a referred suffix doesnt crash the server.
|
||||
+
|
||||
+ :id: c788bdbf-965b-4f12-ac24-d4d695e2cce2
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Configure a default referral.
|
||||
+ 2. Create a paged result search control.
|
||||
+        3. Paged result search on referral suffix (doesn't exist on the instance, triggering a referral).
|
||||
+ 4. Check the server is still running.
|
||||
+ 5. Remove referral.
|
||||
+
|
||||
+ :expectedresults:
|
||||
+        1. Referral successfully set.
|
||||
+ 2. Control created.
|
||||
+ 3. Search returns ldap.REFERRAL (10).
|
||||
+ 4. Server still running.
|
||||
+ 5. Referral removed.
|
||||
+ """
|
||||
+
|
||||
+ page_size = 5
|
||||
+ SEARCH_SUFFIX = "dc=referme,dc=com"
|
||||
+ REFERRAL = "ldap://localhost.localdomain:389/o%3dnetscaperoot"
|
||||
+
|
||||
+ log.info('Configuring referral')
|
||||
+ topology_st.standalone.config.set('nsslapd-referral', REFERRAL)
|
||||
+ referral = topology_st.standalone.config.get_attr_val_utf8('nsslapd-referral')
|
||||
+ assert (referral == REFERRAL)
|
||||
+
|
||||
+ log.info('Create paged result search control')
|
||||
+ req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
|
||||
+
|
||||
+ log.info('Perform a paged result search on referred suffix, no chase')
|
||||
+ with pytest.raises(ldap.REFERRAL):
|
||||
+ topology_st.standalone.search_ext_s(SEARCH_SUFFIX, ldap.SCOPE_SUBTREE, serverctrls=[req_ctrl])
|
||||
+
|
||||
+ log.info('Confirm instance is still running')
|
||||
+ assert (topology_st.standalone.status())
|
||||
+
|
||||
+ log.info('Remove referral')
|
||||
+ topology_st.standalone.config.remove_all('nsslapd-referral')
|
||||
+ referral = topology_st.standalone.config.get_attr_val_utf8('nsslapd-referral')
|
||||
+ assert (referral == None)
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
|
||||
index 14a7dcdfb..03ed60981 100644
|
||||
--- a/ldap/servers/slapd/opshared.c
|
||||
+++ b/ldap/servers/slapd/opshared.c
|
||||
@@ -879,7 +879,9 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
/* Free the results if not "no_such_object" */
|
||||
void *sr = NULL;
|
||||
slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
|
||||
- be->be_search_results_release(&sr);
|
||||
+ if (be->be_search_results_release != NULL) {
|
||||
+ be->be_search_results_release(&sr);
|
||||
+ }
|
||||
}
|
||||
pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
|
||||
rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1);
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,38 +0,0 @@
|
||||
From 679262c0c292413851d2d004b588ecfd7d91c85a Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Tue, 11 Feb 2025 18:06:34 +0000
|
||||
Subject: [PATCH] Issue 5841 - dsconf incorrectly setting up Pass-Through
|
||||
Authentication (#6601)
|
||||
|
||||
Bug description:
|
||||
During init, PAMPassThroughAuthConfigs defines an "objectclass=nsslapdplugin"
|
||||
plugin object. During filter creation, dsconf fails as objectclass=nsslapdplugin
|
||||
is not present in the PAM PT config entry. This objectclass has been removed in
|
||||
all other branches, branch 1.4.3 was skipped as there are cherry pick conflicts.
|
||||
|
||||
Fix description:
|
||||
Remove nsslapdplugin from the plugin object's objectclass list.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/5841
|
||||
|
||||
Reviewed by: @progier389 (Thank you)
|
||||
---
|
||||
src/lib389/lib389/plugins.py | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 185398e5b..25b49dae4 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -1579,7 +1579,7 @@ class PAMPassThroughAuthConfigs(DSLdapObjects):
|
||||
|
||||
def __init__(self, instance, basedn="cn=PAM Pass Through Auth,cn=plugins,cn=config"):
|
||||
super(PAMPassThroughAuthConfigs, self).__init__(instance)
|
||||
- self._objectclasses = ['top', 'extensibleObject', 'nsslapdplugin', 'pamConfig']
|
||||
+ self._objectclasses = ['top', 'extensibleObject', 'pamConfig']
|
||||
self._filterattrs = ['cn']
|
||||
self._scope = ldap.SCOPE_ONELEVEL
|
||||
self._childobject = PAMPassThroughAuthConfig
|
||||
--
|
||||
2.48.1
|
||||
|
@ -0,0 +1,268 @@
|
||||
From def739668dd2728825f1108911abc065f981010c Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 19 Aug 2025 16:10:09 -0700
|
||||
Subject: [PATCH] Issue 6940 - dsconf monitor server fails with ldapi:// due to
|
||||
absent server ID (#6941)
|
||||
|
||||
Description: The dsconf monitor server command fails when using ldapi://
|
||||
protocol because the server ID is not set, preventing PID retrieval from
|
||||
defaults.inf. This causes the Web console to fail displaying the "Server
|
||||
Version" field and potentially other CLI/WebUI issues.
|
||||
|
||||
The fix attempts to derive the server ID from the LDAPI socket path when
|
||||
not explicitly provided. This covers the common case where the socket name
|
||||
contains the instance name (e.g., slapd-instance.socket).
|
||||
If that's not possible, it also attempts to derive the server ID from the
|
||||
nsslapd-instancedir configuration attribute. The derived server ID
|
||||
is validated against actual system instances to ensure it exists.
|
||||
Note that socket names can vary and nsslapd-instancedir can be changed.
|
||||
This is a best-effort approach for the common naming pattern.
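The extraction idea can be sketched as a small regex helper; the socket path below is an assumed example, and the real code additionally validates the result against the instances present on the system:

```python
import re

def serverid_from_path(path):
    """Best-effort: pull '<id>' out of a '.../slapd-<id>.socket'-style path."""
    m = re.search(r'slapd-([A-Za-z0-9._-]+?)(?:\.socket)?(?:$|/)', path or '')
    return m.group(1) if m else None

# Assumed example path; real socket names may differ, hence best-effort only.
print(serverid_from_path('/run/slapd-localhost.socket'))   # -> 'localhost'
```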
|
||||
|
||||
Also fixes the LDAPI socket path extraction which was incorrectly using
|
||||
offset 9 instead of 8 for ldapi:// URIs.
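A quick illustration of why the slice must start at offset 8 (the URI below is an assumed example):

```python
uri = 'ldapi://%2Frun%2Fslapd-localhost.socket'   # assumed example URI
assert len('ldapi://') == 8
print(uri[8:])   # %2Frun%2Fslapd-localhost.socket  (correct socket path)
print(uri[9:])   # 2Frun%2Fslapd-localhost.socket   (first character lost)
```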
|
||||
|
||||
The monitor command now handles missing PIDs gracefully, returning zero
|
||||
values for process-specific stats instead of failing completely.
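Roughly, the graceful-degradation pattern looks like this sketch built on psutil; the field names and fallback values are illustrative, not the exact monitor.py output:

```python
import psutil

def process_stats(pid):
    """Per-process stats, falling back to zeros when the PID is missing or stale."""
    stats = {'total_mem': psutil.virtual_memory().total}
    if pid:
        try:
            p = psutil.Process(pid)
            stats['rss'] = p.memory_info().rss
            stats['threads'] = p.num_threads()
            stats['cpu'] = p.cpu_percent(interval=0.1)
            stats['status'] = 'Server running'
            return stats
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            pass  # stale PID file or inaccessible process: fall through to zeros
    stats.update(rss=0, threads=0, cpu=0, status='PID unavailable')
    return stats
```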
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6940
|
||||
|
||||
Reviewed by: @vashirov, @mreynolds389 (Thanks!!)
|
||||
---
|
||||
src/lib389/lib389/__init__.py | 93 +++++++++++++++++++++++++++---
|
||||
src/lib389/lib389/cli_base/dsrc.py | 4 +-
|
||||
src/lib389/lib389/monitor.py | 50 ++++++++++++----
|
||||
3 files changed, 124 insertions(+), 23 deletions(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
|
||||
index 65e70c1dd..e6f9273eb 100644
|
||||
--- a/src/lib389/lib389/__init__.py
|
||||
+++ b/src/lib389/lib389/__init__.py
|
||||
@@ -17,7 +17,7 @@
|
||||
|
||||
import sys
|
||||
import os
|
||||
-from urllib.parse import urlparse
|
||||
+from urllib.parse import urlparse, unquote
|
||||
import stat
|
||||
import pwd
|
||||
import grp
|
||||
@@ -67,7 +67,8 @@ from lib389.utils import (
|
||||
get_default_db_lib,
|
||||
selinux_present,
|
||||
selinux_label_port,
|
||||
- get_user_is_root)
|
||||
+ get_user_is_root,
|
||||
+ get_instance_list)
|
||||
from lib389.paths import Paths
|
||||
from lib389.nss_ssl import NssSsl
|
||||
from lib389.tasks import BackupTask, RestoreTask, Task
|
||||
@@ -304,6 +305,57 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
self.dbdir = self.ds_paths.db_dir
|
||||
self.changelogdir = os.path.join(os.path.dirname(self.dbdir), DEFAULT_CHANGELOG_DB)
|
||||
|
||||
+ def _extract_serverid_from_string(self, text):
|
||||
+ """Extract serverid from a string containing 'slapd-<serverid>' pattern.
|
||||
+ Returns the serverid or None if not found or validation fails.
|
||||
+ Only attempts derivation if serverid is currently None.
|
||||
+ """
|
||||
+ if getattr(self, 'serverid', None) is not None:
|
||||
+ return None
|
||||
+ if not text:
|
||||
+ return None
|
||||
+
|
||||
+ # Use regex to extract serverid from "slapd-<serverid>" or "slapd-<serverid>.socket"
|
||||
+ match = re.search(r'slapd-([A-Za-z0-9._-]+?)(?:\.socket)?(?:$|/)', text)
|
||||
+ if not match:
|
||||
+ return None
|
||||
+ candidate = match.group(1)
|
||||
+
|
||||
+ self.serverid = candidate
|
||||
+ try:
|
||||
+ insts = get_instance_list()
|
||||
+ except Exception:
|
||||
+ self.serverid = None
|
||||
+ return None
|
||||
+ if f'slapd-{candidate}' in insts or candidate in insts:
|
||||
+ return candidate
|
||||
+ # restore original and report failure
|
||||
+ self.serverid = None
|
||||
+ return None
|
||||
+
|
||||
+ def _derive_serverid_from_ldapi(self):
|
||||
+ """Attempt to derive serverid from an LDAPI socket path or URI and
|
||||
+ verify it exists on the system. Returns the serverid or None.
|
||||
+ """
|
||||
+ socket_path = None
|
||||
+ if hasattr(self, 'ldapi_socket') and self.ldapi_socket:
|
||||
+ socket_path = unquote(self.ldapi_socket)
|
||||
+ elif hasattr(self, 'ldapuri') and isinstance(self.ldapuri, str) and self.ldapuri.startswith('ldapi://'):
|
||||
+ socket_path = unquote(self.ldapuri[len('ldapi://'):])
|
||||
+
|
||||
+ return self._extract_serverid_from_string(socket_path)
|
||||
+
|
||||
+ def _derive_serverid_from_instancedir(self):
|
||||
+ """Extract serverid from nsslapd-instancedir path like '/usr/lib64/dirsrv/slapd-<serverid>'"""
|
||||
+ try:
|
||||
+ from lib389.config import Config
|
||||
+ config = Config(self)
|
||||
+ instancedir = config.get_attr_val_utf8_l("nsslapd-instancedir")
|
||||
+ except Exception:
|
||||
+ return None
|
||||
+
|
||||
+ return self._extract_serverid_from_string(instancedir)
|
||||
+
|
||||
def rebind(self):
|
||||
"""Reconnect to the DS
|
||||
|
||||
@@ -576,6 +628,15 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
self.ldapi_autobind = args.get(SER_LDAPI_AUTOBIND, 'off')
|
||||
self.isLocal = True
|
||||
self.log.debug("Allocate %s with %s", self.__class__, self.ldapi_socket)
|
||||
+ elif self.ldapuri is not None and isinstance(self.ldapuri, str) and self.ldapuri.startswith('ldapi://'):
|
||||
+ # Try to learn serverid from ldapi uri
|
||||
+ try:
|
||||
+ self.ldapi_enabled = 'on'
|
||||
+ self.ldapi_socket = unquote(self.ldapuri[len('ldapi://'):])
|
||||
+ self.ldapi_autobind = args.get(SER_LDAPI_AUTOBIND, 'off')
|
||||
+ self.isLocal = True
|
||||
+ except Exception:
|
||||
+ pass
|
||||
# Settings from args of server attributes
|
||||
self.strict_hostname = args.get(SER_STRICT_HOSTNAME_CHECKING, False)
|
||||
if self.strict_hostname is True:
|
||||
@@ -596,9 +657,16 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
|
||||
self.log.debug("Allocate %s with %s:%s", self.__class__, self.host, (self.sslport or self.port))
|
||||
|
||||
- if SER_SERVERID_PROP in args:
|
||||
- self.ds_paths = Paths(serverid=args[SER_SERVERID_PROP], instance=self, local=self.isLocal)
|
||||
+ # Try to determine serverid if not provided
|
||||
+ if SER_SERVERID_PROP in args and args.get(SER_SERVERID_PROP) is not None:
|
||||
self.serverid = args.get(SER_SERVERID_PROP, None)
|
||||
+ elif getattr(self, 'serverid', None) is None and self.isLocal:
|
||||
+ sid = self._derive_serverid_from_ldapi()
|
||||
+ if sid:
|
||||
+ self.serverid = sid
|
||||
+
|
||||
+ if getattr(self, 'serverid', None):
|
||||
+ self.ds_paths = Paths(serverid=self.serverid, instance=self, local=self.isLocal)
|
||||
else:
|
||||
self.ds_paths = Paths(instance=self, local=self.isLocal)
|
||||
|
||||
@@ -1032,6 +1100,17 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
self.__initPart2()
|
||||
self.state = DIRSRV_STATE_ONLINE
|
||||
# Now that we're online, some of our methods may try to query the version online.
|
||||
+
|
||||
+ # After transitioning online, attempt to derive serverid if still unknown.
|
||||
+ # If we find it, refresh ds_paths and rerun __initPart2
|
||||
+ if getattr(self, 'serverid', None) is None and self.isLocal:
|
||||
+ sid = self._derive_serverid_from_instancedir()
|
||||
+ if sid:
|
||||
+ self.serverid = sid
|
||||
+ # Reinitialize paths with the new serverid
|
||||
+ self.ds_paths = Paths(serverid=self.serverid, instance=self, local=self.isLocal)
|
||||
+ if not connOnly:
|
||||
+ self.__initPart2()
|
||||
self.__add_brookers__()
|
||||
|
||||
def close(self):
|
||||
@@ -3569,8 +3648,4 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
"""
|
||||
Get the pid of the running server
|
||||
"""
|
||||
- pid = pid_from_file(self.pid_file())
|
||||
- if pid == 0 or pid is None:
|
||||
- return 0
|
||||
- else:
|
||||
- return pid
|
||||
+ return pid_from_file(self.pid_file())
|
||||
diff --git a/src/lib389/lib389/cli_base/dsrc.py b/src/lib389/lib389/cli_base/dsrc.py
|
||||
index 84567b990..498228ce0 100644
|
||||
--- a/src/lib389/lib389/cli_base/dsrc.py
|
||||
+++ b/src/lib389/lib389/cli_base/dsrc.py
|
||||
@@ -56,7 +56,7 @@ def dsrc_arg_concat(args, dsrc_inst):
|
||||
new_dsrc_inst['args'][SER_ROOT_DN] = new_dsrc_inst['binddn']
|
||||
if new_dsrc_inst['uri'][0:8] == 'ldapi://':
|
||||
new_dsrc_inst['args'][SER_LDAPI_ENABLED] = "on"
|
||||
- new_dsrc_inst['args'][SER_LDAPI_SOCKET] = new_dsrc_inst['uri'][9:]
|
||||
+ new_dsrc_inst['args'][SER_LDAPI_SOCKET] = new_dsrc_inst['uri'][8:]
|
||||
new_dsrc_inst['args'][SER_LDAPI_AUTOBIND] = "on"
|
||||
|
||||
# Make new
|
||||
@@ -170,7 +170,7 @@ def dsrc_to_ldap(path, instance_name, log):
|
||||
dsrc_inst['args'][SER_ROOT_DN] = dsrc_inst['binddn']
|
||||
if dsrc_inst['uri'][0:8] == 'ldapi://':
|
||||
dsrc_inst['args'][SER_LDAPI_ENABLED] = "on"
|
||||
- dsrc_inst['args'][SER_LDAPI_SOCKET] = dsrc_inst['uri'][9:]
|
||||
+ dsrc_inst['args'][SER_LDAPI_SOCKET] = dsrc_inst['uri'][8:]
|
||||
dsrc_inst['args'][SER_LDAPI_AUTOBIND] = "on"
|
||||
|
||||
# Return the dict.
|
||||
diff --git a/src/lib389/lib389/monitor.py b/src/lib389/lib389/monitor.py
|
||||
index 27b99a7e3..bf3e1df76 100644
|
||||
--- a/src/lib389/lib389/monitor.py
|
||||
+++ b/src/lib389/lib389/monitor.py
|
||||
@@ -92,21 +92,47 @@ class Monitor(DSLdapObject):
|
||||
Get CPU and memory stats
|
||||
"""
|
||||
stats = {}
|
||||
- pid = self._instance.get_pid()
|
||||
+ try:
|
||||
+ pid = self._instance.get_pid()
|
||||
+ except Exception:
|
||||
+ pid = None
|
||||
total_mem = psutil.virtual_memory()[0]
|
||||
- p = psutil.Process(pid)
|
||||
- memory_stats = p.memory_full_info()
|
||||
|
||||
- # Get memory & CPU stats
|
||||
+ # Always include total system memory
|
||||
stats['total_mem'] = [str(total_mem)]
|
||||
- stats['rss'] = [str(memory_stats[0])]
|
||||
- stats['vms'] = [str(memory_stats[1])]
|
||||
- stats['swap'] = [str(memory_stats[9])]
|
||||
- stats['mem_rss_percent'] = [str(round(p.memory_percent("rss")))]
|
||||
- stats['mem_vms_percent'] = [str(round(p.memory_percent("vms")))]
|
||||
- stats['mem_swap_percent'] = [str(round(p.memory_percent("swap")))]
|
||||
- stats['total_threads'] = [str(p.num_threads())]
|
||||
- stats['cpu_usage'] = [str(round(p.cpu_percent(interval=0.1)))]
|
||||
+
|
||||
+ # Process-specific stats - only if process is running (pid is not None)
|
||||
+ if pid is not None:
|
||||
+ try:
|
||||
+ p = psutil.Process(pid)
|
||||
+ memory_stats = p.memory_full_info()
|
||||
+
|
||||
+ # Get memory & CPU stats
|
||||
+ stats['rss'] = [str(memory_stats[0])]
|
||||
+ stats['vms'] = [str(memory_stats[1])]
|
||||
+ stats['swap'] = [str(memory_stats[9])]
|
||||
+ stats['mem_rss_percent'] = [str(round(p.memory_percent("rss")))]
|
||||
+ stats['mem_vms_percent'] = [str(round(p.memory_percent("vms")))]
|
||||
+ stats['mem_swap_percent'] = [str(round(p.memory_percent("swap")))]
|
||||
+ stats['total_threads'] = [str(p.num_threads())]
|
||||
+ stats['cpu_usage'] = [str(round(p.cpu_percent(interval=0.1)))]
|
||||
+ except (psutil.NoSuchProcess, psutil.AccessDenied):
|
||||
+ # Process exists in PID file but is not accessible or doesn't exist
|
||||
+ pid = None
|
||||
+
|
||||
+ # If no valid PID, provide zero values for process stats
|
||||
+ if pid is None:
|
||||
+ stats['rss'] = ['0']
|
||||
+ stats['vms'] = ['0']
|
||||
+ stats['swap'] = ['0']
|
||||
+ stats['mem_rss_percent'] = ['0']
|
||||
+ stats['mem_vms_percent'] = ['0']
|
||||
+ stats['mem_swap_percent'] = ['0']
|
||||
+ stats['total_threads'] = ['0']
|
||||
+ stats['cpu_usage'] = ['0']
|
||||
+ stats['server_status'] = ['PID unavailable']
|
||||
+ else:
|
||||
+ stats['server_status'] = ['Server running']
|
||||
|
||||
# Connections to DS
|
||||
if self._instance.port == "0":
|
||||
--
|
||||
2.49.0
|
||||
|
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,319 +0,0 @@
|
||||
From 7d534efdcd96b13524dae587c3c5994ed01924ab Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Fri, 16 Feb 2024 13:52:36 -0800
|
||||
Subject: [PATCH] Issue 6067 - Improve dsidm CLI No Such Entry handling (#6079)
|
||||
|
||||
Description: Add additional error processing to dsidm CLI tool for when basedn
|
||||
or OU subentries are absent.
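The added check boils down to verifying that the base DN and the per-command OU exist before acting. A hedged sketch with plain python-ldap follows; the connection, suffix and OU names are assumed placeholders, not the dsidm code itself:

```python
import ldap

def dn_exists(conn, dn):
    """Return True if `dn` exists, False on NO_SUCH_OBJECT."""
    try:
        conn.search_s(dn, ldap.SCOPE_BASE, '(objectClass=*)', ['1.1'])
        return True
    except ldap.NO_SUCH_OBJECT:
        return False

# `conn` is assumed to be an already-bound connection; names are illustrative.
suffix = 'dc=example,dc=com'
if not dn_exists(conn, suffix):
    raise ValueError(f'The base DN "{suffix}" does not exist.')
if not dn_exists(conn, f'ou=People,{suffix}'):
    raise ValueError('ou=People is required for the "user" subcommand; create it first.')
```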
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/6067
|
||||
|
||||
Reviewed by: @vashirov (Thanks!)
|
||||
---
|
||||
src/lib389/cli/dsidm | 21 ++++++++-------
|
||||
src/lib389/lib389/cli_idm/__init__.py | 38 ++++++++++++++++++++++++++-
|
||||
src/lib389/lib389/cli_idm/account.py | 4 +--
|
||||
src/lib389/lib389/cli_idm/service.py | 4 ++-
|
||||
src/lib389/lib389/idm/group.py | 10 ++++---
|
||||
src/lib389/lib389/idm/posixgroup.py | 5 ++--
|
||||
src/lib389/lib389/idm/services.py | 5 ++--
|
||||
src/lib389/lib389/idm/user.py | 5 ++--
|
||||
8 files changed, 67 insertions(+), 25 deletions(-)
|
||||
|
||||
diff --git a/src/lib389/cli/dsidm b/src/lib389/cli/dsidm
|
||||
index 1b739b103..970973f4f 100755
|
||||
--- a/src/lib389/cli/dsidm
|
||||
+++ b/src/lib389/cli/dsidm
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2023 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -19,6 +19,7 @@ import argparse
|
||||
import argcomplete
|
||||
from lib389.utils import get_instance_list, instance_choices
|
||||
from lib389._constants import DSRC_HOME
|
||||
+from lib389.cli_idm import _get_basedn_arg
|
||||
from lib389.cli_idm import account as cli_account
|
||||
from lib389.cli_idm import initialise as cli_init
|
||||
from lib389.cli_idm import organizationalunit as cli_ou
|
||||
@@ -117,14 +118,6 @@ if __name__ == '__main__':
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
||||
|
||||
- if dsrc_inst['basedn'] is None:
|
||||
- errmsg = "Must provide a basedn!"
|
||||
- if args.json:
|
||||
- sys.stderr.write('{"desc": "%s"}\n' % errmsg)
|
||||
- else:
|
||||
- log.error(errmsg)
|
||||
- sys.exit(1)
|
||||
-
|
||||
if not args.verbose:
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
|
||||
@@ -135,7 +128,15 @@ if __name__ == '__main__':
|
||||
result = False
|
||||
try:
|
||||
inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose, args=args)
|
||||
- result = args.func(inst, dsrc_inst['basedn'], log, args)
|
||||
+ basedn = _get_basedn_arg(inst, args, log, msg="Enter basedn")
|
||||
+ if basedn is None:
|
||||
+ errmsg = "Must provide a basedn!"
|
||||
+ if args.json:
|
||||
+ sys.stderr.write('{"desc": "%s"}\n' % errmsg)
|
||||
+ else:
|
||||
+ log.error(errmsg)
|
||||
+ sys.exit(1)
|
||||
+ result = args.func(inst, basedn, log, args)
|
||||
if args.verbose:
|
||||
log.info("Command successful.")
|
||||
except Exception as e:
|
||||
diff --git a/src/lib389/lib389/cli_idm/__init__.py b/src/lib389/lib389/cli_idm/__init__.py
|
||||
index 0dab54847..e3622246d 100644
|
||||
--- a/src/lib389/lib389/cli_idm/__init__.py
|
||||
+++ b/src/lib389/lib389/cli_idm/__init__.py
|
||||
@@ -1,15 +1,30 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2023 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
|
||||
+import sys
|
||||
import ldap
|
||||
from getpass import getpass
|
||||
import json
|
||||
+from lib389._mapped_object import DSLdapObject
|
||||
+from lib389.cli_base import _get_dn_arg
|
||||
+from lib389.idm.user import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_USER
|
||||
+from lib389.idm.group import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_GROUP
|
||||
+from lib389.idm.posixgroup import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_POSIXGROUP
|
||||
+from lib389.idm.services import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_SERVICES
|
||||
+
|
||||
+# The key is module name, the value is default RDN
|
||||
+BASEDN_RDNS = {
|
||||
+ 'user': DEFAULT_BASEDN_RDN_USER,
|
||||
+ 'group': DEFAULT_BASEDN_RDN_GROUP,
|
||||
+ 'posixgroup': DEFAULT_BASEDN_RDN_POSIXGROUP,
|
||||
+ 'service': DEFAULT_BASEDN_RDN_SERVICES,
|
||||
+}
|
||||
|
||||
|
||||
def _get_arg(args, msg=None):
|
||||
@@ -37,6 +52,27 @@ def _get_args(args, kws):
|
||||
return kwargs
|
||||
|
||||
|
||||
+def _get_basedn_arg(inst, args, log, msg=None):
|
||||
+ basedn_arg = _get_dn_arg(args.basedn, msg="Enter basedn")
|
||||
+ if not DSLdapObject(inst, basedn_arg).exists():
|
||||
+ raise ValueError(f'The base DN "{basedn_arg}" does not exist.')
|
||||
+
|
||||
+ # Get the RDN based on the last part of the module name if applicable
|
||||
+ # (lib389.cli_idm.user -> user)
|
||||
+ try:
|
||||
+ command_name = args.func.__module__.split('.')[-1]
|
||||
+ object_rdn = BASEDN_RDNS[command_name]
|
||||
+ # Check if the DN for our command exists
|
||||
+ command_basedn = f'{object_rdn},{basedn_arg}'
|
||||
+ if not DSLdapObject(inst, command_basedn).exists():
|
||||
+ errmsg = f'The DN "{command_basedn}" does not exist.'
|
||||
+ errmsg += f' It is required for "{command_name}" subcommand. Please create it first.'
|
||||
+ raise ValueError(errmsg)
|
||||
+ except KeyError:
|
||||
+ pass
|
||||
+ return basedn_arg
|
||||
+
|
||||
+
|
||||
# This is really similar to get_args, but generates from an array
|
||||
def _get_attributes(args, attrs):
|
||||
kwargs = {}
|
||||
diff --git a/src/lib389/lib389/cli_idm/account.py b/src/lib389/lib389/cli_idm/account.py
|
||||
index 5d7b9cc77..15f766588 100644
|
||||
--- a/src/lib389/lib389/cli_idm/account.py
|
||||
+++ b/src/lib389/lib389/cli_idm/account.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2023, Red Hat inc,
|
||||
+# Copyright (C) 2024, Red Hat inc,
|
||||
# Copyright (C) 2018, William Brown <william@blackhats.net.au>
|
||||
# All rights reserved.
|
||||
#
|
||||
@@ -91,7 +91,6 @@ def entry_status(inst, basedn, log, args):
|
||||
|
||||
|
||||
def subtree_status(inst, basedn, log, args):
|
||||
- basedn = _get_dn_arg(args.basedn, msg="Enter basedn to check")
|
||||
filter = ""
|
||||
scope = ldap.SCOPE_SUBTREE
|
||||
epoch_inactive_time = None
|
||||
@@ -121,7 +120,6 @@ def subtree_status(inst, basedn, log, args):
|
||||
|
||||
|
||||
def bulk_update(inst, basedn, log, args):
|
||||
- basedn = _get_dn_arg(args.basedn, msg="Enter basedn to search")
|
||||
search_filter = "(objectclass=*)"
scope = ldap.SCOPE_SUBTREE
scope_str = "sub"
diff --git a/src/lib389/lib389/cli_idm/service.py b/src/lib389/lib389/cli_idm/service.py
index c62fc12d1..c2b2c8c84 100644
--- a/src/lib389/lib389/cli_idm/service.py
+++ b/src/lib389/lib389/cli_idm/service.py
@@ -57,7 +57,9 @@ def rename(inst, basedn, log, args, warn=True):
_generic_rename(inst, basedn, log.getChild('_generic_rename'), MANY, rdn, args)

def create_parser(subparsers):
- service_parser = subparsers.add_parser('service', help='Manage service accounts', formatter_class=CustomHelpFormatter)
+ service_parser = subparsers.add_parser('service',
+ help='Manage service accounts. The organizationalUnit (by default "ou=Services") '
+ 'needs to exist prior to managing service accounts.', formatter_class=CustomHelpFormatter)

subcommands = service_parser.add_subparsers(help='action')

diff --git a/src/lib389/lib389/idm/group.py b/src/lib389/lib389/idm/group.py
index 1b60a1f51..2cf2c7b23 100644
--- a/src/lib389/lib389/idm/group.py
+++ b/src/lib389/lib389/idm/group.py
@@ -1,6 +1,6 @@
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
-# Copyright (C) 2023 Red Hat, Inc.
+# Copyright (C) 2024 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -16,6 +16,8 @@ MUST_ATTRIBUTES = [
'cn',
]
RDN = 'cn'
+DEFAULT_BASEDN_RDN = 'ou=Groups'
+DEFAULT_BASEDN_RDN_ADMIN_GROUPS = 'ou=People'


class Group(DSLdapObject):
@@ -93,7 +95,7 @@ class Groups(DSLdapObjects):
:type basedn: str
"""

- def __init__(self, instance, basedn, rdn='ou=Groups'):
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
super(Groups, self).__init__(instance)
self._objectclasses = [
'groupOfNames',
@@ -140,7 +142,7 @@ class UniqueGroup(DSLdapObject):
class UniqueGroups(DSLdapObjects):
# WARNING!!!
# Use group, not unique group!!!
- def __init__(self, instance, basedn, rdn='ou=Groups'):
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
super(UniqueGroups, self).__init__(instance)
self._objectclasses = [
'groupOfUniqueNames',
@@ -203,7 +205,7 @@ class nsAdminGroups(DSLdapObjects):
:type rdn: str
"""

- def __init__(self, instance, basedn, rdn='ou=People'):
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN_ADMIN_GROUPS):
super(nsAdminGroups, self).__init__(instance)
self._objectclasses = [
'nsAdminGroup'
diff --git a/src/lib389/lib389/idm/posixgroup.py b/src/lib389/lib389/idm/posixgroup.py
index d1debcf12..45735c579 100644
--- a/src/lib389/lib389/idm/posixgroup.py
+++ b/src/lib389/lib389/idm/posixgroup.py
@@ -1,6 +1,6 @@
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
-# Copyright (C) 2023 Red Hat, Inc.
+# Copyright (C) 2024 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -17,6 +17,7 @@ MUST_ATTRIBUTES = [
'gidNumber',
]
RDN = 'cn'
+DEFAULT_BASEDN_RDN = 'ou=Groups'


class PosixGroup(DSLdapObject):
@@ -72,7 +73,7 @@ class PosixGroups(DSLdapObjects):
:type basedn: str
"""

- def __init__(self, instance, basedn, rdn='ou=Groups'):
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
super(PosixGroups, self).__init__(instance)
self._objectclasses = [
'groupOfNames',
diff --git a/src/lib389/lib389/idm/services.py b/src/lib389/lib389/idm/services.py
index d1e5b4693..e750a32c4 100644
--- a/src/lib389/lib389/idm/services.py
+++ b/src/lib389/lib389/idm/services.py
@@ -1,6 +1,6 @@
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
-# Copyright (C) 2021 Red Hat, Inc.
+# Copyright (C) 2024 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -16,6 +16,7 @@ RDN = 'cn'
MUST_ATTRIBUTES = [
'cn',
]
+DEFAULT_BASEDN_RDN = 'ou=Services'

class ServiceAccount(Account):
"""A single instance of Service entry
@@ -59,7 +60,7 @@ class ServiceAccounts(DSLdapObjects):
:type basedn: str
"""

- def __init__(self, instance, basedn, rdn='ou=Services'):
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
super(ServiceAccounts, self).__init__(instance)
self._objectclasses = [
'applicationProcess',
diff --git a/src/lib389/lib389/idm/user.py b/src/lib389/lib389/idm/user.py
index 1206a6e08..3b21ccf1c 100644
--- a/src/lib389/lib389/idm/user.py
+++ b/src/lib389/lib389/idm/user.py
@@ -1,6 +1,6 @@
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
-# Copyright (C) 2023 Red Hat, Inc.
+# Copyright (C) 2024 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -23,6 +23,7 @@ MUST_ATTRIBUTES = [
'homeDirectory',
]
RDN = 'uid'
+DEFAULT_BASEDN_RDN = 'ou=People'

TEST_USER_PROPERTIES = {
'uid': 'testuser',
@@ -201,7 +202,7 @@ class UserAccounts(DSLdapObjects):
:type rdn: str
"""

- def __init__(self, instance, basedn, rdn='ou=People'):
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
super(UserAccounts, self).__init__(instance)
self._objectclasses = [
'account',
--
2.48.1

559
SOURCES/0032-Issue-6910-Fix-latest-coverity-issues.patch
Normal file
@ -0,0 +1,559 @@
From dd40581c66c702a9a5d34ad1c498d8957be51f81 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 28 Jul 2025 17:12:33 -0400
Subject: [PATCH] Issue 6910 - Fix latest coverity issues

Description:

Fix various coverity/ASAN warnings:

- CID 1618831: Resource leak (RESOURCE_LEAK) - bdb_layer.c
- CID 1612606: Resource leak (RESOURCE_LEAK) - log.c
- CID 1611461: Uninitialized pointer read (UNINIT) - repl5_agmt.c
- CID 1568589: Dereference before null check (REVERSE_INULL) - repl5_agmt.c
- CID 1590353: Logically dead code (DEADCODE) - repl5_agmt.c
- CID 1611460: Logically dead code (DEADCODE) - control.c
- CID 1610568: Dereference after null check (FORWARD_NULL) - modify.c
- CID 1591259: Out-of-bounds read (OVERRUN) - memberof.c
- CID 1550231: Unsigned compared against 0 (NO_EFFECT) - memberof_config.c
- CID 1548904: Overflowed constant (INTEGER_OVERFLOW) - ch_malloc.c
- CID 1548902: Overflowed constant (INTEGER_OVERFLOW) - dse.lc
- CID 1548900: Overflowed return value (INTEGER_OVERFLOW) - acct_util.c
- CID 1548898: Overflowed constant (INTEGER_OVERFLOW) - parents.c
- CID 1546849: Resource leak (RESOURCE_LEAK) - referint.c
- ASAN - Use after free - automember.c

Relates: http://github.com/389ds/389-ds-base/issues/6910

Reviewed by: progier & spichugi(Thanks!)
---
ldap/servers/plugins/acctpolicy/acct_util.c | 6 ++-
ldap/servers/plugins/automember/automember.c | 9 +++--
ldap/servers/plugins/memberof/memberof.c | 15 +++++--
.../plugins/memberof/memberof_config.c | 24 ++++++------
ldap/servers/plugins/referint/referint.c | 11 +++++-
ldap/servers/plugins/replication/repl5_agmt.c | 39 ++++++++-----------
.../slapd/back-ldbm/db-bdb/bdb_import.c | 5 ++-
.../back-ldbm/db-bdb/bdb_instance_config.c | 3 +-
.../slapd/back-ldbm/db-bdb/bdb_layer.c | 13 +++++--
ldap/servers/slapd/back-ldbm/parents.c | 4 +-
ldap/servers/slapd/ch_malloc.c | 4 +-
ldap/servers/slapd/dse.c | 4 +-
ldap/servers/slapd/log.c | 5 ++-
ldap/servers/slapd/modify.c | 6 +--
ldap/servers/slapd/passwd_extop.c | 2 +-
ldap/servers/slapd/unbind.c | 12 ++++--
16 files changed, 98 insertions(+), 64 deletions(-)

diff --git a/ldap/servers/plugins/acctpolicy/acct_util.c b/ldap/servers/plugins/acctpolicy/acct_util.c
|
||||
index b27eeaff1..7735d10e6 100644
|
||||
--- a/ldap/servers/plugins/acctpolicy/acct_util.c
|
||||
+++ b/ldap/servers/plugins/acctpolicy/acct_util.c
|
||||
@@ -17,7 +17,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
Contributors:
|
||||
Hewlett-Packard Development Company, L.P.
|
||||
|
||||
-Copyright (C) 2021 Red Hat, Inc.
|
||||
+Copyright (C) 2025 Red Hat, Inc.
|
||||
******************************************************************************/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -248,6 +248,10 @@ gentimeToEpochtime(char *gentimestr)
|
||||
|
||||
/* Turn tm object into local epoch time */
|
||||
epochtime = mktime(&t);
|
||||
+ if (epochtime == (time_t) -1) {
|
||||
+ /* mktime failed */
|
||||
+ return 0;
|
||||
+ }
|
||||
|
||||
/* Turn local epoch time into GMT epoch time */
|
||||
epochtime -= zone_offset;
|
||||
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
|
||||
index f900db7f2..9eade495e 100644
|
||||
--- a/ldap/servers/plugins/automember/automember.c
|
||||
+++ b/ldap/servers/plugins/automember/automember.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2022 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -1756,9 +1756,10 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
|
||||
mod_pb = slapi_pblock_new();
|
||||
/* Do a single mod with error overrides for DEL/ADD */
|
||||
- result = slapi_single_modify_internal_override(mod_pb, slapi_sdn_new_dn_byval(group_dn), mods,
|
||||
- automember_get_plugin_id(), 0);
|
||||
-
|
||||
+ Slapi_DN *sdn = slapi_sdn_new_normdn_byref(group_dn);
|
||||
+ result = slapi_single_modify_internal_override(mod_pb, sdn, mods,
|
||||
+ automember_get_plugin_id(), 0);
|
||||
+ slapi_sdn_free(&sdn);
|
||||
if(add){
|
||||
if (result != LDAP_SUCCESS) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index 3775e52c9..82cb60c96 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2021 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -1657,6 +1657,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
|
||||
/* We already did the search for this backend, don't
|
||||
* do it again when we fall through */
|
||||
do_suffix_search = PR_FALSE;
|
||||
+ slapi_pblock_init(search_pb);
|
||||
}
|
||||
}
|
||||
} else if (!all_backends) {
|
||||
@@ -3755,6 +3756,10 @@ memberof_replace_list(Slapi_PBlock *pb, MemberOfConfig *config, Slapi_DN *group_
|
||||
|
||||
pre_index++;
|
||||
} else {
|
||||
+ if (pre_index >= pre_total || post_index >= post_total) {
|
||||
+ /* Don't overrun pre_array/post_array */
|
||||
+ break;
|
||||
+ }
|
||||
/* decide what to do */
|
||||
int cmp = memberof_compare(
|
||||
config,
|
||||
@@ -4445,10 +4450,12 @@ memberof_add_memberof_attr(LDAPMod **mods, const char *dn, char *add_oc)
|
||||
|
||||
while (1) {
|
||||
slapi_pblock_init(mod_pb);
|
||||
-
|
||||
+ Slapi_DN *sdn = slapi_sdn_new_normdn_byref(dn);
|
||||
/* Internal mod with error overrides for DEL/ADD */
|
||||
- rc = slapi_single_modify_internal_override(mod_pb, slapi_sdn_new_normdn_byref(dn), single_mod,
|
||||
- memberof_get_plugin_id(), SLAPI_OP_FLAG_BYPASS_REFERRALS);
|
||||
+ rc = slapi_single_modify_internal_override(mod_pb, sdn, single_mod,
|
||||
+ memberof_get_plugin_id(),
|
||||
+ SLAPI_OP_FLAG_BYPASS_REFERRALS);
|
||||
+ slapi_sdn_free(&sdn);
|
||||
if (rc == LDAP_OBJECT_CLASS_VIOLATION) {
|
||||
if (!add_oc || added_oc) {
|
||||
/*
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof_config.c b/ldap/servers/plugins/memberof/memberof_config.c
|
||||
index 964fcc2b8..e4da351d9 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof_config.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof_config.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2021 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -568,28 +568,28 @@ memberof_apply_config(Slapi_PBlock *pb __attribute__((unused)),
|
||||
slapi_filter_free(theConfig.group_filter, 1);
|
||||
|
||||
if (num_groupattrs > 1) {
|
||||
- int bytes_out = 0;
|
||||
- int filter_str_len = groupattr_name_len + (num_groupattrs * 4) + 4;
|
||||
+ size_t bytes_out = 0;
|
||||
+ size_t filter_str_len = groupattr_name_len + (num_groupattrs * 4) + 4;
|
||||
+ int32_t rc = 0;
|
||||
|
||||
/* Allocate enough space for the filter */
|
||||
filter_str = slapi_ch_malloc(filter_str_len);
|
||||
|
||||
/* Add beginning of filter. */
|
||||
- bytes_out = snprintf(filter_str, filter_str_len - bytes_out, "(|");
|
||||
- if (bytes_out<0) {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
- "snprintf unexpectly failed in memberof_apply_config.\n");
|
||||
+ rc = snprintf(filter_str, filter_str_len - bytes_out, "(|");
|
||||
+ if (rc < 0) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM, "snprintf unexpectly failed in memberof_apply_config.\n");
|
||||
*returncode = LDAP_UNWILLING_TO_PERFORM;
|
||||
goto done;
|
||||
+ } else {
|
||||
+ bytes_out = rc;
|
||||
}
|
||||
|
||||
/* Add filter section for each groupattr. */
|
||||
- for (i = 0; theConfig.groupattrs && theConfig.groupattrs[i]; i++) {
|
||||
- size_t bytes_read = snprintf(filter_str + bytes_out, filter_str_len - bytes_out, "(%s=*)",
|
||||
- theConfig.groupattrs[i]);
|
||||
+ for (size_t i=0; theConfig.groupattrs && theConfig.groupattrs[i]; i++) {
|
||||
+ int32_t bytes_read = snprintf(filter_str + bytes_out, filter_str_len - bytes_out, "(%s=*)", theConfig.groupattrs[i]);
|
||||
if (bytes_read<0) {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
- "snprintf unexpectly failed in memberof_apply_config.\n");
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM, "snprintf unexpectly failed in memberof_apply_config.\n");
|
||||
*returncode = LDAP_UNWILLING_TO_PERFORM;
|
||||
goto done;
|
||||
}
|
||||
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
|
||||
index a2f2e4706..cf79f973e 100644
|
||||
--- a/ldap/servers/plugins/referint/referint.c
|
||||
+++ b/ldap/servers/plugins/referint/referint.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2021 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -1492,6 +1492,15 @@ referint_thread_func(void *arg __attribute__((unused)))
|
||||
}
|
||||
|
||||
ptoken = ldap_utf8strtok_r(NULL, delimiter, &iter);
|
||||
+ if (ptoken == NULL) {
|
||||
+ /* Invalid line in referint log, skip it */
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, REFERINT_PLUGIN_SUBSYSTEM,
|
||||
+ "Skipping invalid referint log line: (%s)\n", thisline);
|
||||
+ slapi_sdn_free(&sdn);
|
||||
+ continue;
|
||||
+ }
|
||||
+
|
||||
+ slapi_sdn_free(&tmpsuperior);
|
||||
if (!strcasecmp(ptoken, "NULL")) {
|
||||
tmpsuperior = NULL;
|
||||
} else {
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
index c6cfcda07..9b2d82547 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2021 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -2628,31 +2628,26 @@ agmt_update_init_status(Repl_Agmt *ra)
|
||||
mod_idx++;
|
||||
}
|
||||
|
||||
- if (nb_mods) {
|
||||
- /* it is ok to release the lock here because we are done with the agreement data.
|
||||
- we have to do it before issuing the modify operation because it causes
|
||||
- agmtlist_notify_all to be called which uses the same lock - hence the deadlock */
|
||||
- PR_Unlock(ra->lock);
|
||||
-
|
||||
- pb = slapi_pblock_new();
|
||||
- mods[nb_mods] = NULL;
|
||||
+ /* it is ok to release the lock here because we are done with the agreement data.
|
||||
+ we have to do it before issuing the modify operation because it causes
|
||||
+ agmtlist_notify_all to be called which uses the same lock - hence the deadlock */
|
||||
+ PR_Unlock(ra->lock);
|
||||
|
||||
- slapi_modify_internal_set_pb_ext(pb, ra->dn, mods, NULL, NULL,
|
||||
- repl_get_plugin_identity(PLUGIN_MULTISUPPLIER_REPLICATION), 0);
|
||||
- slapi_modify_internal_pb(pb);
|
||||
+ pb = slapi_pblock_new();
|
||||
+ mods[nb_mods] = NULL;
|
||||
|
||||
- slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
|
||||
- if (rc != LDAP_SUCCESS && rc != LDAP_NO_SUCH_ATTRIBUTE) {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "agmt_update_consumer_ruv - "
|
||||
- "%s: agmt_update_consumer_ruv: "
|
||||
- "failed to update consumer's RUV; LDAP error - %d\n",
|
||||
- ra->long_name, rc);
|
||||
- }
|
||||
+ slapi_modify_internal_set_pb_ext(pb, ra->dn, mods, NULL, NULL,
|
||||
+ repl_get_plugin_identity(PLUGIN_MULTISUPPLIER_REPLICATION), 0);
|
||||
+ slapi_modify_internal_pb(pb);
|
||||
|
||||
- slapi_pblock_destroy(pb);
|
||||
- } else {
|
||||
- PR_Unlock(ra->lock);
|
||||
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
|
||||
+ if (rc != LDAP_SUCCESS && rc != LDAP_NO_SUCH_ATTRIBUTE) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "agmt_update_consumer_ruv - "
|
||||
+ "%s: agmt_update_consumer_ruv: failed to update consumer's RUV; LDAP error - %d\n",
|
||||
+ ra->long_name, rc);
|
||||
}
|
||||
+
|
||||
+ slapi_pblock_destroy(pb);
|
||||
slapi_ch_free((void **)&mods);
|
||||
slapi_mod_done(&smod_start_time);
|
||||
slapi_mod_done(&smod_end_time);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
|
||||
index 39edb7d0e..2bb6b0267 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2020 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -947,6 +947,7 @@ bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job)
|
||||
EQ_PREFIX, (u_long)id);
|
||||
key.size++; /* include the null terminator */
|
||||
ret = NEW_IDL_NO_ALLID;
|
||||
+ idl_free(&children);
|
||||
children = idl_fetch(be, db_pid, &key, txn, ai_pid, &ret);
|
||||
if (ret != 0) {
|
||||
ldbm_nasty("bdb_ancestorid_new_idl_create_index", sourcefile, 13070, ret);
|
||||
@@ -957,6 +958,7 @@ bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job)
|
||||
if (job->flags & FLAG_ABORT) {
|
||||
import_log_notice(job, SLAPI_LOG_ERR, "bdb_ancestorid_new_idl_create_index",
|
||||
"ancestorid creation aborted.");
|
||||
+ idl_free(&children);
|
||||
ret = -1;
|
||||
break;
|
||||
}
|
||||
@@ -1290,6 +1292,7 @@ bdb_update_subordinatecounts(backend *be, ImportJob *job, DB_TXN *txn)
|
||||
}
|
||||
bdb_close_subcount_cursor(&c_entryrdn);
|
||||
bdb_close_subcount_cursor(&c_objectclass);
|
||||
+
|
||||
return ret;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c
|
||||
index bb515a23f..44a624fde 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2020 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -261,6 +261,7 @@ bdb_instance_cleanup(struct ldbm_instance *inst)
|
||||
if (inst_dirp && *inst_dir) {
|
||||
return_value = env->remove(env, inst_dirp, 0);
|
||||
} else {
|
||||
+ slapi_ch_free((void **)&env);
|
||||
return_value = -1;
|
||||
}
|
||||
if (return_value == EBUSY) {
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
index 4f069197e..e21f418be 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2023 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -2034,9 +2034,13 @@ bdb_pre_close(struct ldbminfo *li)
|
||||
conf = (bdb_config *)li->li_dblayer_config;
|
||||
bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
|
||||
|
||||
+ if (pEnv == NULL) {
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
pthread_mutex_lock(&pEnv->bdb_thread_count_lock);
|
||||
|
||||
- if (conf->bdb_stop_threads || !pEnv) {
|
||||
+ if (conf->bdb_stop_threads) {
|
||||
/* already stopped. do nothing... */
|
||||
goto timeout_escape;
|
||||
}
|
||||
@@ -2210,6 +2214,7 @@ bdb_remove_env(struct ldbminfo *li)
|
||||
}
|
||||
if (NULL == li) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "bdb_remove_env", "No ldbm info is given\n");
|
||||
+ slapi_ch_free((void **)&env);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -2219,10 +2224,11 @@ bdb_remove_env(struct ldbminfo *li)
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR,
|
||||
"bdb_remove_env", "Failed to remove DB environment files. "
|
||||
- "Please remove %s/__db.00# (# is 1 through 6)\n",
|
||||
+ "Please remove %s/__db.00# (# is 1 through 6)\n",
|
||||
home_dir);
|
||||
}
|
||||
}
|
||||
+ slapi_ch_free((void **)&env);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@@ -6359,6 +6365,7 @@ bdb_back_ctrl(Slapi_Backend *be, int cmd, void *info)
|
||||
db->close(db, 0);
|
||||
rc = bdb_db_remove_ex((bdb_db_env *)priv->dblayer_env, path, NULL, PR_TRUE);
|
||||
inst->inst_changelog = NULL;
|
||||
+ slapi_ch_free_string(&path);
|
||||
slapi_ch_free_string(&instancedir);
|
||||
}
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/parents.c b/ldap/servers/slapd/back-ldbm/parents.c
|
||||
index 31107591e..52c665ca4 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/parents.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/parents.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -123,7 +123,7 @@ parent_update_on_childchange(modify_context *mc, int op, size_t *new_sub_count)
|
||||
/* Now compute the new value */
|
||||
if ((PARENTUPDATE_ADD == op) || (PARENTUPDATE_RESURECT == op)) {
|
||||
current_sub_count++;
|
||||
- } else {
|
||||
+ } else if (current_sub_count > 0) {
|
||||
current_sub_count--;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/ch_malloc.c b/ldap/servers/slapd/ch_malloc.c
|
||||
index 75e791135..bacbc9371 100644
|
||||
--- a/ldap/servers/slapd/ch_malloc.c
|
||||
+++ b/ldap/servers/slapd/ch_malloc.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -234,7 +234,7 @@ slapi_ch_bvecdup(struct berval **v)
|
||||
++i;
|
||||
newberval = (struct berval **)slapi_ch_malloc((i + 1) * sizeof(struct berval *));
|
||||
newberval[i] = NULL;
|
||||
- while (i-- > 0) {
|
||||
+ while (i > 0 && i-- > 0) {
|
||||
newberval[i] = slapi_ch_bvdup(v[i]);
|
||||
}
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
|
||||
index 0f266f0d7..a0db367b2 100644
|
||||
--- a/ldap/servers/slapd/dse.c
|
||||
+++ b/ldap/servers/slapd/dse.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -637,7 +637,7 @@ dse_updateNumSubordinates(Slapi_Entry *entry, int op)
|
||||
/* Now compute the new value */
|
||||
if (SLAPI_OPERATION_ADD == op) {
|
||||
current_sub_count++;
|
||||
- } else {
|
||||
+ } else if (current_sub_count > 0) {
|
||||
current_sub_count--;
|
||||
}
|
||||
{
|
||||
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
|
||||
index 178d29b89..58f9fb4d6 100644
|
||||
--- a/ldap/servers/slapd/log.c
|
||||
+++ b/ldap/servers/slapd/log.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005-2024 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* Copyright (C) 2010 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -199,6 +199,7 @@ compress_log_file(char *log_name, int32_t mode)
|
||||
|
||||
if ((source = fopen(log_name, "r")) == NULL) {
|
||||
/* Failed to open log file */
|
||||
+ /* coverity[leaked_storage] gzclose does close FD */
|
||||
gzclose(outfile);
|
||||
return -1;
|
||||
}
|
||||
@@ -209,11 +210,13 @@ compress_log_file(char *log_name, int32_t mode)
|
||||
if (bytes_written == 0)
|
||||
{
|
||||
fclose(source);
|
||||
+ /* coverity[leaked_storage] gzclose does close FD */
|
||||
gzclose(outfile);
|
||||
return -1;
|
||||
}
|
||||
bytes_read = fread(buf, 1, LOG_CHUNK, source);
|
||||
}
|
||||
+ /* coverity[leaked_storage] gzclose does close FD */
|
||||
gzclose(outfile);
|
||||
fclose(source);
|
||||
PR_Delete(log_name); /* remove the old uncompressed log */
|
||||
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
|
||||
index 0e2abea18..b0066faf8 100644
|
||||
--- a/ldap/servers/slapd/modify.c
|
||||
+++ b/ldap/servers/slapd/modify.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2009 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* Copyright (C) 2009, 2010 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -498,7 +498,7 @@ slapi_modify_internal_set_pb_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod
|
||||
*
|
||||
* Any other errors encountered during the operation will be returned as-is.
|
||||
*/
|
||||
-int
|
||||
+int
|
||||
slapi_single_modify_internal_override(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod **mod, Slapi_ComponentId *plugin_id, int op_flags)
|
||||
{
|
||||
int rc = 0;
|
||||
@@ -512,7 +512,7 @@ slapi_single_modify_internal_override(Slapi_PBlock *pb, const Slapi_DN *sdn, LDA
|
||||
!pb ? "pb " : "",
|
||||
!sdn ? "sdn " : "",
|
||||
!mod ? "mod " : "",
|
||||
- !mod[0] ? "mod[0] " : "");
|
||||
+ !mod || !mod[0] ? "mod[0] " : "");
|
||||
|
||||
return LDAP_PARAM_ERROR;
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c
|
||||
index 0296d64fb..3ade0be7f 100644
|
||||
--- a/ldap/servers/slapd/passwd_extop.c
|
||||
+++ b/ldap/servers/slapd/passwd_extop.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
diff --git a/ldap/servers/slapd/unbind.c b/ldap/servers/slapd/unbind.c
|
||||
index 89f6ef932..d0562a7c9 100644
|
||||
--- a/ldap/servers/slapd/unbind.c
|
||||
+++ b/ldap/servers/slapd/unbind.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -87,8 +87,12 @@ do_unbind(Slapi_PBlock *pb)
|
||||
/* pass the unbind to all backends */
|
||||
be_unbindall(pb_conn, operation);
|
||||
|
||||
-free_and_return:;
|
||||
+free_and_return:
|
||||
|
||||
- /* close the connection to the client */
|
||||
- disconnect_server(pb_conn, operation->o_connid, operation->o_opid, SLAPD_DISCONNECT_UNBIND, 0);
|
||||
+ /* close the connection to the client after refreshing the operation */
|
||||
+ slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
|
||||
+ disconnect_server(pb_conn,
|
||||
+ operation ? operation->o_connid : -1,
|
||||
+ operation ? operation->o_opid : -1,
|
||||
+ SLAPD_DISCONNECT_UNBIND, 0);
|
||||
}
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,52 +0,0 @@
From ee03e8443a108cff0cc4c7a03962fdc3a1fbf94d Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Wed, 16 Oct 2024 19:24:55 -0700
Subject: [PATCH] Issue 6067 - Update dsidm to prioritize basedn from .dsrc
 over interactive input (#6362)

Description: Modifies dsidm CLI tool to check for the basedn in the .dsrc configuration file
when the -b option is not provided.
Previously, users were required to always specify the basedn interactively if -b was omitted,
even if it was available in .dsrc.
Now, the basedn is determined by first checking the -b option, then the .dsrc file, and finally
prompting the user if neither is set.

Related: https://github.com/389ds/389-ds-base/issues/6067

Reviewed by: @Firstyear (Thanks!)
---
src/lib389/cli/dsidm | 2 +-
src/lib389/lib389/cli_idm/__init__.py | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/lib389/cli/dsidm b/src/lib389/cli/dsidm
index 970973f4f..d318664bc 100755
--- a/src/lib389/cli/dsidm
+++ b/src/lib389/cli/dsidm
@@ -128,7 +128,7 @@ if __name__ == '__main__':
result = False
try:
inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose, args=args)
- basedn = _get_basedn_arg(inst, args, log, msg="Enter basedn")
+ basedn = _get_basedn_arg(inst, args, dsrc_inst['basedn'], log, msg="Enter basedn")
if basedn is None:
errmsg = "Must provide a basedn!"
if args.json:
diff --git a/src/lib389/lib389/cli_idm/__init__.py b/src/lib389/lib389/cli_idm/__init__.py
index e3622246d..1f3e2dc86 100644
--- a/src/lib389/lib389/cli_idm/__init__.py
+++ b/src/lib389/lib389/cli_idm/__init__.py
@@ -52,8 +52,8 @@ def _get_args(args, kws):
return kwargs


-def _get_basedn_arg(inst, args, log, msg=None):
- basedn_arg = _get_dn_arg(args.basedn, msg="Enter basedn")
+def _get_basedn_arg(inst, args, basedn, log, msg=None):
+ basedn_arg = _get_dn_arg(basedn, msg="Enter basedn")
if not DSLdapObject(inst, basedn_arg).exists():
raise ValueError(f'The base DN "{basedn_arg}" does not exist.')

--
2.48.1

@ -0,0 +1,35 @@
From b79da81cd24edd12af1da894d6dbd6f08995bc9d Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Mon, 11 Aug 2025 13:19:13 +0200
Subject: [PATCH] Issue 6929 - Compilation failure with rust-1.89 on Fedora ELN

Bug Description:
The `ValueArrayRefIter` struct has a lifetime parameter `'a`.
But in the `iter` method the return type doesn't specify the lifetime parameter.

Fix Description:
Make the lifetime explicit.

Fixes: https://github.com/389ds/389-ds-base/issues/6929

Reviewed by: @droideck (Thanks!)
---
src/slapi_r_plugin/src/value.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/slapi_r_plugin/src/value.rs b/src/slapi_r_plugin/src/value.rs
index 2fd35c808..fec74ac25 100644
--- a/src/slapi_r_plugin/src/value.rs
+++ b/src/slapi_r_plugin/src/value.rs
@@ -61,7 +61,7 @@ impl ValueArrayRef {
ValueArrayRef { raw_slapi_val }
}

- pub fn iter(&self) -> ValueArrayRefIter {
+ pub fn iter(&self) -> ValueArrayRefIter<'_> {
ValueArrayRefIter {
idx: 0,
va_ref: &self,
--
2.49.0

@ -1,520 +0,0 @@
From b8c079c770d3eaa4de49e997d42e1501c28a153b Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 8 Jul 2024 11:19:09 +0200
Subject: [PATCH] Issue 6155 - ldap-agent fails to start because of permission
 error (#6179)

Issue: dirsrv-snmp service fails to starts when SELinux is enforced because of AVC preventing to open some files
One workaround is to use the dac_override capability but it is a bad practice.
Fix: Setting proper permissions:

Running ldap-agent with uid=root and gid=dirsrv to be able to access both snmp and dirsrv resources.
Setting read permission on the group for the dse.ldif file
Setting r/w permissions on the group for the snmp semaphore and mmap file
For that one special care is needed because ns-slapd umask overrides the file creation permission
as is better to avoid changing the umask (changing umask within the code is not thread safe,
and the current 0022 umask value is correct for most of the files) so the safest way is to chmod the snmp file
if the needed permission are not set.
Issue: #6155

Reviewed by: @droideck , @vashirov (Thanks ! )

(cherry picked from commit eb7e57d77b557b63c65fdf38f9069893b021f049)
---
.github/scripts/generate_matrix.py | 4 +-
dirsrvtests/tests/suites/snmp/snmp.py | 214 ++++++++++++++++++++++++++
ldap/servers/slapd/agtmmap.c | 72 ++++++++-
ldap/servers/slapd/agtmmap.h | 13 ++
ldap/servers/slapd/dse.c | 6 +-
ldap/servers/slapd/slap.h | 6 +
ldap/servers/slapd/snmp_collator.c | 4 +-
src/lib389/lib389/instance/setup.py | 5 +
wrappers/systemd-snmp.service.in | 1 +
9 files changed, 313 insertions(+), 12 deletions(-)
create mode 100644 dirsrvtests/tests/suites/snmp/snmp.py

diff --git a/.github/scripts/generate_matrix.py b/.github/scripts/generate_matrix.py
|
||||
index 584374597..8d67a1dc7 100644
|
||||
--- a/.github/scripts/generate_matrix.py
|
||||
+++ b/.github/scripts/generate_matrix.py
|
||||
@@ -21,8 +21,8 @@ else:
|
||||
# Use tests from the source
|
||||
suites = next(os.walk('dirsrvtests/tests/suites/'))[1]
|
||||
|
||||
- # Filter out snmp as it is an empty directory:
|
||||
- suites.remove('snmp')
|
||||
+ # Filter out webui because of broken tests
|
||||
+ suites.remove('webui')
|
||||
|
||||
# Run each replication test module separately to speed things up
|
||||
suites.remove('replication')
|
||||
diff --git a/dirsrvtests/tests/suites/snmp/snmp.py b/dirsrvtests/tests/suites/snmp/snmp.py
|
||||
new file mode 100644
|
||||
index 000000000..0952deb40
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/snmp/snmp.py
|
||||
@@ -0,0 +1,214 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import os
|
||||
+import pytest
|
||||
+import logging
|
||||
+import subprocess
|
||||
+import ldap
|
||||
+from datetime import datetime
|
||||
+from shutil import copyfile
|
||||
+from lib389.topologies import topology_m2 as topo_m2
|
||||
+from lib389.utils import selinux_present
|
||||
+
|
||||
+
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
+if DEBUGGING:
|
||||
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+else:
|
||||
+ logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+
|
||||
+SNMP_USER = 'user_name'
|
||||
+SNMP_PASSWORD = 'authentication_password'
|
||||
+SNMP_PRIVATE = 'private_password'
|
||||
+
|
||||
+# LDAP OID in MIB
|
||||
+LDAP_OID = '.1.3.6.1.4.1.2312.6.1.1'
|
||||
+LDAPCONNECTIONS_OID = f'{LDAP_OID}.21'
|
||||
+
|
||||
+
|
||||
+def run_cmd(cmd, check_returncode=True):
|
||||
+ """Run a command"""
|
||||
+
|
||||
+ log.info(f'Run: {cmd}')
|
||||
+ result = subprocess.run(cmd, capture_output=True, universal_newlines=True)
|
||||
+ log.info(f'STDOUT of {cmd} is:\n{result.stdout}')
|
||||
+ log.info(f'STDERR of {cmd} is:\n{result.stderr}')
|
||||
+ if check_returncode:
|
||||
+ result.check_returncode()
|
||||
+ return result
|
||||
+
|
||||
+
|
||||
+def add_lines(lines, filename):
|
||||
+ """Add lines that are not already present at the end of a file"""
|
||||
+
|
||||
+ log.info(f'add_lines({lines}, {filename})')
|
||||
+ try:
|
||||
+ with open(filename, 'r') as fd:
|
||||
+ for line in fd:
|
||||
+ try:
|
||||
+ lines.remove(line.strip())
|
||||
+ except ValueError:
|
||||
+ pass
|
||||
+ except FileNotFoundError:
|
||||
+ pass
|
||||
+ if lines:
|
||||
+ with open(filename, 'a') as fd:
|
||||
+ for line in lines:
|
||||
+ fd.write(f'{line}\n')
|
||||
+
|
||||
+
|
||||
+def remove_lines(lines, filename):
|
||||
+ """Remove lines in a file"""
|
||||
+
|
||||
+ log.info(f'remove_lines({lines}, {filename})')
|
||||
+ file_lines = []
|
||||
+ with open(filename, 'r') as fd:
|
||||
+ for line in fd:
|
||||
+ if not line.strip() in lines:
|
||||
+ file_lines.append(line)
|
||||
+ with open(filename, 'w') as fd:
|
||||
+ for line in file_lines:
|
||||
+ fd.write(line)
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="module")
|
||||
+def setup_snmp(topo_m2, request):
|
||||
+ """Install snmp and configure it
|
||||
+
|
||||
+ Returns the time just before dirsrv-snmp get restarted
|
||||
+ """
|
||||
+
|
||||
+ inst1 = topo_m2.ms["supplier1"]
|
||||
+ inst2 = topo_m2.ms["supplier2"]
|
||||
+
|
||||
+ # Check for the test prerequisites
|
||||
+ if os.getuid() != 0:
|
||||
+ pytest.skip('This test should be run by root superuser')
|
||||
+ return None
|
||||
+ if not inst1.with_systemd_running():
|
||||
+ pytest.skip('This test requires systemd')
|
||||
+ return None
|
||||
+ required_packages = {
|
||||
+ '389-ds-base-snmp': os.path.join(inst1.get_sbin_dir(), 'ldap-agent'),
|
||||
+ 'net-snmp': '/etc/snmp/snmpd.conf', }
|
||||
+ skip_msg = ""
|
||||
+ for package,file in required_packages.items():
|
||||
+ if not os.path.exists(file):
|
||||
+ skip_msg += f"Package {package} is not installed ({file} is missing).\n"
|
||||
+ if skip_msg != "":
|
||||
+ pytest.skip(f'This test requires the following package(s): {skip_msg}')
|
||||
+ return None
|
||||
+
|
||||
+ # Install snmp
|
||||
+ # run_cmd(['/usr/bin/dnf', 'install', '-y', 'net-snmp', 'net-snmp-utils', '389-ds-base-snmp'])
|
||||
+
|
||||
+ # Prepare the lines to add/remove in files:
|
||||
+ # master agentx
|
||||
+ # snmp user (user_name - authentication_password - private_password)
|
||||
+ # ldap_agent ds instances
|
||||
+ #
|
||||
+ # Adding rwuser and createUser lines is the same as running:
|
||||
+ # net-snmp-create-v3-user -A authentication_password -a SHA -X private_password -x AES user_name
|
||||
+ # but has the advantage of removing the user at cleanup phase
|
||||
+ #
|
||||
+ agent_cfg = '/etc/dirsrv/config/ldap-agent.conf'
|
||||
+ lines_dict = { '/etc/snmp/snmpd.conf' : ['master agentx', f'rwuser {SNMP_USER}'],
|
||||
+ '/var/lib/net-snmp/snmpd.conf' : [
|
||||
+ f'createUser {SNMP_USER} SHA "{SNMP_PASSWORD}" AES "{SNMP_PRIVATE}"',],
|
||||
+ agent_cfg : [] }
|
||||
+ for inst in topo_m2:
|
||||
+ lines_dict[agent_cfg].append(f'server slapd-{inst.serverid}')
|
||||
+
|
||||
+ # Prepare the cleanup
|
||||
+ def fin():
|
||||
+ run_cmd(['systemctl', 'stop', 'dirsrv-snmp'])
|
||||
+ if not DEBUGGING:
|
||||
+ run_cmd(['systemctl', 'stop', 'snmpd'])
|
||||
+ try:
|
||||
+ os.remove('/usr/share/snmp/mibs/redhat-directory.mib')
|
||||
+ except FileNotFoundError:
|
||||
+ pass
|
||||
+ for filename,lines in lines_dict.items():
|
||||
+ remove_lines(lines, filename)
|
||||
+ run_cmd(['systemctl', 'start', 'snmpd'])
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Copy RHDS MIB in default MIB search path (Ugly because I have not found how to change the search path)
|
||||
+ copyfile('/usr/share/dirsrv/mibs/redhat-directory.mib', '/usr/share/snmp/mibs/redhat-directory.mib')
|
||||
+
|
||||
+ run_cmd(['systemctl', 'stop', 'snmpd'])
|
||||
+ for filename,lines in lines_dict.items():
|
||||
+ add_lines(lines, filename)
|
||||
+
|
||||
+ run_cmd(['systemctl', 'start', 'snmpd'])
|
||||
+
|
||||
+ curtime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
||||
+
|
||||
+ run_cmd(['systemctl', 'start', 'dirsrv-snmp'])
|
||||
+ return curtime
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(not os.path.exists('/usr/bin/snmpwalk'), reason="net-snmp-utils package is not installed")
|
||||
+def test_snmpwalk(topo_m2, setup_snmp):
|
||||
+ """snmp smoke tests.
|
||||
+
|
||||
+ :id: e5d29998-1c21-11ef-a654-482ae39447e5
|
||||
+ :setup: Two suppliers replication setup, snmp
|
||||
+ :steps:
|
||||
+ 1. use snmpwalk to display LDAP statistics
|
||||
+ 2. use snmpwalk to get the number of open connections
|
||||
+ :expectedresults:
|
||||
+ 1. Success and no messages in stderr
|
||||
+ 2. The number of open connections should be positive
|
||||
+ """
|
||||
+
|
||||
+ inst1 = topo_m2.ms["supplier1"]
|
||||
+ inst2 = topo_m2.ms["supplier2"]
|
||||
+
|
||||
+
|
||||
+ cmd = [ '/usr/bin/snmpwalk', '-v3', '-u', SNMP_USER, '-l', 'AuthPriv',
|
||||
+ '-m', '+RHDS-MIB', '-A', SNMP_PASSWORD, '-a', 'SHA',
|
||||
+ '-X', SNMP_PRIVATE, '-x', 'AES', 'localhost',
|
||||
+ LDAP_OID ]
|
||||
+ result = run_cmd(cmd)
|
||||
+ assert not result.stderr
|
||||
+
|
||||
+ cmd = [ '/usr/bin/snmpwalk', '-v3', '-u', SNMP_USER, '-l', 'AuthPriv',
|
||||
+ '-m', '+RHDS-MIB', '-A', SNMP_PASSWORD, '-a', 'SHA',
|
||||
+ '-X', SNMP_PRIVATE, '-x', 'AES', 'localhost',
|
||||
+ f'{LDAPCONNECTIONS_OID}.{inst1.port}', '-Ov' ]
|
||||
+ result = run_cmd(cmd)
|
||||
+ nbconns = int(result.stdout.split()[1])
|
||||
+ log.info(f'There are {nbconns} open connections on {inst1.serverid}')
|
||||
+ assert nbconns > 0
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(not selinux_present(), reason="SELinux is not enabled")
|
||||
+def test_snmp_avc(topo_m2, setup_snmp):
|
||||
+ """snmp smoke tests.
|
||||
+
|
||||
+ :id: fb79728e-1d0d-11ef-9213-482ae39447e5
|
||||
+ :setup: Two suppliers replication setup, snmp
|
||||
+ :steps:
|
||||
+ 1. Get the system journal about ldap-agent
|
||||
+ :expectedresults:
|
||||
+ 1. No AVC should be present
|
||||
+ """
|
||||
+ result = run_cmd(['journalctl', '-S', setup_snmp, '-g', 'ldap-agent'])
|
||||
+ assert not 'AVC' in result.stdout
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main("-s %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/slapd/agtmmap.c b/ldap/servers/slapd/agtmmap.c
|
||||
index bc5fe1ee1..4dc67dcfb 100644
|
||||
--- a/ldap/servers/slapd/agtmmap.c
|
||||
+++ b/ldap/servers/slapd/agtmmap.c
|
||||
@@ -34,6 +34,70 @@
|
||||
agt_mmap_context_t mmap_tbl[2] = {{AGT_MAP_UNINIT, -1, (caddr_t)-1},
|
||||
{AGT_MAP_UNINIT, -1, (caddr_t)-1}};
|
||||
|
||||
+#define CHECK_MAP_FAILURE(addr) ((addr)==NULL || (addr) == (caddr_t) -1)
|
||||
+
|
||||
+
|
||||
+/****************************************************************************
|
||||
+ *
|
||||
+ * agt_set_fmode () - try to increase file mode if some flags are missing.
|
||||
+ *
|
||||
+ *
|
||||
+ * Inputs:
|
||||
+ * fd -> The file descriptor.
|
||||
+ *
|
||||
+ * mode -> the wanted mode
|
||||
+ *
|
||||
+ * Outputs: None
|
||||
+ * Return Values: None
|
||||
+ *
|
||||
+ ****************************************************************************/
|
||||
+static void
|
||||
+agt_set_fmode(int fd, mode_t mode)
|
||||
+{
|
||||
+ /* ns-slapd umask is 0022 which is usually fine.
|
||||
+ * but ldap-agen needs S_IWGRP permission on snmp semaphore and mmap file
|
||||
+ * ( when SELinux is enforced process with uid=0 does not bypass the file permission
|
||||
+ * (unless the unfamous dac_override capability is set)
|
||||
+ * Changing umask could lead to race conditions so it is better to check the
|
||||
+ * file permission and change them if needed and if the process own the file.
|
||||
+ */
|
||||
+ struct stat fileinfo = {0};
|
||||
+ if (fstat(fd, &fileinfo) == 0 && fileinfo.st_uid == getuid() &&
|
||||
+ (fileinfo.st_mode & mode) != mode) {
|
||||
+ (void) fchmod(fd, fileinfo.st_mode | mode);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+/****************************************************************************
|
||||
+ *
|
||||
+ * agt_sem_open () - Like sem_open but ignores umask
|
||||
+ *
|
||||
+ *
|
||||
+ * Inputs: see sem_open man page.
|
||||
+ * Outputs: see sem_open man page.
|
||||
+ * Return Values: see sem_open man page.
|
||||
+ *
|
||||
+ ****************************************************************************/
|
||||
+sem_t *
|
||||
+agt_sem_open(const char *name, int oflag, mode_t mode, unsigned int value)
|
||||
+{
|
||||
+ sem_t *sem = sem_open(name, oflag, mode, value);
|
||||
+ char *semname = NULL;
|
||||
+
|
||||
+ if (sem != NULL) {
|
||||
+ if (asprintf(&semname, "/dev/shm/sem.%s", name+1) > 0) {
|
||||
+ int fd = open(semname, O_RDONLY);
|
||||
+ if (fd >= 0) {
|
||||
+ agt_set_fmode(fd, mode);
|
||||
+ (void) close(fd);
|
||||
+ }
|
||||
+ free(semname);
|
||||
+ semname = NULL;
|
||||
+ }
|
||||
+ }
|
||||
+ return sem;
|
||||
+}
|
||||
+
|
||||
/****************************************************************************
|
||||
*
|
||||
* agt_mopen_stats () - open and Memory Map the stats file. agt_mclose_stats()
|
||||
@@ -52,7 +116,6 @@ agt_mmap_context_t mmap_tbl[2] = {{AGT_MAP_UNINIT, -1, (caddr_t)-1},
|
||||
* as defined in <errno.h>, otherwise.
|
||||
*
|
||||
****************************************************************************/
|
||||
-
|
||||
int
|
||||
agt_mopen_stats(char *statsfile, int mode, int *hdl)
|
||||
{
|
||||
@@ -64,6 +127,7 @@ agt_mopen_stats(char *statsfile, int mode, int *hdl)
|
||||
int err;
|
||||
size_t sz;
|
||||
struct stat fileinfo;
|
||||
+ mode_t rw_mode = S_IWUSR | S_IRUSR | S_IRGRP | S_IWGRP | S_IROTH;
|
||||
|
||||
switch (mode) {
|
||||
case O_RDONLY:
|
||||
@@ -128,10 +192,7 @@ agt_mopen_stats(char *statsfile, int mode, int *hdl)
|
||||
break;
|
||||
|
||||
case O_RDWR:
|
||||
- fd = open(path,
|
||||
- O_RDWR | O_CREAT,
|
||||
- S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH);
|
||||
-
|
||||
+ fd = open(path, O_RDWR | O_CREAT, rw_mode);
|
||||
if (fd < 0) {
|
||||
err = errno;
|
||||
#if (0)
|
||||
@@ -140,6 +201,7 @@ agt_mopen_stats(char *statsfile, int mode, int *hdl)
|
||||
rc = err;
|
||||
goto bail;
|
||||
}
|
||||
+ agt_set_fmode(fd, rw_mode);
|
||||
|
||||
if (fstat(fd, &fileinfo) != 0) {
|
||||
close(fd);
|
||||
diff --git a/ldap/servers/slapd/agtmmap.h b/ldap/servers/slapd/agtmmap.h
|
||||
index fb27ab2c4..99a8584a3 100644
|
||||
--- a/ldap/servers/slapd/agtmmap.h
|
||||
+++ b/ldap/servers/slapd/agtmmap.h
|
||||
@@ -28,6 +28,7 @@
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <fcntl.h>
|
||||
+#include <semaphore.h>
|
||||
#include <errno.h>
|
||||
#include "nspr.h"
|
||||
|
||||
@@ -188,6 +189,18 @@ int agt_mclose_stats(int hdl);
|
||||
|
||||
int agt_mread_stats(int hdl, struct hdr_stats_t *, struct ops_stats_t *, struct entries_stats_t *);
|
||||
|
||||
+/****************************************************************************
|
||||
+ *
|
||||
+ * agt_sem_open () - Like sem_open but ignores umask
|
||||
+ *
|
||||
+ *
|
||||
+ * Inputs: see sem_open man page.
|
||||
+ * Outputs: see sem_open man page.
|
||||
+ * Return Values: see sem_open man page.
|
||||
+ *
|
||||
+ ****************************************************************************/
|
||||
+sem_t *agt_sem_open(const char *name, int oflag, mode_t mode, unsigned int value);
|
||||
+
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
|
||||
index b04fafde6..f1e48c6b1 100644
|
||||
--- a/ldap/servers/slapd/dse.c
|
||||
+++ b/ldap/servers/slapd/dse.c
|
||||
@@ -683,7 +683,7 @@ dse_read_one_file(struct dse *pdse, const char *filename, Slapi_PBlock *pb, int
|
||||
"The configuration file %s could not be accessed, error %d\n",
|
||||
filename, rc);
|
||||
rc = 0; /* Fail */
|
||||
- } else if ((prfd = PR_Open(filename, PR_RDONLY, SLAPD_DEFAULT_FILE_MODE)) == NULL) {
|
||||
+ } else if ((prfd = PR_Open(filename, PR_RDONLY, SLAPD_DEFAULT_DSE_FILE_MODE)) == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "dse_read_one_file",
|
||||
"The configuration file %s could not be read. " SLAPI_COMPONENT_NAME_NSPR " %d (%s)\n",
|
||||
filename,
|
||||
@@ -871,7 +871,7 @@ dse_rw_permission_to_one_file(const char *name, int loglevel)
|
||||
PRFileDesc *prfd;
|
||||
|
||||
prfd = PR_Open(name, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE,
|
||||
- SLAPD_DEFAULT_FILE_MODE);
|
||||
+ SLAPD_DEFAULT_DSE_FILE_MODE);
|
||||
if (NULL == prfd) {
|
||||
prerr = PR_GetError();
|
||||
accesstype = "create";
|
||||
@@ -970,7 +970,7 @@ dse_write_file_nolock(struct dse *pdse)
|
||||
fpw.fpw_prfd = NULL;
|
||||
|
||||
if (NULL != pdse->dse_filename) {
|
||||
- if ((fpw.fpw_prfd = PR_Open(pdse->dse_tmpfile, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, SLAPD_DEFAULT_FILE_MODE)) == NULL) {
|
||||
+ if ((fpw.fpw_prfd = PR_Open(pdse->dse_tmpfile, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, SLAPD_DEFAULT_DSE_FILE_MODE)) == NULL) {
|
||||
rc = PR_GetOSError();
|
||||
slapi_log_err(SLAPI_LOG_ERR, "dse_write_file_nolock", "Cannot open "
|
||||
"temporary DSE file \"%s\" for update: OS error %d (%s)\n",
|
||||
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
|
||||
index 469874fd1..927576b70 100644
|
||||
--- a/ldap/servers/slapd/slap.h
|
||||
+++ b/ldap/servers/slapd/slap.h
|
||||
@@ -238,6 +238,12 @@ typedef void (*VFPV)(); /* takes undefined arguments */
|
||||
*/
|
||||
|
||||
#define SLAPD_DEFAULT_FILE_MODE S_IRUSR | S_IWUSR
|
||||
+/* ldap_agent run as uid=root gid=dirsrv and requires S_IRGRP | S_IWGRP
|
||||
+ * on semaphore and mmap file if SELinux is enforced.
|
||||
+ */
|
||||
+#define SLAPD_DEFAULT_SNMP_FILE_MODE S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP
|
||||
+/* ldap_agent run as uid=root gid=dirsrv and requires S_IRGRP on dse.ldif if SELinux is enforced. */
|
||||
+#define SLAPD_DEFAULT_DSE_FILE_MODE S_IRUSR | S_IWUSR | S_IRGRP
|
||||
#define SLAPD_DEFAULT_DIR_MODE S_IRWXU
|
||||
#define SLAPD_DEFAULT_IDLE_TIMEOUT 3600 /* seconds - 0 == never */
|
||||
#define SLAPD_DEFAULT_IDLE_TIMEOUT_STR "3600"
|
||||
diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c
|
||||
index c998d4262..bd7020585 100644
|
||||
--- a/ldap/servers/slapd/snmp_collator.c
|
||||
+++ b/ldap/servers/slapd/snmp_collator.c
|
||||
@@ -474,7 +474,7 @@ static void
|
||||
snmp_collator_create_semaphore(void)
|
||||
{
|
||||
/* First just try to create the semaphore. This should usually just work. */
|
||||
- if ((stats_sem = sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_FILE_MODE, 1)) == SEM_FAILED) {
|
||||
+ if ((stats_sem = agt_sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_SNMP_FILE_MODE, 1)) == SEM_FAILED) {
|
||||
if (errno == EEXIST) {
|
||||
/* It appears that we didn't exit cleanly last time and left the semaphore
|
||||
* around. Recreate it since we don't know what state it is in. */
|
||||
@@ -486,7 +486,7 @@ snmp_collator_create_semaphore(void)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
- if ((stats_sem = sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_FILE_MODE, 1)) == SEM_FAILED) {
|
||||
+ if ((stats_sem = agt_sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_SNMP_FILE_MODE, 1)) == SEM_FAILED) {
|
||||
/* No dice */
|
||||
slapi_log_err(SLAPI_LOG_EMERG, "snmp_collator_create_semaphore",
|
||||
"Failed to create semaphore for stats file (/dev/shm/sem.%s). Error %d (%s).\n",
|
||||
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
|
||||
index 036664447..fca03383e 100644
|
||||
--- a/src/lib389/lib389/instance/setup.py
|
||||
+++ b/src/lib389/lib389/instance/setup.py
|
||||
@@ -10,6 +10,7 @@
|
||||
import os
|
||||
import sys
|
||||
import shutil
|
||||
+import stat
|
||||
import pwd
|
||||
import grp
|
||||
import re
|
||||
@@ -773,6 +774,10 @@ class SetupDs(object):
|
||||
ldapi_autobind="on",
|
||||
)
|
||||
file_dse.write(dse_fmt)
|
||||
+ # Set minimum permission required by snmp ldap-agent
|
||||
+ status = os.fstat(file_dse.fileno())
|
||||
+ os.fchmod(file_dse.fileno(), status.st_mode | stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP)
|
||||
+ os.chown(os.path.join(slapd['config_dir'], 'dse.ldif'), slapd['user_uid'], slapd['group_gid'])
|
||||
|
||||
self.log.info("Create file system structures ...")
|
||||
# Create all the needed paths
|
||||
diff --git a/wrappers/systemd-snmp.service.in b/wrappers/systemd-snmp.service.in
|
||||
index f18766cb4..d344367a0 100644
|
||||
--- a/wrappers/systemd-snmp.service.in
|
||||
+++ b/wrappers/systemd-snmp.service.in
|
||||
@@ -9,6 +9,7 @@ After=network.target
|
||||
|
||||
[Service]
|
||||
Type=forking
|
||||
+Group=@defaultgroup@
|
||||
PIDFile=/run/dirsrv/ldap-agent.pid
|
||||
ExecStart=@sbindir@/ldap-agent @configdir@/ldap-agent.conf
|
||||
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,60 +0,0 @@
|
||||
From 12870f410545fb055f664b588df2a2b7ab1c228e Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Mon, 4 Mar 2024 07:22:00 +0100
Subject: [PATCH] Issue 5305 - OpenLDAP version autodetection doesn't work

Bug Description:
An error is logged during a build in `mock` with Bash 4.4:

```
checking for --with-libldap-r... ./configure: command substitution: line 22848: syntax error near unexpected token `>'
./configure: command substitution: line 22848: `ldapsearch -VV 2> >(sed -n '/ldapsearch/ s/.*ldapsearch \([0-9]\+\.[0-9]\+\.[0-9]\+\) .*/\1/p')'
no
```

`mock` runs Bash as `sh` (POSIX mode). Support for process substitution
in POSIX mode was added in version 5.1:
https://lists.gnu.org/archive/html/bug-bash/2020-12/msg00002.html

> Process substitution is now available in posix mode.

Fix Description:
* Add missing `BuildRequires` for openldap-clients
* Replace process substitution with a pipe

Fixes: https://github.com/389ds/389-ds-base/issues/5305

Reviewed by: @progier389, @tbordaz (Thanks!)
---
configure.ac | 2 +-
rpm/389-ds-base.spec.in | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
|
||||
diff --git a/configure.ac b/configure.ac
|
||||
index ffc2aac14..a690765a3 100644
|
||||
--- a/configure.ac
|
||||
+++ b/configure.ac
|
||||
@@ -912,7 +912,7 @@ AC_ARG_WITH(libldap-r, AS_HELP_STRING([--with-libldap-r],[Use lldap_r shared lib
|
||||
AC_SUBST(with_libldap_r)
|
||||
fi
|
||||
],
|
||||
-OPENLDAP_VERSION=`ldapsearch -VV 2> >(sed -n '/ldapsearch/ s/.*ldapsearch \([[[0-9]]]\+\.[[[0-9]]]\+\.[[[0-9]]]\+\) .*/\1/p')`
|
||||
+OPENLDAP_VERSION=`ldapsearch -VV 2>&1 | sed -n '/ldapsearch/ s/.*ldapsearch \([[[0-9]]]\+\.[[[0-9]]]\+\.[[[0-9]]]\+\) .*/\1/p'`
|
||||
AX_COMPARE_VERSION([$OPENLDAP_VERSION], [lt], [2.5], [ with_libldap_r=yes ], [ with_libldap_r=no ])
|
||||
AC_MSG_RESULT($with_libldap_r))
|
||||
|
||||
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
|
||||
index cd86138ea..b8c14cd14 100644
|
||||
--- a/rpm/389-ds-base.spec.in
|
||||
+++ b/rpm/389-ds-base.spec.in
|
||||
@@ -65,6 +65,7 @@ Provides: ldif2ldbm
|
||||
# Attach the buildrequires to the top level package:
|
||||
BuildRequires: nspr-devel
|
||||
BuildRequires: nss-devel >= 3.34
|
||||
+BuildRequires: openldap-clients
|
||||
BuildRequires: openldap-devel
|
||||
BuildRequires: libdb-devel
|
||||
BuildRequires: cyrus-sasl-devel
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,245 +0,0 @@
|
||||
From eca6f5fe18f768fd407d38c85624a5212bcf16ab Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Wed, 27 Sep 2023 15:40:33 -0700
Subject: [PATCH] Issue 1925 - Add a CI test (#5936)

Description: Verify that the issue is not present. Cover the scenario when
we remove existing VLVs, create new VLVs (with the same name) and then
we do online re-indexing.

Related: https://github.com/389ds/389-ds-base/issues/1925

Reviewed by: @progier389 (Thanks!)

(cherry picked from the 9633e8d32d28345409680f8e462fb4a53d3b4f83)
---
.../tests/suites/vlv/regression_test.py | 175 +++++++++++++++---
1 file changed, 145 insertions(+), 30 deletions(-)
|
||||
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
index 6ab709bd3..536fe950f 100644
|
||||
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2018 Red Hat, Inc.
|
||||
+# Copyright (C) 2023 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -9,12 +9,16 @@
|
||||
import pytest, time
|
||||
from lib389.tasks import *
|
||||
from lib389.utils import *
|
||||
-from lib389.topologies import topology_m2
|
||||
+from lib389.topologies import topology_m2, topology_st
|
||||
from lib389.replica import *
|
||||
from lib389._constants import *
|
||||
+from lib389.properties import TASK_WAIT
|
||||
from lib389.index import *
|
||||
from lib389.mappingTree import *
|
||||
from lib389.backend import *
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from ldap.controls.vlv import VLVRequestControl
|
||||
+from ldap.controls.sss import SSSRequestControl
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
@@ -22,6 +26,88 @@ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
+def open_new_ldapi_conn(dsinstance):
|
||||
+ ldapurl, certdir = get_ldapurl_from_serverid(dsinstance)
|
||||
+ assert 'ldapi://' in ldapurl
|
||||
+ conn = ldap.initialize(ldapurl)
|
||||
+ conn.sasl_interactive_bind_s("", ldap.sasl.external())
|
||||
+ return conn
|
||||
+
|
||||
+
|
||||
+def check_vlv_search(conn):
|
||||
+ before_count=1
|
||||
+ after_count=3
|
||||
+ offset=3501
|
||||
+
|
||||
+ vlv_control = VLVRequestControl(criticality=True,
|
||||
+ before_count=before_count,
|
||||
+ after_count=after_count,
|
||||
+ offset=offset,
|
||||
+ content_count=0,
|
||||
+ greater_than_or_equal=None,
|
||||
+ context_id=None)
|
||||
+
|
||||
+ sss_control = SSSRequestControl(criticality=True, ordering_rules=['cn'])
|
||||
+ result = conn.search_ext_s(
|
||||
+ base='dc=example,dc=com',
|
||||
+ scope=ldap.SCOPE_SUBTREE,
|
||||
+ filterstr='(uid=*)',
|
||||
+ serverctrls=[vlv_control, sss_control]
|
||||
+ )
|
||||
+ imin = offset + 998 - before_count
|
||||
+ imax = offset + 998 + after_count
|
||||
+
|
||||
+ for i, (dn, entry) in enumerate(result, start=imin):
|
||||
+ assert i <= imax
|
||||
+ expected_dn = f'uid=testuser{i},ou=People,dc=example,dc=com'
|
||||
+ log.debug(f'found {dn} expected {expected_dn}')
|
||||
+ assert dn.lower() == expected_dn.lower()
|
||||
+
|
||||
+
|
||||
+def add_users(inst, users_num):
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ log.info(f'Adding {users_num} users')
|
||||
+ for i in range(0, users_num):
|
||||
+ uid = 1000 + i
|
||||
+ user_properties = {
|
||||
+ 'uid': f'testuser{uid}',
|
||||
+ 'cn': f'testuser{uid}',
|
||||
+ 'sn': 'user',
|
||||
+ 'uidNumber': str(uid),
|
||||
+ 'gidNumber': str(uid),
|
||||
+ 'homeDirectory': f'/home/testuser{uid}'
|
||||
+ }
|
||||
+ users.create(properties=user_properties)
|
||||
+
|
||||
+
|
||||
+
|
||||
+def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
|
||||
+ scope=ldap.SCOPE_SUBTREE, prefix="vlv", vlvsort="cn"):
|
||||
+ vlv_searches = VLVSearch(inst)
|
||||
+ vlv_search_properties = {
|
||||
+ "objectclass": ["top", "vlvSearch"],
|
||||
+ "cn": f"{prefix}Srch",
|
||||
+ "vlvbase": basedn,
|
||||
+ "vlvfilter": "(uid=*)",
|
||||
+ "vlvscope": str(scope),
|
||||
+ }
|
||||
+ vlv_searches.create(
|
||||
+ basedn=f"cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
+ properties=vlv_search_properties
|
||||
+ )
|
||||
+
|
||||
+ vlv_index = VLVIndex(inst)
|
||||
+ vlv_index_properties = {
|
||||
+ "objectclass": ["top", "vlvIndex"],
|
||||
+ "cn": f"{prefix}Idx",
|
||||
+ "vlvsort": vlvsort,
|
||||
+ }
|
||||
+ vlv_index.create(
|
||||
+ basedn=f"cn={prefix}Srch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
+ properties=vlv_index_properties
|
||||
+ )
|
||||
+ return vlv_searches, vlv_index
|
||||
+
|
||||
class BackendHandler:
|
||||
def __init__(self, inst, bedict, scope=ldap.SCOPE_ONELEVEL):
|
||||
self.inst = inst
|
||||
@@ -101,34 +187,6 @@ class BackendHandler:
|
||||
'dn' : dn}
|
||||
|
||||
|
||||
-def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
|
||||
- scope=ldap.SCOPE_SUBTREE, prefix="vlv", vlvsort="cn"):
|
||||
- vlv_searches = VLVSearch(inst)
|
||||
- vlv_search_properties = {
|
||||
- "objectclass": ["top", "vlvSearch"],
|
||||
- "cn": f"{prefix}Srch",
|
||||
- "vlvbase": basedn,
|
||||
- "vlvfilter": "(uid=*)",
|
||||
- "vlvscope": str(scope),
|
||||
- }
|
||||
- vlv_searches.create(
|
||||
- basedn=f"cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
- properties=vlv_search_properties
|
||||
- )
|
||||
-
|
||||
- vlv_index = VLVIndex(inst)
|
||||
- vlv_index_properties = {
|
||||
- "objectclass": ["top", "vlvIndex"],
|
||||
- "cn": f"{prefix}Idx",
|
||||
- "vlvsort": vlvsort,
|
||||
- }
|
||||
- vlv_index.create(
|
||||
- basedn=f"cn={prefix}Srch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
- properties=vlv_index_properties
|
||||
- )
|
||||
- return vlv_searches, vlv_index
|
||||
-
|
||||
-
|
||||
@pytest.fixture
|
||||
def vlv_setup_with_uid_mr(topology_st, request):
|
||||
inst = topology_st.standalone
|
||||
@@ -245,6 +303,62 @@ def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
|
||||
entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")
|
||||
|
||||
|
||||
+def test_vlv_recreation_reindex(topology_st):
|
||||
+ """Test VLV recreation and reindexing.
|
||||
+
|
||||
+ :id: 29f4567f-4ac0-410f-bc99-a32e217a939f
|
||||
+ :setup: Standalone instance.
|
||||
+ :steps:
|
||||
+ 1. Create new VLVs and do the reindex.
|
||||
+ 2. Test the new VLVs.
|
||||
+ 3. Remove the existing VLVs.
|
||||
+ 4. Create new VLVs (with the same name).
|
||||
+ 5. Perform online re-indexing of the new VLVs.
|
||||
+ 6. Test the new VLVs.
|
||||
+ :expectedresults:
|
||||
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+ reindex_task = Tasks(inst)
|
||||
+
|
||||
+ # Create and test VLVs
|
||||
+ vlv_search, vlv_index = create_vlv_search_and_index(inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=DEFAULT_SUFFIX,
|
||||
+ attrname=vlv_index.rdn,
|
||||
+ args={TASK_WAIT: True},
|
||||
+ vlv=True
|
||||
+ ) == 0
|
||||
+
|
||||
+ add_users(inst, 5000)
|
||||
+
|
||||
+ conn = open_new_ldapi_conn(inst.serverid)
|
||||
+ assert len(conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(cn=*)")) > 0
|
||||
+ check_vlv_search(conn)
|
||||
+
|
||||
+ # Remove and recreate VLVs
|
||||
+ vlv_index.delete()
|
||||
+ vlv_search.delete()
|
||||
+
|
||||
+ vlv_search, vlv_index = create_vlv_search_and_index(inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=DEFAULT_SUFFIX,
|
||||
+ attrname=vlv_index.rdn,
|
||||
+ args={TASK_WAIT: True},
|
||||
+ vlv=True
|
||||
+ ) == 0
|
||||
+
|
||||
+ conn = open_new_ldapi_conn(inst.serverid)
|
||||
+ assert len(conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(cn=*)")) > 0
|
||||
+ check_vlv_search(conn)
|
||||
+
|
||||
+
|
||||
def test_vlv_with_mr(vlv_setup_with_uid_mr):
|
||||
"""
|
||||
Testing vlv having specific matching rule
|
||||
@@ -288,6 +402,7 @@ def test_vlv_with_mr(vlv_setup_with_uid_mr):
|
||||
assert inst.status()
|
||||
|
||||
|
||||
+
|
||||
if __name__ == "__main__":
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
--
|
||||
2.49.0
|
||||
|
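The check_vlv_search() helper in the test above pairs a VLVRequestControl with an SSSRequestControl and asserts that the returned entries fall inside the requested window. A simplified, self-contained model of that window arithmetic (no LDAP involved; the real test's + 998 constant additionally folds the testuser uid numbering base into the expected index):

# Simplified model of the server-side VLV window used by check_vlv_search():
# the target entry sits at `offset` (1-based) in the sorted list, and the
# server returns before_count entries before it and after_count after it.
def vlv_window(sorted_entries, offset, before_count, after_count):
    target = offset - 1
    start = max(target - before_count, 0)
    end = min(target + after_count, len(sorted_entries) - 1)
    return sorted_entries[start:end + 1]

# 5000 generated users sorted by cn; offset=3501 with before=1/after=3 yields
# a five-entry window centered on the 3501st entry, the same kind of window
# the test walks with its imin..imax bounds.
users = sorted(f'testuser{1000 + i}' for i in range(5000))
print(vlv_window(users, offset=3501, before_count=1, after_count=3))
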
@ -1,75 +0,0 @@
|
||||
From af3fa90f91efda86f4337e8823bca6581ab61792 Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Fri, 7 Feb 2025 09:43:08 +0100
|
||||
Subject: [PATCH] Issue 6494 - (2nd) Various errors when using extended
|
||||
matching rule on vlv sort filter
|
||||
|
||||
---
|
||||
.../tests/suites/indexes/regression_test.py | 40 +++++++++++++++++++
|
||||
1 file changed, 40 insertions(+)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
index 2196fb2ed..b5bcccc8f 100644
|
||||
--- a/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
@@ -11,17 +11,57 @@ import os
|
||||
import pytest
|
||||
import ldap
|
||||
from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX
|
||||
+from lib389.backend import Backend, Backends, DatabaseConfig
|
||||
from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate
|
||||
+from lib389.dbgen import dbgen_users
|
||||
from lib389.index import Indexes
|
||||
from lib389.backend import Backends
|
||||
from lib389.idm.user import UserAccounts
|
||||
from lib389.topologies import topology_st as topo
|
||||
from lib389.utils import ds_is_older
|
||||
from lib389.idm.nscontainer import nsContainer
|
||||
+from lib389.properties import TASK_WAIT
|
||||
+from lib389.tasks import Tasks, Task
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
|
||||
+SUFFIX2 = 'dc=example2,dc=com'
|
||||
+BENAME2 = 'be2'
|
||||
+
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
+
|
||||
+@pytest.fixture(scope="function")
|
||||
+def add_backend_and_ldif_50K_users(request, topo):
|
||||
+ """
|
||||
+ Add an empty backend and associated 50K users ldif file
|
||||
+ """
|
||||
+
|
||||
+ tasks = Tasks(topo.standalone)
|
||||
+ import_ldif = f'{topo.standalone.ldifdir}/be2_50K_users.ldif'
|
||||
+ be2 = Backend(topo.standalone)
|
||||
+ be2.create(properties={
|
||||
+ 'cn': BENAME2,
|
||||
+ 'nsslapd-suffix': SUFFIX2,
|
||||
+ },
|
||||
+ )
|
||||
+
|
||||
+ def fin():
|
||||
+ nonlocal be2
|
||||
+ if not DEBUGGING:
|
||||
+ be2.delete()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+ parent = f'ou=people,{SUFFIX2}'
|
||||
+ dbgen_users(topo.standalone, 50000, import_ldif, SUFFIX2, generic=True, parent=parent)
|
||||
+ assert tasks.importLDIF(
|
||||
+ suffix=SUFFIX2,
|
||||
+ input_file=import_ldif,
|
||||
+ args={TASK_WAIT: True}
|
||||
+ ) == 0
|
||||
+
|
||||
+ return import_ldif
|
||||
+
|
||||
@pytest.fixture(scope="function")
|
||||
def add_a_group_with_users(request, topo):
|
||||
"""
|
||||
--
|
||||
2.49.0
|
||||
|
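The add_backend_and_ldif_50K_users fixture above creates a second backend (be2), generates a 50K-user LDIF with dbgen_users, and imports it with an import task. A hypothetical test consuming that fixture might look like the sketch below (the test name and the exact assertions are illustrative, not part of the patch):

# Hypothetical usage sketch, not part of the patch: a test that consumes the
# fixture above. The test name and assertions are illustrative assumptions.
import ldap

SUFFIX2 = 'dc=example2,dc=com'

def test_search_second_backend(topo, add_backend_and_ldif_50K_users):
    # The fixture returns the path of the LDIF it generated and imported.
    import_ldif = add_backend_and_ldif_50K_users
    assert import_ldif.endswith('be2_50K_users.ldif')

    # dbgen_users placed 50K generic users under ou=people of the new suffix.
    entries = topo.standalone.search_s(f'ou=people,{SUFFIX2}',
                                       ldap.SCOPE_ONELEVEL, '(uid=*)')
    assert len(entries) == 50000
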
@ -1,45 +0,0 @@
|
||||
From 0ad0eb34972c99f30334d7d420f3056e0e794d74 Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Fri, 7 Feb 2025 14:33:46 +0100
|
||||
Subject: [PATCH] Issue 6494 - (3rd) Various errors when using extended
|
||||
matching rule on vlv sort filter
|
||||
|
||||
(cherry picked from commit f2f917ca55c34c81b578bce1dd5275abff6abb72)
|
||||
---
|
||||
dirsrvtests/tests/suites/vlv/regression_test.py | 8 ++++++--
|
||||
1 file changed, 6 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
index 536fe950f..d069fdbaf 100644
|
||||
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
@@ -16,12 +16,16 @@ from lib389.properties import TASK_WAIT
|
||||
from lib389.index import *
|
||||
from lib389.mappingTree import *
|
||||
from lib389.backend import *
|
||||
-from lib389.idm.user import UserAccounts
|
||||
+from lib389.idm.user import UserAccounts, UserAccount
|
||||
+from lib389.idm.organization import Organization
|
||||
+from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
from ldap.controls.vlv import VLVRequestControl
|
||||
from ldap.controls.sss import SSSRequestControl
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
+DEMO_PW = 'secret12'
|
||||
+
|
||||
logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@@ -169,7 +173,7 @@ class BackendHandler:
|
||||
'loginShell': '/bin/false',
|
||||
'userpassword': DEMO_PW })
|
||||
# Add regular user
|
||||
- add_users(self.inst, 10, suffix=suffix)
|
||||
+ add_users(self.inst, 10)
|
||||
# Removing ou2
|
||||
ou2.delete()
|
||||
# And export
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,72 +0,0 @@
|
||||
From 52041811b200292af6670490c9ebc1f599439a22 Mon Sep 17 00:00:00 2001
|
||||
From: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
Date: Sat, 22 Mar 2025 01:25:25 +0900
|
||||
Subject: [PATCH] Issue 6494 - (4th) Various errors when using extended
|
||||
matching rule on vlv sort filter
|
||||
|
||||
test_vlv_with_mr uses the vlv_setup_with_uid_mr fixture to set up the backend
and test users. The add_users function is called in beh.setup without any
suffix for the created backend. As a result, test users are always created
in the DEFAULT_SUFFIX by add_users. Another test,
such as test_vlv_recreation_reindex, can create the same test user in
DEFAULT_SUFFIX, and that caused the ALREADY_EXISTS failure in the
test_vlv_with_mr test.
|
||||
|
||||
In the main branch, add_users has a suffix argument. Test users are created
|
||||
on the specific suffix, and the backend is cleaned up after the test.
|
||||
This PR is to follow the same implementation.
|
||||
|
||||
Also, suppressing ldap.ALREADY_EXISTS makes the add_users function
easier to reuse.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/6494
|
||||
---
|
||||
dirsrvtests/tests/suites/vlv/regression_test.py | 11 ++++++-----
|
||||
1 file changed, 6 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
index d069fdbaf..e9408117b 100644
|
||||
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
@@ -21,6 +21,7 @@ from lib389.idm.organization import Organization
|
||||
from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
from ldap.controls.vlv import VLVRequestControl
|
||||
from ldap.controls.sss import SSSRequestControl
|
||||
+from contextlib import suppress
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
@@ -68,8 +69,8 @@ def check_vlv_search(conn):
|
||||
assert dn.lower() == expected_dn.lower()
|
||||
|
||||
|
||||
-def add_users(inst, users_num):
|
||||
- users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+def add_users(inst, users_num, suffix=DEFAULT_SUFFIX):
|
||||
+ users = UserAccounts(inst, suffix)
|
||||
log.info(f'Adding {users_num} users')
|
||||
for i in range(0, users_num):
|
||||
uid = 1000 + i
|
||||
@@ -81,8 +82,8 @@ def add_users(inst, users_num):
|
||||
'gidNumber': str(uid),
|
||||
'homeDirectory': f'/home/testuser{uid}'
|
||||
}
|
||||
- users.create(properties=user_properties)
|
||||
-
|
||||
+ with suppress(ldap.ALREADY_EXISTS):
|
||||
+ users.create(properties=user_properties)
|
||||
|
||||
|
||||
def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
|
||||
@@ -173,7 +174,7 @@ class BackendHandler:
|
||||
'loginShell': '/bin/false',
|
||||
'userpassword': DEMO_PW })
|
||||
# Add regular user
|
||||
- add_users(self.inst, 10)
|
||||
+ add_users(self.inst, 10, suffix=suffix)
|
||||
# Removing ou2
|
||||
ou2.delete()
|
||||
# And export
|
||||
--
|
||||
2.49.0
|
||||
|
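The patch above makes add_users tolerant of pre-existing entries by wrapping the create call in contextlib.suppress(ldap.ALREADY_EXISTS). A minimal sketch of the same idempotent-add pattern (the helper name and attribute values are illustrative, not taken from the suite):

# Minimal sketch of the idempotent-add pattern adopted above.
from contextlib import suppress

import ldap
from lib389.idm.user import UserAccounts

def ensure_user(inst, suffix, uid):
    users = UserAccounts(inst, suffix)
    with suppress(ldap.ALREADY_EXISTS):
        users.create(properties={
            'uid': uid,
            'cn': uid,
            'sn': 'user',
            'uidNumber': '1000',
            'gidNumber': '2000',
            'homeDirectory': f'/home/{uid}',
        })
    # Creating the same uid twice is now a no-op instead of an error.
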
@ -1,357 +0,0 @@
|
||||
From b812afe4da6db134c1221eb48a6155480e4c2cb3 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 14 Jan 2025 13:55:03 -0500
|
||||
Subject: [PATCH] Issue 6497 - lib389 - Configure replication for multiple
|
||||
suffixes (#6498)
|
||||
|
||||
Bug Description: When trying to set up replication across multiple suffixes -
|
||||
particularly if one of those suffixes is a subsuffix - lib389 fails to properly
|
||||
configure the replication agreements, service accounts, and required groups.
|
||||
The references to the replication_managers group and service account
|
||||
naming do not correctly account for non-default additional suffixes.
|
||||
|
||||
Fix Description: Ensure replication DNs and credentials are correctly tied to each suffix.
|
||||
Enable the DSLdapObject.present method to compare values as
normalized DNs if they are DNs.
|
||||
Add a test (test_multi_subsuffix_replication) to verify multi-suffix
|
||||
replication across four suppliers.
|
||||
Fix tests that are related to repl service accounts.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6497
|
||||
|
||||
Reviewed: @progier389 (Thanks!)
|
||||
---
|
||||
.../tests/suites/ds_tools/replcheck_test.py | 4 +-
|
||||
.../suites/replication/acceptance_test.py | 153 ++++++++++++++++++
|
||||
.../cleanallruv_shutdown_crash_test.py | 4 +-
|
||||
.../suites/replication/regression_m2_test.py | 2 +-
|
||||
.../replication/tls_client_auth_repl_test.py | 4 +-
|
||||
src/lib389/lib389/_mapped_object.py | 21 ++-
|
||||
src/lib389/lib389/replica.py | 10 +-
|
||||
7 files changed, 182 insertions(+), 16 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_tools/replcheck_test.py b/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
|
||||
index f61fc432d..dfa1d9423 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
|
||||
@@ -67,10 +67,10 @@ def topo_tls_ldapi(topo):
|
||||
|
||||
# Create the replication dns
|
||||
services = ServiceAccounts(m1, DEFAULT_SUFFIX)
|
||||
- repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
|
||||
+ repl_m1 = services.get(f'{DEFAULT_SUFFIX}:{m1.host}:{m1.sslport}')
|
||||
repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())
|
||||
|
||||
- repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
|
||||
+ repl_m2 = services.get(f'{DEFAULT_SUFFIX}:{m2.host}:{m2.sslport}')
|
||||
repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())
|
||||
|
||||
# Check the replication is "done".
|
||||
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
index d1cfa8bdb..fc8622051 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
@@ -9,6 +9,7 @@
|
||||
import pytest
|
||||
import logging
|
||||
import time
|
||||
+from lib389.backend import Backend
|
||||
from lib389.replica import Replicas
|
||||
from lib389.tasks import *
|
||||
from lib389.utils import *
|
||||
@@ -325,6 +326,158 @@ def test_modify_stripattrs(topo_m4):
|
||||
assert attr_value in entries[0].data['nsds5replicastripattrs']
|
||||
|
||||
|
||||
+def test_multi_subsuffix_replication(topo_m4):
|
||||
+ """Check that replication works with multiple subsuffixes
|
||||
+
|
||||
+ :id: ac1aaeae-173e-48e7-847f-03b9867443c4
|
||||
+ :setup: Four suppliers replication setup
|
||||
+ :steps:
|
||||
+ 1. Create additional suffixes
|
||||
+ 2. Setup replication for all suppliers
|
||||
+ 3. Generate test data for each suffix (add, modify, remove)
|
||||
+ 4. Wait for replication to complete across all suppliers for each suffix
|
||||
+ 5. Check that all expected data is present on all suppliers
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success (the data is replicated everywhere)
|
||||
+ """
|
||||
+
|
||||
+ SUFFIX_2 = "dc=test2"
|
||||
+ SUFFIX_3 = f"dc=test3,{DEFAULT_SUFFIX}"
|
||||
+ all_suffixes = [DEFAULT_SUFFIX, SUFFIX_2, SUFFIX_3]
|
||||
+
|
||||
+ test_users_by_suffix = {suffix: [] for suffix in all_suffixes}
|
||||
+ created_backends = []
|
||||
+
|
||||
+ suppliers = [
|
||||
+ topo_m4.ms["supplier1"],
|
||||
+ topo_m4.ms["supplier2"],
|
||||
+ topo_m4.ms["supplier3"],
|
||||
+ topo_m4.ms["supplier4"]
|
||||
+ ]
|
||||
+
|
||||
+ try:
|
||||
+ # Setup additional backends and replication for the new suffixes
|
||||
+ for suffix in [SUFFIX_2, SUFFIX_3]:
|
||||
+ repl = ReplicationManager(suffix)
|
||||
+ for supplier in suppliers:
|
||||
+ # Create a new backend for this suffix
|
||||
+ props = {
|
||||
+ 'cn': f'userRoot_{suffix.split(",")[0][3:]}',
|
||||
+ 'nsslapd-suffix': suffix
|
||||
+ }
|
||||
+ be = Backend(supplier)
|
||||
+ be.create(properties=props)
|
||||
+ be.create_sample_entries('001004002')
|
||||
+
|
||||
+ # Track the backend so we can remove it later
|
||||
+ created_backends.append((supplier, props['cn']))
|
||||
+
|
||||
+ # Enable replication
|
||||
+ if supplier == suppliers[0]:
|
||||
+ repl.create_first_supplier(supplier)
|
||||
+ else:
|
||||
+ repl.join_supplier(suppliers[0], supplier)
|
||||
+
|
||||
+ # Create a full mesh topology for this suffix
|
||||
+ for i, supplier_i in enumerate(suppliers):
|
||||
+ for j, supplier_j in enumerate(suppliers):
|
||||
+ if i != j:
|
||||
+ repl.ensure_agreement(supplier_i, supplier_j)
|
||||
+
|
||||
+ # Generate test data for each suffix (add, modify, remove)
|
||||
+ for suffix in all_suffixes:
|
||||
+ # Create some user entries in supplier1
|
||||
+ for i in range(20):
|
||||
+ user_dn = f'uid=test_user_{i},{suffix}'
|
||||
+ test_user = UserAccount(suppliers[0], user_dn)
|
||||
+ test_user.create(properties={
|
||||
+ 'uid': f'test_user_{i}',
|
||||
+ 'cn': f'Test User {i}',
|
||||
+ 'sn': f'User{i}',
|
||||
+ 'userPassword': 'password',
|
||||
+ 'uidNumber': str(1000 + i),
|
||||
+ 'gidNumber': '2000',
|
||||
+ 'homeDirectory': f'/home/test_user_{i}'
|
||||
+ })
|
||||
+ test_users_by_suffix[suffix].append(test_user)
|
||||
+
|
||||
+ # Perform modifications on these entries
|
||||
+ for user in test_users_by_suffix[suffix]:
|
||||
+ # Add some attributes
|
||||
+ for j in range(3):
|
||||
+ user.add('description', f'Description {j}')
|
||||
+ # Replace an attribute
|
||||
+ user.replace('cn', f'Modified User {user.get_attr_val_utf8("uid")}')
|
||||
+ # Delete the attributes we added
|
||||
+ for j in range(3):
|
||||
+ try:
|
||||
+ user.remove('description', f'Description {j}')
|
||||
+ except Exception:
|
||||
+ pass
|
||||
+
|
||||
+ # Wait for replication to complete across all suppliers, for each suffix
|
||||
+ for suffix in all_suffixes:
|
||||
+ repl = ReplicationManager(suffix)
|
||||
+ for i, supplier_i in enumerate(suppliers):
|
||||
+ for j, supplier_j in enumerate(suppliers):
|
||||
+ if i != j:
|
||||
+ repl.wait_for_replication(supplier_i, supplier_j)
|
||||
+
|
||||
+ # Verify that each user and modification replicated to all suppliers
|
||||
+ for suffix in all_suffixes:
|
||||
+ for i in range(20):
|
||||
+ user_dn = f'uid=test_user_{i},{suffix}'
|
||||
+ # Retrieve this user from all suppliers
|
||||
+ all_user_objs = topo_m4.all_get_dsldapobject(user_dn, UserAccount)
|
||||
+ # Ensure it exists in all 4 suppliers
|
||||
+ assert len(all_user_objs) == 4, (
|
||||
+ f"User {user_dn} not found on all suppliers. "
|
||||
+ f"Found only on {len(all_user_objs)} suppliers."
|
||||
+ )
|
||||
+ # Check modifications: 'cn' should now be 'Modified User test_user_{i}'
|
||||
+ for user_obj in all_user_objs:
|
||||
+ expected_cn = f"Modified User test_user_{i}"
|
||||
+ actual_cn = user_obj.get_attr_val_utf8("cn")
|
||||
+ assert actual_cn == expected_cn, (
|
||||
+ f"User {user_dn} has unexpected 'cn': {actual_cn} "
|
||||
+ f"(expected '{expected_cn}') on supplier {user_obj._instance.serverid}"
|
||||
+ )
|
||||
+ # And check that 'description' attributes were removed
|
||||
+ desc_vals = user_obj.get_attr_vals_utf8('description')
|
||||
+ for j in range(3):
|
||||
+ assert f"Description {j}" not in desc_vals, (
|
||||
+ f"User {user_dn} on supplier {user_obj._instance.serverid} "
|
||||
+ f"still has 'Description {j}'"
|
||||
+ )
|
||||
+ finally:
|
||||
+ for suffix, test_users in test_users_by_suffix.items():
|
||||
+ for user in test_users:
|
||||
+ try:
|
||||
+ if user.exists():
|
||||
+ user.delete()
|
||||
+ except Exception:
|
||||
+ pass
|
||||
+
|
||||
+ for suffix in [SUFFIX_2, SUFFIX_3]:
|
||||
+ repl = ReplicationManager(suffix)
|
||||
+ for supplier in suppliers:
|
||||
+ try:
|
||||
+ repl.remove_supplier(supplier)
|
||||
+ except Exception:
|
||||
+ pass
|
||||
+
|
||||
+ for (supplier, backend_name) in created_backends:
|
||||
+ be = Backend(supplier, backend_name)
|
||||
+ try:
|
||||
+ be.delete()
|
||||
+ except Exception:
|
||||
+ pass
|
||||
+
|
||||
+
|
||||
def test_new_suffix(topo_m4, new_suffix):
|
||||
"""Check that we can enable replication on a new suffix
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py
|
||||
index b4b74e339..fe9955e7e 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py
|
||||
@@ -66,10 +66,10 @@ def test_clean_shutdown_crash(topology_m2):
|
||||
|
||||
log.info('Creating replication dns')
|
||||
services = ServiceAccounts(m1, DEFAULT_SUFFIX)
|
||||
- repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
|
||||
+ repl_m1 = services.get(f'{DEFAULT_SUFFIX}:{m1.host}:{m1.sslport}')
|
||||
repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())
|
||||
|
||||
- repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
|
||||
+ repl_m2 = services.get(f'{DEFAULT_SUFFIX}:{m2.host}:{m2.sslport}')
|
||||
repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())
|
||||
|
||||
log.info('Changing auth type')
|
||||
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
index 72d4b9f89..9c707615f 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
@@ -64,7 +64,7 @@ class _AgmtHelper:
|
||||
self.binddn = f'cn={cn},cn=config'
|
||||
else:
|
||||
self.usedn = False
|
||||
- self.cn = f'{self.from_inst.host}:{self.from_inst.sslport}'
|
||||
+ self.cn = ldap.dn.escape_dn_chars(f'{DEFAULT_SUFFIX}:{self.from_inst.host}:{self.from_inst.sslport}')
|
||||
self.binddn = f'cn={self.cn}, ou=Services, {DEFAULT_SUFFIX}'
|
||||
self.original_state = []
|
||||
self._pass = False
|
||||
diff --git a/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py b/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py
|
||||
index a00dc5b78..ca17554c7 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py
|
||||
@@ -56,10 +56,10 @@ def tls_client_auth(topo_m2):
|
||||
|
||||
# Create the replication dns
|
||||
services = ServiceAccounts(m1, DEFAULT_SUFFIX)
|
||||
- repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
|
||||
+ repl_m1 = services.get(f'{DEFAULT_SUFFIX}:{m1.host}:{m1.sslport}')
|
||||
repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())
|
||||
|
||||
- repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
|
||||
+ repl_m2 = services.get(f'{DEFAULT_SUFFIX}:{m2.host}:{m2.sslport}')
|
||||
repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())
|
||||
|
||||
# Check the replication is "done".
|
||||
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
|
||||
index b7391d8cc..ae00c95d0 100644
|
||||
--- a/src/lib389/lib389/_mapped_object.py
|
||||
+++ b/src/lib389/lib389/_mapped_object.py
|
||||
@@ -19,7 +19,7 @@ from lib389._constants import DIRSRV_STATE_ONLINE
|
||||
from lib389._mapped_object_lint import DSLint, DSLints
|
||||
from lib389.utils import (
|
||||
ensure_bytes, ensure_str, ensure_int, ensure_list_bytes, ensure_list_str,
|
||||
- ensure_list_int, display_log_value, display_log_data
|
||||
+ ensure_list_int, display_log_value, display_log_data, is_a_dn, normalizeDN
|
||||
)
|
||||
|
||||
# This function filter and term generation provided thanks to
|
||||
@@ -292,15 +292,28 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
_search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[attr, ],
|
||||
serverctrls=self._server_controls, clientctrls=self._client_controls,
|
||||
escapehatch='i am sure')[0]
|
||||
- values = self.get_attr_vals_bytes(attr)
|
||||
+ values = self.get_attr_vals_utf8(attr)
|
||||
self._log.debug("%s contains %s" % (self._dn, values))
|
||||
|
||||
if value is None:
|
||||
# We are just checking if SOMETHING is present ....
|
||||
return len(values) > 0
|
||||
+
|
||||
+ # Otherwise, we are checking a specific value
|
||||
+ if is_a_dn(value):
|
||||
+ normalized_value = normalizeDN(value)
|
||||
else:
|
||||
- # Check if a value really does exist.
|
||||
- return ensure_bytes(value).lower() in [x.lower() for x in values]
|
||||
+ normalized_value = ensure_bytes(value).lower()
|
||||
+
|
||||
+ # Normalize each returned value depending on whether it is a DN
|
||||
+ normalized_values = []
|
||||
+ for v in values:
|
||||
+ if is_a_dn(v):
|
||||
+ normalized_values.append(normalizeDN(v))
|
||||
+ else:
|
||||
+ normalized_values.append(ensure_bytes(v.lower()))
|
||||
+
|
||||
+ return normalized_value in normalized_values
|
||||
|
||||
def add(self, key, value):
|
||||
"""Add an attribute with a value
|
||||
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
|
||||
index 1f321972d..cd46e86d5 100644
|
||||
--- a/src/lib389/lib389/replica.py
|
||||
+++ b/src/lib389/lib389/replica.py
|
||||
@@ -2011,7 +2011,7 @@ class ReplicationManager(object):
|
||||
return repl_group
|
||||
else:
|
||||
try:
|
||||
- repl_group = groups.get('replication_managers')
|
||||
+ repl_group = groups.get(dn=f'cn=replication_managers,{self._suffix}')
|
||||
return repl_group
|
||||
except ldap.NO_SUCH_OBJECT:
|
||||
self._log.warning("{} doesn't have cn=replication_managers,{} entry \
|
||||
@@ -2035,7 +2035,7 @@ class ReplicationManager(object):
|
||||
services = ServiceAccounts(from_instance, self._suffix)
|
||||
# Generate the password and save the credentials
|
||||
# for putting them into agreements in the future
|
||||
- service_name = '{}:{}'.format(to_instance.host, port)
|
||||
+ service_name = f'{self._suffix}:{to_instance.host}:{port}'
|
||||
creds = password_generate()
|
||||
repl_service = services.ensure_state(properties={
|
||||
'cn': service_name,
|
||||
@@ -2299,7 +2299,7 @@ class ReplicationManager(object):
|
||||
Internal Only.
|
||||
"""
|
||||
|
||||
- rdn = '{}:{}'.format(from_instance.host, from_instance.sslport)
|
||||
+ rdn = f'{self._suffix}:{from_instance.host}:{from_instance.sslport}'
|
||||
try:
|
||||
creds = self._repl_creds[rdn]
|
||||
except KeyError:
|
||||
@@ -2499,8 +2499,8 @@ class ReplicationManager(object):
|
||||
# Touch something then wait_for_replication.
|
||||
from_groups = Groups(from_instance, basedn=self._suffix, rdn=None)
|
||||
to_groups = Groups(to_instance, basedn=self._suffix, rdn=None)
|
||||
- from_group = from_groups.get('replication_managers')
|
||||
- to_group = to_groups.get('replication_managers')
|
||||
+ from_group = from_groups.get(dn=f'cn=replication_managers,{self._suffix}')
|
||||
+ to_group = to_groups.get(dn=f'cn=replication_managers,{self._suffix}')
|
||||
|
||||
change = str(uuid.uuid4())
|
||||
|
||||
--
|
||||
2.49.0
|
||||
|
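The _mapped_object.py hunk above makes DSLdapObject.present() compare DN-valued attributes as normalized DNs, since two DNs that differ only in case or formatting refer to the same entry. A standalone sketch of that idea (the normalization here is deliberately naive and is an assumption; lib389 itself relies on its normalizeDN helper, so this is an illustration, not the library code):

# Standalone illustration of DN-aware comparison.
import ldap.dn

def normalize_dn(value):
    # Reparse the DN and lowercase each attribute type and value so that
    # case and AVA ordering inside an RDN do not matter. Real attribute
    # syntaxes can be case sensitive, hence "naive".
    parts = []
    for rdn in ldap.dn.str2dn(value):
        avas = sorted((attr.lower(), val.lower()) for attr, val, _flags in rdn)
        parts.append('+'.join(f'{attr}={val}' for attr, val in avas))
    return ','.join(parts)

def present(values, value):
    # Loosely mirrors the patched DSLdapObject.present(): normalize both sides
    # when they look like DNs, otherwise fall back to a case-insensitive match.
    if ldap.dn.is_dn(value):
        candidates = {normalize_dn(v) for v in values if ldap.dn.is_dn(v)}
        return normalize_dn(value) in candidates
    return value.lower() in {v.lower() for v in values}

# These two spellings of the replication_managers group DN now compare equal:
assert present(['cn=replication_managers,dc=example,dc=com'],
               'CN=Replication_Managers,DC=Example,DC=Com')
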
@ -1,126 +0,0 @@
|
||||
From ebe986c78c6cd4e1f10172d8a8a11faf814fbc22 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Thu, 6 Mar 2025 16:49:53 -0500
|
||||
Subject: [PATCH] Issue 6655 - fix replication release replica decoding error
|
||||
|
||||
Description:
|
||||
|
||||
When a start replication session extended op is received, acquire and
|
||||
release exclusive access before returning the result to the client.
|
||||
Otherwise there is a race condition where an "end" replication extended
|
||||
op can arrive before the replica is released and that leads to a
|
||||
decoding error on the other replica.
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6655
|
||||
|
||||
Reviewed by: spichugi, tbordaz, and vashirov(Thanks!!!)
|
||||
---
|
||||
.../suites/replication/acceptance_test.py | 12 ++++++++++
|
||||
ldap/servers/plugins/replication/repl_extop.c | 24 ++++++++++++-------
|
||||
2 files changed, 27 insertions(+), 9 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
index fc8622051..0f18edb44 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
@@ -1,5 +1,9 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+<<<<<<< HEAD
|
||||
# Copyright (C) 2021 Red Hat, Inc.
|
||||
+=======
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+>>>>>>> a623c3f90 (Issue 6655 - fix replication release replica decoding error)
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -453,6 +457,13 @@ def test_multi_subsuffix_replication(topo_m4):
|
||||
f"User {user_dn} on supplier {user_obj._instance.serverid} "
|
||||
f"still has 'Description {j}'"
|
||||
)
|
||||
+
|
||||
+ # Check there are no decoding errors
|
||||
+ assert not topo_m4.ms["supplier1"].ds_error_log.match('.*decoding failed.*')
|
||||
+ assert not topo_m4.ms["supplier2"].ds_error_log.match('.*decoding failed.*')
|
||||
+ assert not topo_m4.ms["supplier3"].ds_error_log.match('.*decoding failed.*')
|
||||
+ assert not topo_m4.ms["supplier4"].ds_error_log.match('.*decoding failed.*')
|
||||
+
|
||||
finally:
|
||||
for suffix, test_users in test_users_by_suffix.items():
|
||||
for user in test_users:
|
||||
@@ -507,6 +518,7 @@ def test_new_suffix(topo_m4, new_suffix):
|
||||
repl.remove_supplier(m1)
|
||||
repl.remove_supplier(m2)
|
||||
|
||||
+
|
||||
def test_many_attrs(topo_m4, create_entry):
|
||||
"""Check a replication with many attributes (add and delete)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
|
||||
index 14b756df1..dacc611c0 100644
|
||||
--- a/ldap/servers/plugins/replication/repl_extop.c
|
||||
+++ b/ldap/servers/plugins/replication/repl_extop.c
|
||||
@@ -1134,6 +1134,12 @@ send_response:
|
||||
slapi_pblock_set(pb, SLAPI_EXT_OP_RET_OID, REPL_NSDS50_REPLICATION_RESPONSE_OID);
|
||||
}
|
||||
|
||||
+ /* connext (release our hold on it at least) */
|
||||
+ if (NULL != connext) {
|
||||
+ /* don't free it, just let go of it */
|
||||
+ consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
|
||||
+ }
|
||||
+
|
||||
slapi_pblock_set(pb, SLAPI_EXT_OP_RET_VALUE, resp_bval);
|
||||
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
"multimaster_extop_StartNSDS50ReplicationRequest - "
|
||||
@@ -1251,12 +1257,6 @@ send_response:
|
||||
if (NULL != ruv_bervals) {
|
||||
ber_bvecfree(ruv_bervals);
|
||||
}
|
||||
- /* connext (our hold on it at least) */
|
||||
- if (NULL != connext) {
|
||||
- /* don't free it, just let go of it */
|
||||
- consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
|
||||
- connext = NULL;
|
||||
- }
|
||||
|
||||
return return_value;
|
||||
}
|
||||
@@ -1389,6 +1389,13 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
|
||||
}
|
||||
}
|
||||
send_response:
|
||||
+ /* connext (release our hold on it at least) */
|
||||
+ if (NULL != connext) {
|
||||
+ /* don't free it, just let go of it */
|
||||
+ consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
|
||||
+ connext = NULL;
|
||||
+ }
|
||||
+
|
||||
/* Send the response code */
|
||||
if ((resp_bere = der_alloc()) == NULL) {
|
||||
goto free_and_return;
|
||||
@@ -1419,11 +1426,10 @@ free_and_return:
|
||||
if (NULL != resp_bval) {
|
||||
ber_bvfree(resp_bval);
|
||||
}
|
||||
- /* connext (our hold on it at least) */
|
||||
+ /* connext (release our hold on it if not already released) */
|
||||
if (NULL != connext) {
|
||||
/* don't free it, just let go of it */
|
||||
consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
|
||||
- connext = NULL;
|
||||
}
|
||||
|
||||
return return_value;
|
||||
@@ -1516,7 +1522,7 @@ multimaster_extop_abort_cleanruv(Slapi_PBlock *pb)
|
||||
rid);
|
||||
}
|
||||
/*
|
||||
- * Get the replica
|
||||
+ * Get the replica
|
||||
*/
|
||||
if ((r = replica_get_replica_from_root(repl_root)) == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multimaster_extop_abort_cleanruv - "
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,26 +0,0 @@
|
||||
From 5b12463bfeb518f016acb14bc118b5f8ad3eef5e Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Thu, 15 May 2025 09:22:22 +0200
|
||||
Subject: [PATCH] Issue 6655 - fix merge conflict
|
||||
|
||||
---
|
||||
dirsrvtests/tests/suites/replication/acceptance_test.py | 4 ----
|
||||
1 file changed, 4 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
index 0f18edb44..6b5186127 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
@@ -1,9 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-<<<<<<< HEAD
|
||||
-# Copyright (C) 2021 Red Hat, Inc.
|
||||
-=======
|
||||
# Copyright (C) 2025 Red Hat, Inc.
|
||||
->>>>>>> a623c3f90 (Issue 6655 - fix replication release replica decoding error)
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,291 +0,0 @@
|
||||
From 8d62124fb4d0700378b6f0669cc9d47338a8151c Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Tue, 25 Mar 2025 09:20:50 +0100
|
||||
Subject: [PATCH] Issue 6571 - Nested group does not receive memberOf attribute
|
||||
(#6679)
|
||||
|
||||
Bug description:
|
||||
There is a risk of creating a loop in group membership.
|
||||
For example G2 is member of G1 and G1 is member of G2.
|
||||
The MemberOf plugin iterates from a node to its ancestors
|
||||
to update the 'memberof' values of the node.
|
||||
The plugin uses a valueset ('already_seen_ndn_vals')
|
||||
to keep track of the nodes it has already visited.
|
||||
It uses this valueset to detect a possible loop and
|
||||
in that case it does not add the ancestor as the
|
||||
memberof value of the node.
|
||||
This is an error when there are multiple paths
|
||||
up to an ancestor.
|
||||
|
||||
Fix description:
|
||||
The ancestor should be added to the node systematically,
|
||||
just in case the ancestor is in 'already_seen_ndn_vals'
|
||||
it skips the final recursion
|
||||
|
||||
fixes: #6571
|
||||
|
||||
Reviewed by: Pierre Rogier, Mark Reynolds (Thanks !!!)
|
||||
---
|
||||
.../suites/memberof_plugin/regression_test.py | 109 ++++++++++++++++++
|
||||
.../tests/suites/plugins/memberof_test.py | 5 +
|
||||
ldap/servers/plugins/memberof/memberof.c | 52 ++++-----
|
||||
3 files changed, 137 insertions(+), 29 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
index 4c681a909..dba908975 100644
|
||||
--- a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
@@ -467,6 +467,21 @@ def _find_memberof_ext(server, user_dn=None, group_dn=None, find_result=True):
|
||||
else:
|
||||
assert (not found)
|
||||
|
||||
+def _check_membership(server, entry, expected_members, expected_memberof):
|
||||
+ assert server
|
||||
+ assert entry
|
||||
+
|
||||
+ memberof = entry.get_attr_vals('memberof')
|
||||
+ member = entry.get_attr_vals('member')
|
||||
+ assert len(member) == len(expected_members)
|
||||
+ assert len(memberof) == len(expected_memberof)
|
||||
+ for e in expected_members:
|
||||
+ server.log.info("Checking %s has member %s" % (entry.dn, e.dn))
|
||||
+ assert e.dn.encode() in member
|
||||
+ for e in expected_memberof:
|
||||
+ server.log.info("Checking %s is member of %s" % (entry.dn, e.dn))
|
||||
+ assert e.dn.encode() in memberof
|
||||
+
|
||||
|
||||
@pytest.mark.ds49161
|
||||
def test_memberof_group(topology_st):
|
||||
@@ -535,6 +550,100 @@ def test_memberof_group(topology_st):
|
||||
_find_memberof_ext(inst, dn1, g2n, True)
|
||||
_find_memberof_ext(inst, dn2, g2n, True)
|
||||
|
||||
+def test_multipaths(topology_st, request):
|
||||
+ """Test memberof succeeds to update memberof when
|
||||
+ there are multiple paths from a leaf to an intermediate node
|
||||
+
|
||||
+ :id: 35aa704a-b895-4153-9dcb-1e8a13612ebf
|
||||
+
|
||||
+ :setup: Single instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Create a graph G1->U1, G2->G21->U1
|
||||
+ 2. Add G2 as member of G1: G1->U1, G1->G2->G21->U1
|
||||
+ 3. Check members and memberof in entries G1,G2,G21,User1
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Graph should be created
|
||||
+ 2. succeed
|
||||
+ 3. Membership is okay
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+ memberof = MemberOfPlugin(inst)
|
||||
+ memberof.enable()
|
||||
+ memberof.replace('memberOfEntryScope', SUFFIX)
|
||||
+ if (memberof.get_memberofdeferredupdate() and memberof.get_memberofdeferredupdate().lower() == "on"):
|
||||
+ delay = 3
|
||||
+ else:
|
||||
+ delay = 0
|
||||
+ inst.restart()
|
||||
+
|
||||
+ #
|
||||
+ # Create the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ---------------> User1
|
||||
+ # ^
|
||||
+ # /
|
||||
+ # Grp2 ----> Grp21 ------/
|
||||
+ #
|
||||
+ users = UserAccounts(inst, SUFFIX, rdn=None)
|
||||
+ user1 = users.create(properties={'uid': "user1",
|
||||
+ 'cn': "user1",
|
||||
+ 'sn': 'SN',
|
||||
+ 'description': 'leaf',
|
||||
+ 'uidNumber': '1000',
|
||||
+ 'gidNumber': '2000',
|
||||
+ 'homeDirectory': '/home/user1'
|
||||
+ })
|
||||
+ group = Groups(inst, SUFFIX, rdn=None)
|
||||
+ g1 = group.create(properties={'cn': 'group1',
|
||||
+ 'member': user1.dn,
|
||||
+ 'description': 'group1'})
|
||||
+ g21 = group.create(properties={'cn': 'group21',
|
||||
+ 'member': user1.dn,
|
||||
+ 'description': 'group21'})
|
||||
+ g2 = group.create(properties={'cn': 'group2',
|
||||
+ 'member': [g21.dn],
|
||||
+ 'description': 'group2'})
|
||||
+
|
||||
+ # Enable debug logs if necessary
|
||||
+ #inst.config.replace('nsslapd-errorlog-level', '65536')
|
||||
+ #inst.config.set('nsslapd-accesslog-level','260')
|
||||
+ #inst.config.set('nsslapd-plugin-logging', 'on')
|
||||
+ #inst.config.set('nsslapd-auditlog-logging-enabled','on')
|
||||
+ #inst.config.set('nsslapd-auditfaillog-logging-enabled','on')
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # \ ^
|
||||
+ # \ /
|
||||
+ # --> Grp2 --> Grp21 --
|
||||
+ #
|
||||
+ g1.add_member(g2.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g1, expected_members=[g2, user1], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g1])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
+
|
||||
+ def fin():
|
||||
+ try:
|
||||
+ user1.delete()
|
||||
+ g1.delete()
|
||||
+ g2.delete()
|
||||
+ g21.delete()
|
||||
+ except:
|
||||
+ pass
|
||||
+ request.addfinalizer(fin)
|
||||
|
||||
def _config_memberof_entrycache_on_modrdn_failure(server):
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/plugins/memberof_test.py b/dirsrvtests/tests/suites/plugins/memberof_test.py
|
||||
index 2de1389fd..621c45daf 100644
|
||||
--- a/dirsrvtests/tests/suites/plugins/memberof_test.py
|
||||
+++ b/dirsrvtests/tests/suites/plugins/memberof_test.py
|
||||
@@ -2168,9 +2168,14 @@ def test_complex_group_scenario_6(topology_st):
|
||||
|
||||
# add Grp[1-4] (uniqueMember) to grp5
|
||||
# it creates a membership loop !!!
|
||||
+ topology_st.standalone.config.replace('nsslapd-errorlog-level', '65536')
|
||||
mods = [(ldap.MOD_ADD, 'uniqueMember', memofegrp020_5)]
|
||||
for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
|
||||
topology_st.standalone.modify_s(ensure_str(grp), mods)
|
||||
+ topology_st.standalone.config.replace('nsslapd-errorlog-level', '0')
|
||||
+
|
||||
+ results = topology_st.standalone.ds_error_log.match('.*detecting a loop in group.*')
|
||||
+ assert results
|
||||
|
||||
time.sleep(5)
|
||||
# assert user[1-4] are member of grp20_[1-4]
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index e75b99b14..32bdcf3f1 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -1592,7 +1592,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
|
||||
ht_grp = ancestors_cache_lookup(config, (const void *)ndn);
|
||||
if (ht_grp) {
|
||||
#if MEMBEROF_CACHE_DEBUG
|
||||
- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%x)\n", ndn, ht_grp);
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%lx)\n", ndn, (ulong) ht_grp);
|
||||
#endif
|
||||
add_ancestors_cbdata(ht_grp, callback_data);
|
||||
*cached = 1;
|
||||
@@ -1600,7 +1600,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
|
||||
}
|
||||
}
|
||||
#if MEMBEROF_CACHE_DEBUG
|
||||
- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s not cached\n", ndn);
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s not cached\n", slapi_sdn_get_ndn(sdn));
|
||||
#endif
|
||||
|
||||
/* Escape the dn, and build the search filter. */
|
||||
@@ -3233,7 +3233,8 @@ cache_ancestors(MemberOfConfig *config, Slapi_Value **member_ndn_val, memberof_g
|
||||
return;
|
||||
}
|
||||
#if MEMBEROF_CACHE_DEBUG
|
||||
- if (double_check = ancestors_cache_lookup(config, (const void*) key)) {
|
||||
+ double_check = ancestors_cache_lookup(config, (const void*) key);
|
||||
+ if (double_check) {
|
||||
dump_cache_entry(double_check, "read back");
|
||||
}
|
||||
#endif
|
||||
@@ -3263,13 +3264,13 @@ merge_ancestors(Slapi_Value **member_ndn_val, memberof_get_groups_data *v1, memb
|
||||
sval_dn = slapi_value_new_string(slapi_value_get_string(sval));
|
||||
if (sval_dn) {
|
||||
/* Use the normalized dn from v1 to search it
|
||||
- * in v2
|
||||
- */
|
||||
+ * in v2
|
||||
+ */
|
||||
val_sdn = slapi_sdn_new_dn_byval(slapi_value_get_string(sval_dn));
|
||||
sval_ndn = slapi_value_new_string(slapi_sdn_get_ndn(val_sdn));
|
||||
if (!slapi_valueset_find(
|
||||
((memberof_get_groups_data *)v2)->config->group_slapiattrs[0], v2_group_norm_vals, sval_ndn)) {
|
||||
-/* This ancestor was not already present in v2 => Add it
|
||||
+ /* This ancestor was not already present in v2 => Add it
|
||||
* Using slapi_valueset_add_value it consumes val
|
||||
* so do not free sval
|
||||
*/
|
||||
@@ -3318,7 +3319,7 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn, memberof_get
|
||||
|
||||
merge_ancestors(&member_ndn_val, &member_data, data);
|
||||
if (!cached && member_data.use_cache)
|
||||
- cache_ancestors(config, &member_ndn_val, &member_data);
|
||||
+ cache_ancestors(config, &member_ndn_val, data);
|
||||
|
||||
slapi_value_free(&member_ndn_val);
|
||||
slapi_valueset_free(groupvals);
|
||||
@@ -3379,25 +3380,6 @@ memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
|
||||
goto bail;
|
||||
}
|
||||
|
||||
- /* Have we been here before? Note that we don't loop through all of the group_slapiattrs
|
||||
- * in config. We only need this attribute for it's syntax so the comparison can be
|
||||
- * performed. Since all of the grouping attributes are validated to use the Dinstinguished
|
||||
- * Name syntax, we can safely just use the first group_slapiattr. */
|
||||
- if (slapi_valueset_find(
|
||||
- ((memberof_get_groups_data *)callback_data)->config->group_slapiattrs[0], already_seen_ndn_vals, group_ndn_val)) {
|
||||
- /* we either hit a recursive grouping, or an entry is
|
||||
- * a member of a group through multiple paths. Either
|
||||
- * way, we can just skip processing this entry since we've
|
||||
- * already gone through this part of the grouping hierarchy. */
|
||||
- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
- "memberof_get_groups_callback - Possible group recursion"
|
||||
- " detected in %s\n",
|
||||
- group_ndn);
|
||||
- slapi_value_free(&group_ndn_val);
|
||||
- ((memberof_get_groups_data *)callback_data)->use_cache = PR_FALSE;
|
||||
- goto bail;
|
||||
- }
|
||||
-
|
||||
/* if the group does not belong to an excluded subtree, adds it to the valueset */
|
||||
if (memberof_entry_in_scope(config, group_sdn)) {
|
||||
/* Push group_dn_val into the valueset. This memory is now owned
|
||||
@@ -3407,9 +3389,21 @@ memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
|
||||
group_dn_val = slapi_value_new_string(group_dn);
|
||||
slapi_valueset_add_value_ext(groupvals, group_dn_val, SLAPI_VALUE_FLAG_PASSIN);
|
||||
|
||||
- /* push this ndn to detect group recursion */
|
||||
- already_seen_ndn_val = slapi_value_new_string(group_ndn);
|
||||
- slapi_valueset_add_value_ext(already_seen_ndn_vals, already_seen_ndn_val, SLAPI_VALUE_FLAG_PASSIN);
|
||||
+ if (slapi_valueset_find(
|
||||
+ ((memberof_get_groups_data *)callback_data)->config->group_slapiattrs[0], already_seen_ndn_vals, group_ndn_val)) {
|
||||
+ /* The group group_ndn_val has already been processed
|
||||
+ * skip the final recursion to prevent infinite loop
|
||||
+ */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "memberof_get_groups_callback - detecting a loop in group %s (stop building memberof)\n",
|
||||
+ group_ndn);
|
||||
+ ((memberof_get_groups_data *)callback_data)->use_cache = PR_FALSE;
|
||||
+ goto bail;
|
||||
+ } else {
|
||||
+ /* keep this ndn to detect a possible group recursion */
|
||||
+ already_seen_ndn_val = slapi_value_new_string(group_ndn);
|
||||
+ slapi_valueset_add_value_ext(already_seen_ndn_vals, already_seen_ndn_val, SLAPI_VALUE_FLAG_PASSIN);
|
||||
+ }
|
||||
}
|
||||
if (!config->skip_nested || config->fixup_task) {
|
||||
/* now recurse to find ancestors groups of e */
|
||||
--
|
||||
2.49.0
|
||||
|
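The bug and fix descriptions above come down to where the "already seen" bookkeeping is applied: it must only stop the recursion, never prevent an ancestor from being recorded, otherwise an ancestor reachable through a second path can be dropped. A toy model of the fixed traversal (pure Python, no Directory Server code, group names taken from the test graph):

# Toy model of the fixed traversal: the visited set only cuts recursion and
# never decides whether an ancestor is recorded, so the result stays complete
# on multi-path graphs and the walk still terminates on membership cycles.
PARENTS = {
    'user1':   ['group1', 'group21'],   # G1 -> U1 and G21 -> U1
    'group21': ['group2'],              # G2 -> G21
    'group2':  ['group1'],              # G1 -> G2 (second path up to G1)
    'group1':  ['group2'],              # loop: G2 is also a member of G1
}

def memberof(entry):
    ancestors, seen = set(), set()

    def walk(node):
        for parent in PARENTS.get(node, []):
            ancestors.add(parent)       # always record the ancestor (the fix)
            if parent not in seen:      # recurse only the first time we see it
                seen.add(parent)
                walk(parent)

    walk(entry)
    return ancestors

# Terminates despite the group1 <-> group2 loop and reports every ancestor:
print(sorted(memberof('user1')))   # ['group1', 'group2', 'group21']
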
@ -1,272 +0,0 @@
|
||||
From 17da0257b24749765777a4e64c3626cb39cca639 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 31 Mar 2025 11:05:01 +0200
|
||||
Subject: [PATCH] Issue 6571 - (2nd) Nested group does not receive memberOf
|
||||
attribute (#6697)
|
||||
|
||||
Bug description:
|
||||
erroneous debug change made in previous fix
|
||||
where cache_ancestors is called with the wrong parameter
|
||||
|
||||
Fix description:
|
||||
Restore the original param 'member_data'
|
||||
Increase the set of tests around multipaths
|
||||
|
||||
fixes: #6571
|
||||
|
||||
Reviewed by: Simon Pichugin (Thanks !!)
|
||||
---
|
||||
.../suites/memberof_plugin/regression_test.py | 154 ++++++++++++++++++
|
||||
ldap/servers/plugins/memberof/memberof.c | 50 +++++-
|
||||
2 files changed, 203 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
index dba908975..9ba40a0c3 100644
|
||||
--- a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
@@ -598,6 +598,8 @@ def test_multipaths(topology_st, request):
|
||||
'homeDirectory': '/home/user1'
|
||||
})
|
||||
group = Groups(inst, SUFFIX, rdn=None)
|
||||
+ g0 = group.create(properties={'cn': 'group0',
|
||||
+ 'description': 'group0'})
|
||||
g1 = group.create(properties={'cn': 'group1',
|
||||
'member': user1.dn,
|
||||
'description': 'group1'})
|
||||
@@ -635,6 +637,158 @@ def test_multipaths(topology_st, request):
|
||||
_check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1])
|
||||
_check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
|
||||
+ #inst.config.replace('nsslapd-errorlog-level', '65536')
|
||||
+ #inst.config.set('nsslapd-accesslog-level','260')
|
||||
+ #inst.config.set('nsslapd-plugin-logging', 'on')
|
||||
+ #inst.config.set('nsslapd-auditlog-logging-enabled','on')
|
||||
+ #inst.config.set('nsslapd-auditfaillog-logging-enabled','on')
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # ^
|
||||
+ # /
|
||||
+ # Grp2 --> Grp21 --
|
||||
+ #
|
||||
+ g1.remove_member(g2.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g1, expected_members=[user1], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # \__________ ^
|
||||
+ # | /
|
||||
+ # v /
|
||||
+ # Grp2 --> Grp21 ----
|
||||
+ #
|
||||
+ g1.add_member(g21.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g1, expected_members=[user1, g21], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # ^
|
||||
+ # /
|
||||
+ # Grp2 --> Grp21 --
|
||||
+ #
|
||||
+ g1.remove_member(g21.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g1, expected_members=[user1], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[])
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2])
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
+
+ #
+ # Update the hierarchy
+ #
+ #
+ # Grp1 ----------------> User1
+ # ^
+ # /
+ # Grp0 ---> Grp2 ---> Grp21 ---
+ #
+ g0.add_member(g2.dn)
+ time.sleep(delay)
+
+ #
+ # Check G0,G1, G2, G21 and User1 members and memberof
+ #
+ _check_membership(inst, g0, expected_members=[g2], expected_memberof=[])
+ _check_membership(inst, g1, expected_members=[user1], expected_memberof=[])
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g0])
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g2])
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1, g0])
+
+ #
+ # Update the hierarchy
+ #
+ #
+ # Grp1 ----------------> User1
+ # ^ ^
+ # / /
+ # Grp0 ---> Grp2 ---> Grp21 ---
+ #
+ g0.add_member(g1.dn)
+ time.sleep(delay)
+
+ #
+ # Check G0,G1, G2, G21 and User1 members and memberof
+ #
+ _check_membership(inst, g0, expected_members=[g1,g2], expected_memberof=[])
+ _check_membership(inst, g1, expected_members=[user1], expected_memberof=[g0])
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g0])
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g2])
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1, g0])
+
+ #
+ # Update the hierarchy
+ #
+ #
+ # Grp1 ----------------> User1
+ # ^ \_____________ ^
+ # / | /
+ # / V /
+ # Grp0 ---> Grp2 ---> Grp21 ---
+ #
+ g1.add_member(g21.dn)
+ time.sleep(delay)
+
+ #
+ # Check G0,G1, G2, G21 and User1 members and memberof
+ #
+ _check_membership(inst, g0, expected_members=[g1, g2], expected_memberof=[])
+ _check_membership(inst, g1, expected_members=[user1, g21], expected_memberof=[g0])
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g0])
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g1, g2])
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1, g0])
+
+ #
+ # Update the hierarchy
+ #
+ #
+ # Grp1 ----------------> User1
+ # ^ \_____________ ^
+ # / | /
+ # / V /
+ # Grp0 ---> Grp2 Grp21 ---
+ #
+ g2.remove_member(g21.dn)
+ time.sleep(delay)
+
+ #
+ # Check G0,G1, G2, G21 and User1 members and memberof
+ #
+ _check_membership(inst, g0, expected_members=[g1, g2], expected_memberof=[])
+ _check_membership(inst, g1, expected_members=[user1, g21], expected_memberof=[g0])
+ _check_membership(inst, g2, expected_members=[], expected_memberof=[g0])
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g1])
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g1, g0])
+
def fin():
try:
user1.delete()
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index 32bdcf3f1..f79b083a9 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -3258,6 +3258,35 @@ merge_ancestors(Slapi_Value **member_ndn_val, memberof_get_groups_data *v1, memb
|
||||
Slapi_ValueSet *v2_group_norm_vals = *((memberof_get_groups_data *)v2)->group_norm_vals;
|
||||
int merged_cnt = 0;
|
||||
|
||||
+#if MEMBEROF_CACHE_DEBUG
|
||||
+ {
|
||||
+ Slapi_Value *val = 0;
|
||||
+ int hint = 0;
|
||||
+ struct berval *bv;
|
||||
+ hint = slapi_valueset_first_value(v2_groupvals, &val);
|
||||
+ while (val) {
|
||||
+ /* this makes a copy of the berval */
|
||||
+ bv = slapi_value_get_berval(val);
|
||||
+ if (bv && bv->bv_len) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "merge_ancestors: V2 contains %s\n",
|
||||
+ bv->bv_val);
|
||||
+ }
|
||||
+ hint = slapi_valueset_next_value(v2_groupvals, hint, &val);
|
||||
+ }
|
||||
+ hint = slapi_valueset_first_value(v1_groupvals, &val);
|
||||
+ while (val) {
|
||||
+ /* this makes a copy of the berval */
|
||||
+ bv = slapi_value_get_berval(val);
|
||||
+ if (bv && bv->bv_len) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "merge_ancestors: add %s (from V1)\n",
|
||||
+ bv->bv_val);
|
||||
+ }
|
||||
+ hint = slapi_valueset_next_value(v1_groupvals, hint, &val);
|
||||
+ }
|
||||
+ }
|
||||
+#endif
|
||||
hint = slapi_valueset_first_value(v1_groupvals, &sval);
|
||||
while (sval) {
|
||||
if (memberof_compare(config, member_ndn_val, &sval)) {
|
||||
@@ -3319,7 +3348,7 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn, memberof_get
|
||||
|
||||
merge_ancestors(&member_ndn_val, &member_data, data);
|
||||
if (!cached && member_data.use_cache)
|
||||
- cache_ancestors(config, &member_ndn_val, data);
|
||||
+ cache_ancestors(config, &member_ndn_val, &member_data);
|
||||
|
||||
slapi_value_free(&member_ndn_val);
|
||||
slapi_valueset_free(groupvals);
|
||||
@@ -4285,6 +4314,25 @@ memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data)
|
||||
|
||||
/* get a list of all of the groups this user belongs to */
|
||||
groups = memberof_get_groups(config, sdn);
|
||||
+#if MEMBEROF_CACHE_DEBUG
|
||||
+ {
|
||||
+ Slapi_Value *val = 0;
|
||||
+ int hint = 0;
|
||||
+ struct berval *bv;
|
||||
+ hint = slapi_valueset_first_value(groups, &val);
|
||||
+ while (val) {
|
||||
+ /* this makes a copy of the berval */
|
||||
+ bv = slapi_value_get_berval(val);
|
||||
+ if (bv && bv->bv_len) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "memberof_fix_memberof_callback: %s belongs to %s\n",
|
||||
+ ndn,
|
||||
+ bv->bv_val);
|
||||
+ }
|
||||
+ hint = slapi_valueset_next_value(groups, hint, &val);
|
||||
+ }
|
||||
+ }
|
||||
+#endif
|
||||
|
||||
if (config->group_filter) {
|
||||
if (slapi_filter_test_simple(e, config->group_filter)) {
|
||||
--
|
||||
2.49.0
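
The cascading checks above go through _check_membership, a helper defined earlier in this patch (its definition is not part of this hunk). A minimal sketch of what such a check can look like with lib389, assuming the entries are DSLdapObject instances, that get_attr_vals_utf8() returns an empty list when the attribute is absent, and that the memberof plugin has already settled (hence the time.sleep(delay) calls above); this is an illustration, not the patch's actual helper:

    # Illustrative sketch only -- the real _check_membership is defined
    # earlier in the patch and may differ in detail.
    def check_membership(inst, entry, expected_members, expected_memberof):
        """Assert that an entry's member/memberOf values match the expected entries."""
        # inst is unused here; the real helper may use it to re-read the entry.
        members = {dn.lower() for dn in entry.get_attr_vals_utf8('member')}
        memberof = {dn.lower() for dn in entry.get_attr_vals_utf8('memberOf')}
        assert members == {e.dn.lower() for e in expected_members}
        assert memberof == {e.dn.lower() for e in expected_memberof}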
|
||||
|
@ -1,192 +0,0 @@
|
||||
From ff364a4b1c88e1a8f678e056af88cce50cd8717c Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Fri, 28 Mar 2025 17:32:14 +0100
|
||||
Subject: [PATCH] Issue 6698 - NPE after configuring invalid filtered role
|
||||
(#6699)
|
||||
|
||||
The server crashes when a search is performed after a filtered role has been configured with an invalid filter.
Reason: the parts of the filter that should be overwritten are freed before the new filter is known to be valid.
Solution: check first that the filter is valid before freeing the filter bits.
|
||||
|
||||
Issue: #6698
|
||||
|
||||
Reviewed by: @tbordaz , @mreynolds389 (Thanks!)
|
||||
|
||||
(cherry picked from commit 31e120d2349eda7a41380cf78fc04cf41e394359)
|
||||
---
|
||||
dirsrvtests/tests/suites/roles/basic_test.py | 80 ++++++++++++++++++--
|
||||
ldap/servers/slapd/filter.c | 17 ++++-
|
||||
2 files changed, 88 insertions(+), 9 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
index 875ac47c1..b79816c58 100644
|
||||
--- a/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
@@ -28,6 +28,7 @@ from lib389.dbgen import dbgen_users
|
||||
from lib389.tasks import ImportTask
|
||||
from lib389.utils import get_default_db_lib
|
||||
from lib389.rewriters import *
|
||||
+from lib389._mapped_object import DSLdapObject
|
||||
from lib389.backend import Backends
|
||||
|
||||
logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
@@ -427,7 +428,6 @@ def test_vattr_on_filtered_role_restart(topo, request):
|
||||
log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
|
||||
assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
|
||||
|
||||
-
|
||||
log.info("Check the virtual attribute definition is found (after a required delay)")
|
||||
topo.standalone.restart()
|
||||
time.sleep(5)
|
||||
@@ -541,7 +541,7 @@ def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
indexes = backend.get_indexes()
|
||||
try:
|
||||
index = indexes.create(properties={
|
||||
- 'cn': attrname,
|
||||
+ 'cn': attrname,
|
||||
'nsSystemIndex': 'false',
|
||||
'nsIndexType': ['eq', 'pres']
|
||||
})
|
||||
@@ -593,7 +593,6 @@ def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
dn = "uid=%s0000%d,%s" % (RDN, i, PARENT)
|
||||
topo.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsRoleDN', [role.dn.encode()])])
|
||||
|
||||
-
|
||||
# Now check that search is fast, evaluating only 4 entries
|
||||
search_start = time.time()
|
||||
entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
@@ -676,7 +675,7 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
indexes = backend.get_indexes()
|
||||
try:
|
||||
index = indexes.create(properties={
|
||||
- 'cn': attrname,
|
||||
+ 'cn': attrname,
|
||||
'nsSystemIndex': 'false',
|
||||
'nsIndexType': ['eq', 'pres']
|
||||
})
|
||||
@@ -730,7 +729,7 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
|
||||
# Enable plugin level to check message
|
||||
topo.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.PLUGIN))
|
||||
-
|
||||
+
|
||||
# Now check that search is fast, evaluating only 4 entries
|
||||
search_start = time.time()
|
||||
entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(|(nsrole=%s)(nsrole=cn=not_such_entry_role,%s))" % (role.dn, DEFAULT_SUFFIX))
|
||||
@@ -758,6 +757,77 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+
|
||||
+def test_rewriter_with_invalid_filter(topo, request):
|
||||
+ """Test that server does not crash when having
|
||||
+ invalid filter in filtered role
|
||||
+
|
||||
+ :id: 5013b0b2-0af6-11f0-8684-482ae39447e5
|
||||
+ :setup: standalone server
|
||||
+ :steps:
|
||||
+ 1. Setup filtered role with good filter
|
||||
+ 2. Setup nsrole rewriter
|
||||
+ 3. Restart the server
|
||||
+ 4. Search for entries
|
||||
+ 5. Setup filtered role with bad filter
|
||||
+ 6. Search for entries
|
||||
+ :expectedresults:
|
||||
+ 1. Operation should succeed
|
||||
+ 2. Operation should succeed
|
||||
+ 3. Operation should succeed
|
||||
+ 4. Operation should succeed
|
||||
+ 5. Operation should succeed
|
||||
+ 6. Operation should succeed
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ entries = []
|
||||
+
|
||||
+ def fin():
|
||||
+ inst.start()
|
||||
+ for entry in entries:
|
||||
+ entry.delete()
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Setup filtered role
|
||||
+ roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
|
||||
+ filter_ko = '(&((objectClass=top)(objectClass=nsPerson))'
|
||||
+ filter_ok = '(&(objectClass=top)(objectClass=nsPerson))'
|
||||
+ role_properties = {
|
||||
+ 'cn': 'TestFilteredRole',
|
||||
+ 'nsRoleFilter': filter_ok,
|
||||
+ 'description': 'Test good filter',
|
||||
+ }
|
||||
+ role = roles.create(properties=role_properties)
|
||||
+ entries.append(role)
|
||||
+
|
||||
+ # Setup nsrole rewriter
|
||||
+ rewriters = Rewriters(inst)
|
||||
+ rewriter_properties = {
|
||||
+ "cn": "nsrole",
|
||||
+ "nsslapd-libpath": 'libroles-plugin',
|
||||
+ "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter',
|
||||
+ }
|
||||
+ rewriter = rewriters.ensure_state(properties=rewriter_properties)
|
||||
+ entries.append(rewriter)
|
||||
+
|
||||
+ # Restart the instance
|
||||
+ inst.restart()
|
||||
+
|
||||
+ # Search for entries
|
||||
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
+
|
||||
+ # Set bad filter
|
||||
+ role_properties = {
|
||||
+ 'cn': 'TestFilteredRole',
|
||||
+ 'nsRoleFilter': filter_ko,
|
||||
+ 'description': 'Test bad filter',
|
||||
+ }
|
||||
+ role.ensure_state(properties=role_properties)
|
||||
+
|
||||
+ # Search for entries
|
||||
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
+
|
||||
+
|
||||
if __name__ == "__main__":
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main("-s -v %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/slapd/filter.c b/ldap/servers/slapd/filter.c
|
||||
index ce09891b8..f541b8fc1 100644
|
||||
--- a/ldap/servers/slapd/filter.c
|
||||
+++ b/ldap/servers/slapd/filter.c
|
||||
@@ -1038,9 +1038,11 @@ slapi_filter_get_subfilt(
|
||||
}
|
||||
|
||||
/*
|
||||
- * Before calling this function, you must free all the parts
|
||||
+ * The function does not know how to free all the parts
|
||||
* which will be overwritten (i.e. slapi_free_the_filter_bits),
|
||||
- * this function dosn't know how to do that
|
||||
+ * so the caller must take care of that.
|
||||
+ * But it must do so AFTER calling slapi_filter_replace_ex to
|
||||
+ * avoid getting invalid filter if slapi_filter_replace_ex fails.
|
||||
*/
|
||||
int
|
||||
slapi_filter_replace_ex(Slapi_Filter *f, char *s)
|
||||
@@ -1099,8 +1101,15 @@ slapi_filter_free_bits(Slapi_Filter *f)
|
||||
int
|
||||
slapi_filter_replace_strfilter(Slapi_Filter *f, char *strfilter)
|
||||
{
|
||||
- slapi_filter_free_bits(f);
|
||||
- return (slapi_filter_replace_ex(f, strfilter));
|
||||
+ /* slapi_filter_replace_ex may fail and we cannot
|
||||
+ * free filter bits before calling it.
|
||||
+ */
|
||||
+ Slapi_Filter save_f = *f;
|
||||
+ int ret = slapi_filter_replace_ex(f, strfilter);
|
||||
+ if (ret == 0) {
|
||||
+ slapi_filter_free_bits(&save_f);
|
||||
+ }
|
||||
+ return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
--
|
||||
2.49.0
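
The server-side fix above defers freeing the old filter bits until slapi_filter_replace_ex has succeeded, so an invalid replacement filter leaves the role's previous filter intact. A rough lib389 sketch of the scenario the new test exercises, assuming inst is a connected DirSrv instance (e.g. topo.standalone) with the nsrole rewriter configured as in the test; this is a sketch of the reproduction, not the test itself:

    import ldap
    from lib389 import DEFAULT_SUFFIX
    from lib389.idm.role import FilteredRoles

    # Create a filtered role with a valid filter, then swap in a syntactically
    # invalid one (unbalanced parenthesis) and verify that the nsrole search
    # still returns instead of crashing the server.
    roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
    role = roles.create(properties={
        'cn': 'DemoFilteredRole',
        'nsRoleFilter': '(&(objectClass=top)(objectClass=nsPerson))',
        'description': 'demo role',
    })
    role.replace('nsRoleFilter', '(&((objectClass=top)(objectClass=nsPerson))')
    inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(nsrole={role.dn})')
    role.delete()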
|
||||
|
@ -1,455 +0,0 @@
|
||||
From 446a23d0ed2d3ffa76c5fb5e9576d6876bdbf04f Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Fri, 28 Mar 2025 11:28:54 -0700
|
||||
Subject: [PATCH] Issue 6686 - CLI - Re-enabling user accounts that reached
|
||||
inactivity limit fails with error (#6687)
|
||||
|
||||
Description: When attempting to unlock a user account that has been locked due
|
||||
to exceeding the Account Policy Plugin's inactivity limit, the dsidm account
|
||||
unlock command fails with a Python type error: "float() argument must be a
|
||||
string or a number, not 'NoneType'".
|
||||
|
||||
Enhance the unlock method to properly handle different account locking states,
|
||||
including inactivity limit exceeded states.
|
||||
Add test cases to verify account inactivity locking/unlocking functionality
|
||||
with CoS and role-based indirect locking.
|
||||
|
||||
Fix CoS template class to include the required 'ldapsubentry' objectClass.
|
||||
Improve error messages to provide better guidance on unlocking indirectly
|
||||
locked accounts.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6686
|
||||
|
||||
Reviewed by: @mreynolds389 (Thanks!)
|
||||
---
|
||||
.../clu/dsidm_account_inactivity_test.py | 329 ++++++++++++++++++
|
||||
src/lib389/lib389/cli_idm/account.py | 25 +-
|
||||
src/lib389/lib389/idm/account.py | 28 +-
|
||||
3 files changed, 377 insertions(+), 5 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py b/dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py
|
||||
new file mode 100644
|
||||
index 000000000..88a34abf6
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py
|
||||
@@ -0,0 +1,329 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import ldap
|
||||
+import time
|
||||
+import pytest
|
||||
+import logging
|
||||
+import os
|
||||
+from datetime import datetime, timedelta
|
||||
+
|
||||
+from lib389 import DEFAULT_SUFFIX, DN_PLUGIN, DN_CONFIG
|
||||
+from lib389.cli_idm.account import entry_status, unlock
|
||||
+from lib389.topologies import topology_st
|
||||
+from lib389.cli_base import FakeArgs
|
||||
+from lib389.utils import ds_is_older
|
||||
+from lib389.plugins import AccountPolicyPlugin, AccountPolicyConfigs
|
||||
+from lib389.idm.role import FilteredRoles
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.cos import CosTemplate, CosPointerDefinition
|
||||
+from lib389.idm.domain import Domain
|
||||
+from . import check_value_in_log_and_reset
|
||||
+
|
||||
+pytestmark = pytest.mark.tier0
|
||||
+
|
||||
+logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+# Constants
|
||||
+PLUGIN_ACCT_POLICY = "Account Policy Plugin"
|
||||
+ACCP_DN = f"cn={PLUGIN_ACCT_POLICY},{DN_PLUGIN}"
|
||||
+ACCP_CONF = f"{DN_CONFIG},{ACCP_DN}"
|
||||
+POLICY_NAME = "Account Inactivity Policy"
|
||||
+POLICY_DN = f"cn={POLICY_NAME},{DEFAULT_SUFFIX}"
|
||||
+COS_TEMPLATE_NAME = "TemplateCoS"
|
||||
+COS_TEMPLATE_DN = f"cn={COS_TEMPLATE_NAME},{DEFAULT_SUFFIX}"
|
||||
+COS_DEFINITION_NAME = "DefinitionCoS"
|
||||
+COS_DEFINITION_DN = f"cn={COS_DEFINITION_NAME},{DEFAULT_SUFFIX}"
|
||||
+TEST_USER_NAME = "test_inactive_user"
|
||||
+TEST_USER_DN = f"uid={TEST_USER_NAME},{DEFAULT_SUFFIX}"
|
||||
+TEST_USER_PW = "password"
|
||||
+INACTIVITY_LIMIT = 30
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="function")
|
||||
+def account_policy_setup(topology_st, request):
|
||||
+ """Set up account policy plugin, configuration, and CoS objects"""
|
||||
+ log.info("Setting up Account Policy Plugin and CoS")
|
||||
+
|
||||
+ # Enable Account Policy Plugin
|
||||
+ plugin = AccountPolicyPlugin(topology_st.standalone)
|
||||
+ if not plugin.status():
|
||||
+ plugin.enable()
|
||||
+ plugin.set('nsslapd-pluginarg0', ACCP_CONF)
|
||||
+
|
||||
+ # Configure Account Policy
|
||||
+ accp_configs = AccountPolicyConfigs(topology_st.standalone)
|
||||
+ accp_config = accp_configs.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': 'config',
|
||||
+ 'alwaysrecordlogin': 'yes',
|
||||
+ 'stateattrname': 'lastLoginTime',
|
||||
+ 'altstateattrname': '1.1',
|
||||
+ 'specattrname': 'acctPolicySubentry',
|
||||
+ 'limitattrname': 'accountInactivityLimit'
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Add ACI for anonymous access if it doesn't exist
|
||||
+ domain = Domain(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+ anon_aci = '(targetattr="*")(version 3.0; acl "Anonymous read access"; allow (read,search,compare) userdn="ldap:///anyone";)'
|
||||
+ domain.ensure_present('aci', anon_aci)
|
||||
+
|
||||
+ # Restart the server to apply plugin configuration
|
||||
+ topology_st.standalone.restart()
|
||||
+
|
||||
+ # Create or update account policy entry
|
||||
+ accp_configs = AccountPolicyConfigs(topology_st.standalone, basedn=DEFAULT_SUFFIX)
|
||||
+ policy = accp_configs.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': POLICY_NAME,
|
||||
+ 'objectClass': ['top', 'ldapsubentry', 'extensibleObject', 'accountpolicy'],
|
||||
+ 'accountInactivityLimit': str(INACTIVITY_LIMIT)
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Create or update CoS template entry
|
||||
+ cos_template = CosTemplate(topology_st.standalone, dn=COS_TEMPLATE_DN)
|
||||
+ cos_template.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': COS_TEMPLATE_NAME,
|
||||
+ 'objectClass': ['top', 'cosTemplate', 'extensibleObject'],
|
||||
+ 'acctPolicySubentry': policy.dn
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Create or update CoS definition entry
|
||||
+ cos_def = CosPointerDefinition(topology_st.standalone, dn=COS_DEFINITION_DN)
|
||||
+ cos_def.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': COS_DEFINITION_NAME,
|
||||
+ 'objectClass': ['top', 'ldapsubentry', 'cosSuperDefinition', 'cosPointerDefinition'],
|
||||
+ 'cosTemplateDn': COS_TEMPLATE_DN,
|
||||
+ 'cosAttribute': 'acctPolicySubentry default operational-default'
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Restart server to ensure CoS is applied
|
||||
+ topology_st.standalone.restart()
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info('Cleaning up Account Policy settings')
|
||||
+ try:
|
||||
+ # Delete CoS and policy entries
|
||||
+ if cos_def.exists():
|
||||
+ cos_def.delete()
|
||||
+ if cos_template.exists():
|
||||
+ cos_template.delete()
|
||||
+ if policy.exists():
|
||||
+ policy.delete()
|
||||
+
|
||||
+ # Disable the plugin
|
||||
+ if plugin.status():
|
||||
+ plugin.disable()
|
||||
+ topology_st.standalone.restart()
|
||||
+ except Exception as e:
|
||||
+ log.error(f'Failed to clean up: {e}')
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ return topology_st.standalone
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="function")
|
||||
+def create_test_user(topology_st, account_policy_setup, request):
|
||||
+ """Create a test user for the inactivity test"""
|
||||
+ log.info('Creating test user')
|
||||
+
|
||||
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+ user = users.ensure_state(
|
||||
+ properties={
|
||||
+ 'uid': TEST_USER_NAME,
|
||||
+ 'cn': TEST_USER_NAME,
|
||||
+ 'sn': TEST_USER_NAME,
|
||||
+ 'userPassword': TEST_USER_PW,
|
||||
+ 'uidNumber': '1000',
|
||||
+ 'gidNumber': '2000',
|
||||
+ 'homeDirectory': f'/home/{TEST_USER_NAME}'
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info('Deleting test user')
|
||||
+ if user.exists():
|
||||
+ user.delete()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+ return user
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Indirect account locking not implemented")
|
||||
+def test_dsidm_account_inactivity_lock_unlock(topology_st, create_test_user):
|
||||
+ """Test dsidm account unlock functionality with indirectly locked accounts
|
||||
+
|
||||
+ :id: d7b57083-6111-4dbf-af84-6fca7fc7fb31
|
||||
+ :setup: Standalone instance with Account Policy Plugin and CoS configured
|
||||
+ :steps:
|
||||
+ 1. Create a test user
|
||||
+ 2. Bind as the test user to set lastLoginTime
|
||||
+ 3. Check account status - should be active
|
||||
+ 4. Set user's lastLoginTime to a time in the past that exceeds inactivity limit
|
||||
+ 5. Check account status - should be locked due to inactivity
|
||||
+ 6. Attempt to bind as the user - should fail with constraint violation
|
||||
+ 7. Unlock the account using dsidm account unlock
|
||||
+ 8. Verify account status is active again
|
||||
+ 9. Verify the user can bind again
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Account status shows as activated
|
||||
+ 4. Success
|
||||
+ 5. Account status shows as inactivity limit exceeded
|
||||
+ 6. Bind attempt fails with constraint violation
|
||||
+ 7. Account unlocked successfully
|
||||
+ 8. Account status shows as activated
|
||||
+ 9. User can bind successfully
|
||||
+ """
|
||||
+ standalone = topology_st.standalone
|
||||
+ user = create_test_user
|
||||
+
|
||||
+ # Set up FakeArgs for dsidm commands
|
||||
+ args = FakeArgs()
|
||||
+ args.dn = user.dn
|
||||
+ args.json = False
|
||||
+ args.details = False
|
||||
+
|
||||
+ # 1. Check initial account status - should be active
|
||||
+ log.info('Step 1: Checking initial account status')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
|
||||
+
|
||||
+ # 2. Bind as test user to set initial lastLoginTime
|
||||
+ log.info('Step 2: Binding as test user to set lastLoginTime')
|
||||
+ try:
|
||||
+ conn = user.bind(TEST_USER_PW)
|
||||
+ conn.unbind()
|
||||
+ log.info("Successfully bound as test user")
|
||||
+ except ldap.LDAPError as e:
|
||||
+ pytest.fail(f"Failed to bind as test user: {e}")
|
||||
+
|
||||
+ # 3. Set lastLoginTime to a time in the past that exceeds inactivity limit
|
||||
+ log.info('Step 3: Setting lastLoginTime to the past')
|
||||
+ past_time = datetime.utcnow() - timedelta(seconds=INACTIVITY_LIMIT * 2)
|
||||
+ past_time_str = past_time.strftime('%Y%m%d%H%M%SZ')
|
||||
+ user.replace('lastLoginTime', past_time_str)
|
||||
+
|
||||
+ # 4. Check account status - should now be locked due to inactivity
|
||||
+ log.info('Step 4: Checking account status after setting old lastLoginTime')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: inactivity limit exceeded')
|
||||
+
|
||||
+ # 5. Attempt to bind as the user - should fail
|
||||
+ log.info('Step 5: Attempting to bind as user (should fail)')
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION) as excinfo:
|
||||
+ conn = user.bind(TEST_USER_PW)
|
||||
+ assert "Account inactivity limit exceeded" in str(excinfo.value)
|
||||
+
|
||||
+ # 6. Unlock the account using dsidm account unlock
|
||||
+ log.info('Step 6: Unlocking the account with dsidm')
|
||||
+ unlock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st,
|
||||
+ check_value='now unlocked by resetting lastLoginTime')
|
||||
+
|
||||
+ # 7. Verify account status is active again
|
||||
+ log.info('Step 7: Checking account status after unlock')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
|
||||
+
|
||||
+ # 8. Verify the user can bind again
|
||||
+ log.info('Step 8: Verifying user can bind again')
|
||||
+ try:
|
||||
+ conn = user.bind(TEST_USER_PW)
|
||||
+ conn.unbind()
|
||||
+ log.info("Successfully bound as test user after unlock")
|
||||
+ except ldap.LDAPError as e:
|
||||
+ pytest.fail(f"Failed to bind as test user after unlock: {e}")
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Indirect account locking not implemented")
|
||||
+def test_dsidm_indirectly_locked_via_role(topology_st, create_test_user):
|
||||
+ """Test dsidm account unlock functionality with accounts indirectly locked via role
|
||||
+
|
||||
+ :id: 7bfe69bb-cf99-4214-a763-051ab2b9cf89
|
||||
+ :setup: Standalone instance with Role and user configured
|
||||
+ :steps:
|
||||
+ 1. Create a test user
|
||||
+ 2. Create a Filtered Role that includes the test user
|
||||
+ 3. Lock the role
|
||||
+ 4. Check account status - should be indirectly locked through the role
|
||||
+ 5. Attempt to unlock the account - should fail with appropriate message
|
||||
+ 6. Unlock the role
|
||||
+ 7. Verify account status is active again
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Account status shows as indirectly locked
|
||||
+ 5. Unlock attempt fails with appropriate error message
|
||||
+ 6. Success
|
||||
+ 7. Account status shows as activated
|
||||
+ """
|
||||
+ standalone = topology_st.standalone
|
||||
+ user = create_test_user
|
||||
+
|
||||
+ # Use FilteredRoles and ensure_state for role creation
|
||||
+ log.info('Step 1: Creating Filtered Role')
|
||||
+ roles = FilteredRoles(standalone, DEFAULT_SUFFIX)
|
||||
+ role = roles.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': 'TestFilterRole',
|
||||
+ 'nsRoleFilter': f'(uid={TEST_USER_NAME})'
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Set up FakeArgs for dsidm commands
|
||||
+ args = FakeArgs()
|
||||
+ args.dn = user.dn
|
||||
+ args.json = False
|
||||
+ args.details = False
|
||||
+
|
||||
+ # 2. Check account status before locking role
|
||||
+ log.info('Step 2: Checking account status before locking role')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
|
||||
+
|
||||
+ # 3. Lock the role
|
||||
+ log.info('Step 3: Locking the role')
|
||||
+ role.lock()
|
||||
+
|
||||
+ # 4. Check account status - should be indirectly locked
|
||||
+ log.info('Step 4: Checking account status after locking role')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: indirectly locked through a Role')
|
||||
+
|
||||
+ # 5. Attempt to unlock the account - should fail
|
||||
+ log.info('Step 5: Attempting to unlock indirectly locked account')
|
||||
+ unlock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st,
|
||||
+ check_value='Account is locked through role')
|
||||
+
|
||||
+ # 6. Unlock the role
|
||||
+ log.info('Step 6: Unlocking the role')
|
||||
+ role.unlock()
|
||||
+
|
||||
+ # 7. Verify account status is active again
|
||||
+ log.info('Step 7: Checking account status after unlocking role')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main(["-s", CURRENT_FILE])
|
||||
\ No newline at end of file
|
||||
diff --git a/src/lib389/lib389/cli_idm/account.py b/src/lib389/lib389/cli_idm/account.py
|
||||
index 15f766588..a0dfd8f65 100644
|
||||
--- a/src/lib389/lib389/cli_idm/account.py
|
||||
+++ b/src/lib389/lib389/cli_idm/account.py
|
||||
@@ -176,8 +176,29 @@ def unlock(inst, basedn, log, args):
|
||||
dn = _get_dn_arg(args.dn, msg="Enter dn to unlock")
|
||||
accounts = Accounts(inst, basedn)
|
||||
acct = accounts.get(dn=dn)
|
||||
- acct.unlock()
|
||||
- log.info(f'Entry {dn} is unlocked')
|
||||
+
|
||||
+ try:
|
||||
+ # Get the account status before attempting to unlock
|
||||
+ status = acct.status()
|
||||
+ state = status["state"]
|
||||
+
|
||||
+ # Attempt to unlock the account
|
||||
+ acct.unlock()
|
||||
+
|
||||
+ # Success message
|
||||
+ log.info(f'Entry {dn} is unlocked')
|
||||
+ if state == AccountState.DIRECTLY_LOCKED:
|
||||
+ log.info(f'The entry was directly locked')
|
||||
+ elif state == AccountState.INACTIVITY_LIMIT_EXCEEDED:
|
||||
+ log.info(f'The entry was locked due to inactivity and is now unlocked by resetting lastLoginTime')
|
||||
+
|
||||
+ except ValueError as e:
|
||||
+ # Provide a more detailed error message based on failure reason
|
||||
+ if "through role" in str(e):
|
||||
+ log.error(f"Cannot unlock {dn}: {str(e)}")
|
||||
+ log.info("To unlock this account, you must modify the role that's locking it.")
|
||||
+ else:
|
||||
+ log.error(f"Failed to unlock {dn}: {str(e)}")
|
||||
|
||||
|
||||
def reset_password(inst, basedn, log, args):
|
||||
diff --git a/src/lib389/lib389/idm/account.py b/src/lib389/lib389/idm/account.py
|
||||
index 4b823b662..faf6f6f16 100644
|
||||
--- a/src/lib389/lib389/idm/account.py
|
||||
+++ b/src/lib389/lib389/idm/account.py
|
||||
@@ -140,7 +140,8 @@ class Account(DSLdapObject):
|
||||
"nsAccountLock", state_attr])
|
||||
|
||||
last_login_time = self._dict_get_with_ignore_indexerror(account_data, state_attr)
|
||||
- if not last_login_time:
|
||||
+ # if last_login_time not exist then check alt_state_attr only if its not disabled and exist
|
||||
+ if not last_login_time and alt_state_attr in account_data:
|
||||
last_login_time = self._dict_get_with_ignore_indexerror(account_data, alt_state_attr)
|
||||
|
||||
create_time = self._dict_get_with_ignore_indexerror(account_data, "createTimestamp")
|
||||
@@ -203,12 +204,33 @@ class Account(DSLdapObject):
|
||||
self.replace('nsAccountLock', 'true')
|
||||
|
||||
def unlock(self):
|
||||
- """Unset nsAccountLock"""
|
||||
+ """Unset nsAccountLock if it's set and reset lastLoginTime if account is locked due to inactivity"""
|
||||
|
||||
current_status = self.status()
|
||||
+
|
||||
if current_status["state"] == AccountState.ACTIVATED:
|
||||
raise ValueError("Account is already active")
|
||||
- self.remove('nsAccountLock', None)
|
||||
+
|
||||
+ if current_status["state"] == AccountState.DIRECTLY_LOCKED:
|
||||
+ # Account is directly locked with nsAccountLock attribute
|
||||
+ self.remove('nsAccountLock', None)
|
||||
+ elif current_status["state"] == AccountState.INACTIVITY_LIMIT_EXCEEDED:
|
||||
+ # Account is locked due to inactivity - reset lastLoginTime to current time
|
||||
+ # The lastLoginTime attribute stores its value in GMT/UTC time (Zulu time zone)
|
||||
+ current_time = time.strftime('%Y%m%d%H%M%SZ', time.gmtime())
|
||||
+ self.replace('lastLoginTime', current_time)
|
||||
+ elif current_status["state"] == AccountState.INDIRECTLY_LOCKED:
|
||||
+ # Account is locked through a role
|
||||
+ role_dn = current_status.get("role_dn")
|
||||
+ if role_dn:
|
||||
+ raise ValueError(f"Account is locked through role {role_dn}. "
|
||||
+ f"Please modify the role to unlock this account.")
|
||||
+ else:
|
||||
+ raise ValueError("Account is locked through an unknown role. "
|
||||
+ "Please check the roles configuration to unlock this account.")
|
||||
+ else:
|
||||
+ # Should not happen, but just in case
|
||||
+ raise ValueError(f"Unknown lock state: {current_status['state'].value}")
|
||||
|
||||
# If the account can be bound to, this will attempt to do so. We don't check
|
||||
# for exceptions, just pass them back!
|
||||
--
|
||||
2.49.0
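
The reworked Account.unlock() above distinguishes three cases: a direct nsAccountLock (removed), an inactivity lock (cleared by resetting lastLoginTime), and an indirect lock through a role, which has to be released on the role itself and therefore raises ValueError. A short usage sketch against lib389, assuming inst is a connected DirSrv instance; uid=test_inactive_user is the user created by the new test:

    from lib389 import DEFAULT_SUFFIX
    from lib389.idm.account import Accounts, AccountState

    acct = Accounts(inst, DEFAULT_SUFFIX).get(dn=f'uid=test_inactive_user,{DEFAULT_SUFFIX}')
    status = acct.status()
    if status["state"] != AccountState.ACTIVATED:
        try:
            # Clears nsAccountLock, or resets lastLoginTime when the account
            # only exceeded the inactivity limit.
            acct.unlock()
        except ValueError as e:
            # Raised for role-locked (and unknown) states; unlock the role instead.
            print(f"Cannot unlock: {e}")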
|
||||
|
@ -1,70 +0,0 @@
|
||||
From 09a284ee43c2b4346da892f8756f97accd15ca68 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Wed, 4 Dec 2024 21:59:40 -0500
|
||||
Subject: [PATCH] Issue 6302 - Allow to run replication status without a prompt
|
||||
(#6410)
|
||||
|
||||
Description: We should allow running replication status and
|
||||
other similar commands without requesting a password and bind DN.
|
||||
|
||||
This way, the current instance's root DN and root PW will be used on other
|
||||
instances when requesting CSN info. If they are incorrect,
|
||||
then the info won't be printed, but otherwise, the agreement status
|
||||
will be displayed correctly.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6302
|
||||
|
||||
Reviewed by: @progier389 (Thanks!)
|
||||
---
|
||||
src/lib389/lib389/cli_conf/replication.py | 15 +++------------
|
||||
1 file changed, 3 insertions(+), 12 deletions(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
|
||||
index 399d0d2f8..cd4a331a8 100644
|
||||
--- a/src/lib389/lib389/cli_conf/replication.py
|
||||
+++ b/src/lib389/lib389/cli_conf/replication.py
|
||||
@@ -319,12 +319,9 @@ def list_suffixes(inst, basedn, log, args):
|
||||
def get_repl_status(inst, basedn, log, args):
|
||||
replicas = Replicas(inst)
|
||||
replica = replicas.get(args.suffix)
|
||||
- pw_and_dn_prompt = False
|
||||
if args.bind_passwd_file is not None:
|
||||
args.bind_passwd = get_passwd_from_file(args.bind_passwd_file)
|
||||
- if args.bind_passwd_prompt or args.bind_dn is None or args.bind_passwd is None:
|
||||
- pw_and_dn_prompt = True
|
||||
- status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=pw_and_dn_prompt)
|
||||
+ status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=args.bind_passwd_prompt)
|
||||
if args.json:
|
||||
log.info(json.dumps({"type": "list", "items": status}, indent=4))
|
||||
else:
|
||||
@@ -335,12 +332,9 @@ def get_repl_status(inst, basedn, log, args):
|
||||
def get_repl_winsync_status(inst, basedn, log, args):
|
||||
replicas = Replicas(inst)
|
||||
replica = replicas.get(args.suffix)
|
||||
- pw_and_dn_prompt = False
|
||||
if args.bind_passwd_file is not None:
|
||||
args.bind_passwd = get_passwd_from_file(args.bind_passwd_file)
|
||||
- if args.bind_passwd_prompt or args.bind_dn is None or args.bind_passwd is None:
|
||||
- pw_and_dn_prompt = True
|
||||
- status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, winsync=True, pwprompt=pw_and_dn_prompt)
|
||||
+ status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, winsync=True, pwprompt=args.bind_passwd_prompt)
|
||||
if args.json:
|
||||
log.info(json.dumps({"type": "list", "items": status}, indent=4))
|
||||
else:
|
||||
@@ -874,12 +868,9 @@ def poke_agmt(inst, basedn, log, args):
|
||||
|
||||
def get_agmt_status(inst, basedn, log, args):
|
||||
agmt = get_agmt(inst, args)
|
||||
- pw_and_dn_prompt = False
|
||||
if args.bind_passwd_file is not None:
|
||||
args.bind_passwd = get_passwd_from_file(args.bind_passwd_file)
|
||||
- if args.bind_passwd_prompt or args.bind_dn is None or args.bind_passwd is None:
|
||||
- pw_and_dn_prompt = True
|
||||
- status = agmt.status(use_json=args.json, binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=pw_and_dn_prompt)
|
||||
+ status = agmt.status(use_json=args.json, binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=args.bind_passwd_prompt)
|
||||
log.info(status)
|
||||
|
||||
|
||||
--
|
||||
2.49.0
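
With the change above, the status helpers pass pwprompt through unchanged, so omitting the bind DN and password no longer forces a prompt; per the commit message, the local instance's root DN credentials are then used for the remote CSN lookups. A minimal lib389 sketch of the same call, assuming inst is a connected DirSrv instance with replication enabled on the default suffix:

    from lib389 import DEFAULT_SUFFIX
    from lib389.replica import Replicas

    replica = Replicas(inst).get(DEFAULT_SUFFIX)
    # No bind DN/password and no prompt: the instance's own credentials are used.
    for agmt_status in replica.status(binddn=None, bindpw=None, pwprompt=False):
        print(agmt_status)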
|
||||
|
3
SOURCES/389-ds-base.sysusers
Normal file
@ -0,0 +1,3 @@
#Type Name ID GECOS Home directory Shell
g dirsrv 389
u dirsrv 389:389 "user for 389-ds-base" /usr/share/dirsrv/ /sbin/nologin
@ -13,29 +13,15 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "adler2"
|
||||
version = "2.0.0"
|
||||
version = "2.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
|
||||
checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
|
||||
|
||||
[[package]]
|
||||
name = "ahash"
|
||||
version = "0.7.8"
|
||||
name = "allocator-api2"
|
||||
version = "0.2.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9"
|
||||
dependencies = [
|
||||
"getrandom 0.2.16",
|
||||
"once_cell",
|
||||
"version_check",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ansi_term"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
|
||||
|
||||
[[package]]
|
||||
name = "atty"
|
||||
@ -50,9 +36,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.4.0"
|
||||
version = "1.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
|
||||
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
|
||||
|
||||
[[package]]
|
||||
name = "backtrace"
|
||||
@ -66,7 +52,7 @@ dependencies = [
|
||||
"miniz_oxide",
|
||||
"object",
|
||||
"rustc-demangle",
|
||||
"windows-targets",
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -95,11 +81,13 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
|
||||
|
||||
[[package]]
|
||||
name = "cbindgen"
|
||||
version = "0.9.1"
|
||||
version = "0.26.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
|
||||
checksum = "da6bc11b07529f16944307272d5bd9b22530bc7d05751717c9d416586cedab49"
|
||||
dependencies = [
|
||||
"clap",
|
||||
"heck",
|
||||
"indexmap",
|
||||
"log",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@ -112,9 +100,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.2.25"
|
||||
version = "1.2.31"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d0fc897dc1e865cc67c0e05a836d9d3f1df3cbe442aa4a9473b18e12624a4951"
|
||||
checksum = "c3a42d84bb6b69d3a8b3eaacf0d88f179e1929695e1ad012b6cf64d9caaa5fd2"
|
||||
dependencies = [
|
||||
"jobserver",
|
||||
"libc",
|
||||
@ -123,72 +111,49 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "1.0.0"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||
checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "2.34.0"
|
||||
version = "3.2.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
|
||||
checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123"
|
||||
dependencies = [
|
||||
"ansi_term",
|
||||
"atty",
|
||||
"bitflags 1.3.2",
|
||||
"clap_lex",
|
||||
"indexmap",
|
||||
"strsim",
|
||||
"termcolor",
|
||||
"textwrap",
|
||||
"unicode-width",
|
||||
"vec_map",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap_lex"
|
||||
version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
|
||||
dependencies = [
|
||||
"os_str_bytes",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "concread"
|
||||
version = "0.2.21"
|
||||
version = "0.5.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dcc9816f5ac93ebd51c37f7f9a6bf2b40dfcd42978ad2aea5d542016e9244cf6"
|
||||
checksum = "07fd8c4b53f0aafeec114fa1cd863f323880f790656f2d7508af83a9b5110e8d"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"crossbeam",
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
"lru",
|
||||
"parking_lot",
|
||||
"rand",
|
||||
"smallvec",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam"
|
||||
version = "0.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"crossbeam-deque",
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-queue",
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-channel"
|
||||
version = "0.5.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
|
||||
dependencies = [
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-deque"
|
||||
version = "0.8.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
|
||||
dependencies = [
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
"foldhash",
|
||||
"lru",
|
||||
"smallvec",
|
||||
"sptr",
|
||||
"tokio",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -238,13 +203,19 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "errno"
|
||||
version = "0.3.12"
|
||||
name = "equivalent"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18"
|
||||
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
|
||||
|
||||
[[package]]
|
||||
name = "errno"
|
||||
version = "0.3.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -266,6 +237,12 @@ dependencies = [
|
||||
"zeroize",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "foldhash"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
|
||||
|
||||
[[package]]
|
||||
name = "foreign-types"
|
||||
version = "0.3.2"
|
||||
@ -289,7 +266,7 @@ checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"wasi 0.11.0+wasi-snapshot-preview1",
|
||||
"wasi 0.11.1+wasi-snapshot-preview1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -315,10 +292,24 @@ name = "hashbrown"
|
||||
version = "0.12.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
|
||||
|
||||
[[package]]
|
||||
name = "hashbrown"
|
||||
version = "0.15.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"allocator-api2",
|
||||
"equivalent",
|
||||
"foldhash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "heck"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
|
||||
|
||||
[[package]]
|
||||
name = "hermit-abi"
|
||||
version = "0.1.19"
|
||||
@ -329,12 +320,24 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "instant"
|
||||
version = "0.1.13"
|
||||
name = "indexmap"
|
||||
version = "1.9.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
|
||||
checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"hashbrown 0.12.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "io-uring"
|
||||
version = "0.7.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -355,9 +358,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.172"
|
||||
version = "0.2.174"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
|
||||
checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"
|
||||
|
||||
[[package]]
|
||||
name = "librnsslapd"
|
||||
@ -384,16 +387,6 @@ version = "0.9.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
|
||||
|
||||
[[package]]
|
||||
name = "lock_api"
|
||||
version = "0.4.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"scopeguard",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.27"
|
||||
@ -402,28 +395,39 @@ checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
|
||||
|
||||
[[package]]
|
||||
name = "lru"
|
||||
version = "0.7.8"
|
||||
version = "0.13.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a"
|
||||
checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465"
|
||||
dependencies = [
|
||||
"hashbrown",
|
||||
"hashbrown 0.15.4",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.7.4"
|
||||
version = "2.7.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
|
||||
checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0"
|
||||
|
||||
[[package]]
|
||||
name = "miniz_oxide"
|
||||
version = "0.8.8"
|
||||
version = "0.8.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a"
|
||||
checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316"
|
||||
dependencies = [
|
||||
"adler2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "1.0.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"wasi 0.11.1+wasi-snapshot-preview1",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "object"
|
||||
version = "0.36.7"
|
||||
@ -462,7 +466,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
"syn 2.0.104",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -478,29 +482,10 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot"
|
||||
version = "0.11.2"
|
||||
name = "os_str_bytes"
|
||||
version = "6.6.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
|
||||
dependencies = [
|
||||
"instant",
|
||||
"lock_api",
|
||||
"parking_lot_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot_core"
|
||||
version = "0.8.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"instant",
|
||||
"libc",
|
||||
"redox_syscall",
|
||||
"smallvec",
|
||||
"winapi",
|
||||
]
|
||||
checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1"
|
||||
|
||||
[[package]]
|
||||
name = "paste"
|
||||
@ -533,15 +518,6 @@ version = "0.3.32"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"
|
||||
|
||||
[[package]]
|
||||
name = "ppv-lite86"
|
||||
version = "0.2.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
|
||||
dependencies = [
|
||||
"zerocopy",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-hack"
|
||||
version = "0.5.20+deprecated"
|
||||
@ -581,70 +557,27 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "r-efi"
|
||||
version = "5.2.0"
|
||||
version = "5.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
|
||||
|
||||
[[package]]
|
||||
name = "rand"
|
||||
version = "0.8.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"rand_chacha",
|
||||
"rand_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_chacha"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
|
||||
dependencies = [
|
||||
"ppv-lite86",
|
||||
"rand_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_core"
|
||||
version = "0.6.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
|
||||
dependencies = [
|
||||
"getrandom 0.2.16",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "redox_syscall"
|
||||
version = "0.2.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
|
||||
dependencies = [
|
||||
"bitflags 1.3.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rsds"
|
||||
version = "0.1.0"
|
||||
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
|
||||
|
||||
[[package]]
|
||||
name = "rustc-demangle"
|
||||
version = "0.1.24"
|
||||
version = "0.1.26"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
|
||||
checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace"
|
||||
|
||||
[[package]]
|
||||
name = "rustix"
|
||||
version = "1.0.7"
|
||||
version = "1.0.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266"
|
||||
checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"errno",
|
||||
"libc",
|
||||
"linux-raw-sys",
|
||||
"windows-sys",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -653,12 +586,6 @@ version = "1.0.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
|
||||
|
||||
[[package]]
|
||||
name = "scopeguard"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.219"
|
||||
@ -676,14 +603,14 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
"syn 2.0.104",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.140"
|
||||
version = "1.0.142"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
|
||||
checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7"
|
||||
dependencies = [
|
||||
"itoa",
|
||||
"memchr",
|
||||
@ -697,6 +624,12 @@ version = "1.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
|
||||
|
||||
[[package]]
|
||||
name = "slab"
|
||||
version = "0.4.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d"
|
||||
|
||||
[[package]]
|
||||
name = "slapd"
|
||||
version = "0.1.0"
|
||||
@ -715,15 +648,21 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "smallvec"
|
||||
version = "1.15.0"
|
||||
version = "1.15.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9"
|
||||
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
|
||||
|
||||
[[package]]
|
||||
name = "sptr"
|
||||
version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a"
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.8.0"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
|
||||
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
@ -738,9 +677,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "2.0.101"
|
||||
version = "2.0.104"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf"
|
||||
checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@ -757,38 +696,36 @@ dependencies = [
|
||||
"getrandom 0.3.3",
|
||||
"once_cell",
|
||||
"rustix",
|
||||
"windows-sys",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "termcolor"
|
||||
version = "1.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
|
||||
dependencies = [
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "textwrap"
|
||||
version = "0.11.0"
|
||||
version = "0.16.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
|
||||
dependencies = [
|
||||
"unicode-width",
|
||||
]
|
||||
checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057"
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.45.1"
|
||||
version = "1.47.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779"
|
||||
checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"io-uring",
|
||||
"libc",
|
||||
"mio",
|
||||
"pin-project-lite",
|
||||
"tokio-macros",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-macros"
|
||||
version = "2.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
"slab",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -800,18 +737,43 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing"
|
||||
version = "0.1.41"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
|
||||
dependencies = [
|
||||
"pin-project-lite",
|
||||
"tracing-attributes",
|
||||
"tracing-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-attributes"
|
||||
version = "0.1.30"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.104",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-core"
|
||||
version = "0.1.34"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678"
|
||||
dependencies = [
|
||||
"once_cell",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-ident"
|
||||
version = "1.0.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-width"
|
||||
version = "0.1.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af"
|
||||
|
||||
[[package]]
|
||||
name = "uuid"
|
||||
version = "0.8.2"
|
||||
@ -827,23 +789,11 @@ version = "0.2.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
|
||||
|
||||
[[package]]
|
||||
name = "vec_map"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
|
||||
|
||||
[[package]]
|
||||
name = "version_check"
|
||||
version = "0.9.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
version = "0.11.0+wasi-snapshot-preview1"
|
||||
version = "0.11.1+wasi-snapshot-preview1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
|
||||
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
@ -870,19 +820,43 @@ version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-util"
|
||||
version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
|
||||
dependencies = [
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
[[package]]
|
||||
name = "windows-link"
|
||||
version = "0.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.59.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
|
||||
dependencies = [
|
||||
"windows-targets",
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.60.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
|
||||
dependencies = [
|
||||
"windows-targets 0.53.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -891,14 +865,31 @@ version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
|
||||
dependencies = [
|
||||
"windows_aarch64_gnullvm",
|
||||
"windows_aarch64_msvc",
|
||||
"windows_i686_gnu",
|
||||
"windows_i686_gnullvm",
|
||||
"windows_i686_msvc",
|
||||
"windows_x86_64_gnu",
|
||||
"windows_x86_64_gnullvm",
|
||||
"windows_x86_64_msvc",
|
||||
"windows_aarch64_gnullvm 0.52.6",
|
||||
"windows_aarch64_msvc 0.52.6",
|
||||
"windows_i686_gnu 0.52.6",
|
||||
"windows_i686_gnullvm 0.52.6",
|
||||
"windows_i686_msvc 0.52.6",
|
||||
"windows_x86_64_gnu 0.52.6",
|
||||
"windows_x86_64_gnullvm 0.52.6",
|
||||
"windows_x86_64_msvc 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.53.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91"
|
||||
dependencies = [
|
||||
"windows-link",
|
||||
"windows_aarch64_gnullvm 0.53.0",
|
||||
"windows_aarch64_msvc 0.53.0",
|
||||
"windows_i686_gnu 0.53.0",
|
||||
"windows_i686_gnullvm 0.53.0",
|
||||
"windows_i686_msvc 0.53.0",
|
||||
"windows_x86_64_gnu 0.53.0",
|
||||
"windows_x86_64_gnullvm 0.53.0",
|
||||
"windows_x86_64_msvc 0.53.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -907,48 +898,96 @@ version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnullvm"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnullvm"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
|
||||
|
||||
[[package]]
|
||||
name = "wit-bindgen-rt"
|
||||
version = "0.39.0"
|
||||
@ -958,26 +997,6 @@ dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerocopy"
|
||||
version = "0.8.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb"
|
||||
dependencies = [
|
||||
"zerocopy-derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerocopy-derive"
|
||||
version = "0.8.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zeroize"
|
||||
version = "1.8.1"
|
||||
@ -995,5 +1014,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
"syn 2.0.104",
|
||||
]
|
File diff suppressed because it is too large