Compare commits
1 commit: c8-stream-... → c10-beta

Author | SHA1 | Date
---|---|---
 | 89b7694df5 |
@@ -1,3 +0,0 @@
-bd9aab32d9cbf9231058d585479813f3420dc872 SOURCES/389-ds-base-1.4.3.39.tar.bz2
-1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2
-8d3275209f2f8e1a69053340930ad1fb037d61fb SOURCES/vendor-1.4.3.39-3.tar.gz
.gitignore (vendored, 6 changes)
@@ -1,3 +1,3 @@
-SOURCES/389-ds-base-1.4.3.39.tar.bz2
-SOURCES/jemalloc-5.3.0.tar.bz2
-SOURCES/vendor-1.4.3.39-3.tar.gz
+389-ds-base-3.0.4.tar.bz2
+jemalloc-5.3.0.tar.bz2
+libdb-5.3.28-59.tar.bz2
0001-Issue-6316-lmdb-reindex-is-broken-if-index-type-is-s.patch (new file, 237 lines)
@@ -0,0 +1,237 @@
From a251914c8defce11c3f8496406af8dec6cf50c4b Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Fri, 6 Sep 2024 18:07:17 +0200
Subject: [PATCH] Issue 6316 - lmdb reindex is broken if index type is
 specified (#6318)

While reindexing using task or offline reindex, if the attribute name contains the index type (for example :eq,pres)
Then the attribute is not reindexed. Problem occurs when lmdb is used, things are working fine with bdb.
Solution: strip the index type in reindex as it is done in bdb case.
Anyway the reindex design requires that for a given attribute all the configured index types must be rebuild.

Issue: #6316

Reviewed by: @tbordaz, @droideck (Thanks!)
---
 .../tests/suites/indexes/regression_test.py | 141 +++++++++++++++++-
 .../slapd/back-ldbm/db-mdb/mdb_import.c | 10 +-
 2 files changed, 147 insertions(+), 4 deletions(-)

diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
index 51f88017d..e98ca6172 100644
--- a/dirsrvtests/tests/suites/indexes/regression_test.py
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
@@ -10,6 +10,9 @@ import time
import os
import pytest
import ldap
+import logging
+import glob
+import re
from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX
from lib389.backend import Backend, Backends, DatabaseConfig
from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate
@@ -31,6 +34,8 @@ SUFFIX2 = 'dc=example2,dc=com'
BENAME2 = 'be2'

DEBUGGING = os.getenv("DEBUGGING", default=False)
+logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)


@pytest.fixture(scope="function")
@@ -83,6 +88,7 @@ def add_a_group_with_users(request, topo):
'cn': USER_NAME,
'uidNumber': f'{num}',
'gidNumber': f'{num}',
+ 'description': f'Description for {USER_NAME}',
'homeDirectory': f'/home/{USER_NAME}'
})
users_list.append(user)
@@ -95,9 +101,10 @@ def add_a_group_with_users(request, topo):
# If the server crashed, start it again to do the cleanup
if not topo.standalone.status():
topo.standalone.start()
- for user in users_list:
- user.delete()
- group.delete()
+ if not DEBUGGING:
+ for user in users_list:
+ user.delete()
+ group.delete()

request.addfinalizer(fin)

@@ -124,6 +131,38 @@ def set_small_idlistscanlimit(request, topo):

request.addfinalizer(fin)

+
+@pytest.fixture(scope="function")
+def set_description_index(request, topo, add_a_group_with_users):
+ """
+ Set some description values and description index without reindexing.
+ """
+ inst = topo.standalone
+ backends = Backends(inst)
+ backend = backends.get(DEFAULT_BENAME)
+ indexes = backend.get_indexes()
+ attr = 'description'
+
+ def fin(always=False):
+ if always or not DEBUGGING:
+ try:
+ idx = indexes.get(attr)
+ idx.delete()
+ except ldap.NO_SUCH_OBJECT:
+ pass
+
+ request.addfinalizer(fin)
+ fin(always=True)
+ index = indexes.create(properties={
+ 'cn': attr,
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': ['eq', 'pres', 'sub']
+ })
+ # Restart needed with lmdb (to open the dbi handle)
+ inst.restart()
+ return (indexes, attr)
+
+
#unstable or unstatus tests, skipped for now
@pytest.mark.flaky(max_runs=2, min_passes=1)
@pytest.mark.skipif(ds_is_older("1.4.4.4"), reason="Not implemented")
@@ -346,6 +385,102 @@ def test_task_status(topo):
assert reindex_task.get_exit_code() == 0


+def count_keys(inst, bename, attr, prefix=''):
+ indexfile = os.path.join(inst.dbdir, bename, attr + '.db')
+ # (bdb - we should also accept a version number for .db suffix)
+ for f in glob.glob(f'{indexfile}*'):
+ indexfile = f
+
+ inst.stop()
+ output = inst.dbscan(None, None, args=['-f', indexfile, '-A'], stopping=False).decode()
+ inst.start()
+ count = 0
+ regexp = f'^KEY: {re.escape(prefix)}'
+ for match in re.finditer(regexp, output, flags=re.MULTILINE):
+ count += 1
+ log.info(f"count_keys found {count} keys starting with '{prefix}' in {indexfile}")
+ return count
+
+
+def test_reindex_task_with_type(topo, set_description_index):
+ """Check that reindex task works as expected when index type is specified.
+
+ :id: 0c7f2fda-69f6-11ef-9eb8-083a88554478
+ :setup: Standalone instance
+ - with 100 users having description attribute
+ - with description:eq,pres,sub index entry but not yet reindexed
+ :steps:
+ 1. Set description in suffix entry
+ 2. Count number of equality keys in description index
+ 3. Start a Reindex task on description:eq,pres and wait for completion
+ 4. Check the task status and exit code
+ 5. Count the equality, presence and substring keys in description index
+ 6. Start a Reindex task on description and wait for completion
+ 7. Check the task status and exit code
+ 8. Count the equality, presence and substring keys in description index
+
+ :expectedresults:
+ 1. Success
+ 2. Should be either no key (bdb) or a single one (lmdb)
+ 3. Success
+ 4. Success
+ 5. Should have: more equality keys than in step 2
+ one presence key
+ some substrings keys
+ 6. Success
+ 7. Success
+ 8. Should have same counts than in step 5
+ """
+ (indexes, attr) = set_description_index
+ inst = topo.standalone
+ if not inst.is_dbi_supported():
+ pytest.skip('This test requires that dbscan supports -A option')
+ # modify indexed value
+ Domain(inst, DEFAULT_SUFFIX).replace(attr, f'test_before_reindex')
+
+ keys1 = count_keys(inst, DEFAULT_BENAME, attr, prefix='=')
+ assert keys1 <= 1
+
+ tasks = Tasks(topo.standalone)
+ # completed reindex tasks MUST have a status because freeipa check it.
+
+ # Reindex attr with eq,pres types
+ log.info(f'Reindex {attr} with eq,pres types')
+ tasks.reindex(
+ suffix=DEFAULT_SUFFIX,
+ attrname=f'{attr}:eq,pres',
+ args={TASK_WAIT: True}
+ )
+ reindex_task = Task(topo.standalone, tasks.dn)
+ assert reindex_task.status()
+ assert reindex_task.get_exit_code() == 0
+
+ keys2e = count_keys(inst, DEFAULT_BENAME, attr, prefix='=')
+ keys2p = count_keys(inst, DEFAULT_BENAME, attr, prefix='+')
+ keys2s = count_keys(inst, DEFAULT_BENAME, attr, prefix='*')
+ assert keys2e > keys1
+ assert keys2p > 0
+ assert keys2s > 0
+
+ # Reindex attr without types
+ log.info(f'Reindex {attr} without types')
+ tasks.reindex(
+ suffix=DEFAULT_SUFFIX,
+ attrname=attr,
+ args={TASK_WAIT: True}
+ )
+ reindex_task = Task(topo.standalone, tasks.dn)
+ assert reindex_task.status()
+ assert reindex_task.get_exit_code() == 0
+
+ keys3e = count_keys(inst, DEFAULT_BENAME, attr, prefix='=')
+ keys3p = count_keys(inst, DEFAULT_BENAME, attr, prefix='+')
+ keys3s = count_keys(inst, DEFAULT_BENAME, attr, prefix='*')
+ assert keys3e == keys2e
+ assert keys3p == keys2p
+ assert keys3s == keys2s
+
+
def test_task_and_be(topo, add_backend_and_ldif_50K_users):
"""Check that backend is writable after finishing a tasks

diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c
index cfd9de268..5f8e36cdc 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c
@@ -1150,6 +1150,8 @@ process_db2index_attrs(Slapi_PBlock *pb, ImportCtx_t *ctx)
* TBD
*/
char **attrs = NULL;
+ char *attrname = NULL;
+ char *pt = NULL;
int i;

slapi_pblock_get(pb, SLAPI_DB2INDEX_ATTRS, &attrs);
@@ -1157,7 +1159,13 @@ process_db2index_attrs(Slapi_PBlock *pb, ImportCtx_t *ctx)
for (i = 0; attrs && attrs[i]; i++) {
switch (attrs[i][0]) {
case 't': /* attribute type to index */
- slapi_ch_array_add(&ctx->indexAttrs, slapi_ch_strdup(attrs[i] + 1));
+ attrname = slapi_ch_strdup(attrs[i] + 1);
+ /* Strip index type */
+ pt = strchr(attrname, ':');
+ if (pt != NULL) {
+ *pt = '\0';
+ }
+ slapi_ch_array_add(&ctx->indexAttrs, attrname);
break;
case 'T': /* VLV Search to index */
slapi_ch_array_add(&ctx->indexVlvs, get_vlv_dbname(attrs[i] + 1));
--
2.46.0
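The hunk above fixes lmdb reindexing by stripping an optional ":eq,pres"-style type suffix from each attribute argument before the index lookup. A minimal standalone C sketch of that stripping step, using plain libc strdup/strchr in place of the slapi_ch_* helpers (the name strip_index_type is illustrative only, not part of the patch):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a newly allocated copy of `spec` with any ":type[,type...]" suffix
 * removed, e.g. "description:eq,pres" -> "description". Caller frees. */
static char *
strip_index_type(const char *spec)
{
    char *attrname = strdup(spec);
    if (attrname == NULL) {
        return NULL;
    }
    char *pt = strchr(attrname, ':');
    if (pt != NULL) {
        *pt = '\0';   /* keep only the attribute name, drop the index types */
    }
    return attrname;
}

int
main(void)
{
    char *name = strip_index_type("description:eq,pres");
    printf("%s\n", name);   /* prints "description" */
    free(name);
    return 0;
}
```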
389-ds-base-devel.README (new file, 4 lines)
@@ -0,0 +1,4 @@
For detailed information on developing plugins for 389 Directory Server visit

https://www.port389.org/docs/389ds/design/plugins.html
https://github.com/389ds/389-ds-base/blob/main/src/slapi_r_plugin/README.md
389-ds-base.spec (new file, 1061 lines): diff suppressed because it is too large
389-ds-base.sysusers (new file, 3 lines)
@@ -0,0 +1,3 @@
#Type Name ID GECOS Home directory Shell
g dirsrv 389
u dirsrv 389:389 "user for 389-ds-base" /usr/share/dirsrv/ /sbin/nologin
@@ -1,119 +0,0 @@
From dddb14210b402f317e566b6387c76a8e659bf7fa Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 14 Feb 2023 13:34:10 +0100
Subject: [PATCH 1/2] issue 5647 - covscan: memory leak in audit log when
 adding entries (#5650)

covscan reported an issue about "vals" variable in auditlog.c:231 and indeed a charray_free is missing.
Issue: 5647
Reviewed by: @mreynolds389, @droideck
---
 ldap/servers/slapd/auditlog.c | 71 +++++++++++++++++++----------------
 1 file changed, 38 insertions(+), 33 deletions(-)

diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 68cbc674d..3128e0497 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -177,6 +177,40 @@ write_auditfail_log_entry(Slapi_PBlock *pb)
slapi_ch_free_string(&audit_config);
}

+/*
+ * Write the attribute values to the audit log as "comments"
+ *
+ * Slapi_Attr *entry - the attribute begin logged.
+ * char *attrname - the attribute name.
+ * lenstr *l - the audit log buffer
+ *
+ * Resulting output in the log:
+ *
+ * #ATTR: VALUE
+ * #ATTR: VALUE
+ */
+static void
+log_entry_attr(Slapi_Attr *entry_attr, char *attrname, lenstr *l)
+{
+ Slapi_Value **vals = attr_get_present_values(entry_attr);
+ for(size_t i = 0; vals && vals[i]; i++) {
+ char log_val[256] = "";
+ const struct berval *bv = slapi_value_get_berval(vals[i]);
+ if (bv->bv_len >= 256) {
+ strncpy(log_val, bv->bv_val, 252);
+ strcpy(log_val+252, "...");
+ } else {
+ strncpy(log_val, bv->bv_val, bv->bv_len);
+ log_val[bv->bv_len] = 0;
+ }
+ addlenstr(l, "#");
+ addlenstr(l, attrname);
+ addlenstr(l, ": ");
+ addlenstr(l, log_val);
+ addlenstr(l, "\n");
+ }
+}
+
/*
* Write "requested" attributes from the entry to the audit log as "comments"
*
@@ -212,21 +246,9 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
for (req_attr = ldap_utf8strtok_r(display_attrs, ", ", &last); req_attr;
req_attr = ldap_utf8strtok_r(NULL, ", ", &last))
{
- char **vals = slapi_entry_attr_get_charray(entry, req_attr);
- for(size_t i = 0; vals && vals[i]; i++) {
- char log_val[256] = {0};
-
- if (strlen(vals[i]) > 256) {
- strncpy(log_val, vals[i], 252);
- strcat(log_val, "...");
- } else {
- strcpy(log_val, vals[i]);
- }
- addlenstr(l, "#");
- addlenstr(l, req_attr);
- addlenstr(l, ": ");
- addlenstr(l, log_val);
- addlenstr(l, "\n");
+ slapi_entry_attr_find(entry, req_attr, &entry_attr);
+ if (entry_attr) {
+ log_entry_attr(entry_attr, req_attr, l);
}
}
} else {
@@ -234,7 +256,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
for (; entry_attr; entry_attr = entry_attr->a_next) {
Slapi_Value **vals = attr_get_present_values(entry_attr);
char *attr = NULL;
- const char *val = NULL;

slapi_attr_get_type(entry_attr, &attr);
if (strcmp(attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) {
@@ -251,23 +272,7 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
addlenstr(l, ": ****************************\n");
continue;
}
-
- for(size_t i = 0; vals && vals[i]; i++) {
- char log_val[256] = {0};
-
- val = slapi_value_get_string(vals[i]);
- if (strlen(val) > 256) {
- strncpy(log_val, val, 252);
- strcat(log_val, "...");
- } else {
- strcpy(log_val, val);
- }
- addlenstr(l, "#");
- addlenstr(l, attr);
- addlenstr(l, ": ");
- addlenstr(l, log_val);
- addlenstr(l, "\n");
- }
+ log_entry_attr(entry_attr, attr, l);
}
}
slapi_ch_free_string(&display_attrs);
--
2.43.0
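The removed patch above folds two copies of the value-logging loop into the shared log_entry_attr() helper, which truncates long values to 252 bytes plus "..." and always NUL-terminates the 256-byte log buffer. A small self-contained C sketch of that truncation rule (format_log_value is a hypothetical name, using plain memcpy/strcpy instead of the berval and lenstr types from the patch):

```c
#include <stdio.h>
#include <string.h>

/* Copy up to 255 bytes of a length-delimited value into a fixed buffer,
 * marking longer values with a trailing "..." as log_entry_attr() does. */
static void
format_log_value(const char *val, size_t val_len, char out[256])
{
    if (val_len >= 256) {
        memcpy(out, val, 252);
        strcpy(out + 252, "...");   /* 252 bytes + "..." + NUL fills 256 exactly */
    } else {
        memcpy(out, val, val_len);
        out[val_len] = '\0';
    }
}

int
main(void)
{
    char buf[256];
    const char *value = "uid=demo,ou=people,dc=example,dc=com";
    format_log_value(value, strlen(value), buf);
    printf("#member: %s\n", buf);
    return 0;
}
```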
@@ -1,27 +0,0 @@
From be7c2b82958e91ce08775bf6b5da3c311d3b00e5 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 20 Feb 2023 16:14:05 +0100
Subject: [PATCH 2/2] Issue 5647 - Fix unused variable warning from previous
 commit (#5670)

* issue 5647 - memory leak in audit log when adding entries
* Issue 5647 - Fix unused variable warning from previous commit
---
 ldap/servers/slapd/auditlog.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 3128e0497..0597ecc6f 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -254,7 +254,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
} else {
/* Return all attributes */
for (; entry_attr; entry_attr = entry_attr->a_next) {
- Slapi_Value **vals = attr_get_present_values(entry_attr);
char *attr = NULL;

slapi_attr_get_type(entry_attr, &attr);
--
2.43.0
@@ -1,147 +0,0 @@
From 692c4cec6cc5c0086cf58f83bcfa690c766c9887 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Fri, 2 Feb 2024 14:14:28 +0100
Subject: [PATCH] Issue 5407 - sync_repl crashes if enabled while dynamic
 plugin is enabled (#5411)

Bug description:
When dynamic plugin is enabled, if a MOD enables sync_repl plugin
then sync_repl init function registers the postop callback
that will be called for the MOD itself while the preop
has not been called.
postop expects preop to be called and so primary operation
to be set. When it is not set it crashes

Fix description:
If the primary operation is not set, just return

relates: #5407
---
 .../suites/syncrepl_plugin/basic_test.py | 68 +++++++++++++++++++
 ldap/servers/plugins/sync/sync_persist.c | 23 ++++++-
 2 files changed, 90 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
index eb3770b78..cdf35eeaa 100644
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
@@ -592,6 +592,74 @@ def test_sync_repl_cenotaph(topo_m2, request):

request.addfinalizer(fin)

+def test_sync_repl_dynamic_plugin(topology, request):
+ """Test sync_repl with dynamic plugin
+
+ :id: d4f84913-c18a-459f-8525-110f610ca9e6
+ :setup: install a standalone instance
+ :steps:
+ 1. reset instance to standard (no retroCL, no sync_repl, no dynamic plugin)
+ 2. Enable dynamic plugin
+ 3. Enable retroCL/content_sync
+ 4. Establish a sync_repl req
+ :expectedresults:
+ 1. Should succeeds
+ 2. Should succeeds
+ 3. Should succeeds
+ 4. Should succeeds
+ """
+
+ # Reset the instance in a default config
+ # Disable content sync plugin
+ topology.standalone.plugins.disable(name=PLUGIN_REPL_SYNC)
+
+ # Disable retro changelog
+ topology.standalone.plugins.disable(name=PLUGIN_RETRO_CHANGELOG)
+
+ # Disable dynamic plugins
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'off')])
+ topology.standalone.restart()
+
+ # Now start the test
+ # Enable dynamic plugins
+ try:
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')])
+ except ldap.LDAPError as e:
+ log.error('Failed to enable dynamic plugin! {}'.format(e.args[0]['desc']))
+ assert False
+
+ # Enable retro changelog
+ topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+
+ # Enbale content sync plugin
+ topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
+
+ # create a sync repl client and wait 5 seconds to be sure it is running
+ sync_repl = Sync_persist(topology.standalone)
+ sync_repl.start()
+ time.sleep(5)
+
+ # create users
+ users = UserAccounts(topology.standalone, DEFAULT_SUFFIX)
+ users_set = []
+ for i in range(10001, 10004):
+ users_set.append(users.create_test_user(uid=i))
+
+ time.sleep(10)
+ # delete users, that automember/memberof will generate nested updates
+ for user in users_set:
+ user.delete()
+ # stop the server to get the sync_repl result set (exit from while loop).
+ # Only way I found to acheive that.
+ # and wait a bit to let sync_repl thread time to set its result before fetching it.
+ topology.standalone.stop()
+ sync_repl.get_result()
+ sync_repl.join()
+ log.info('test_sync_repl_dynamic_plugin: PASS\n')
+
+ # Success
+ log.info('Test complete')
+
def test_sync_repl_invalid_cookie(topology, request):
"""Test sync_repl with invalid cookie

diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
index d2210b64c..283607361 100644
--- a/ldap/servers/plugins/sync/sync_persist.c
+++ b/ldap/servers/plugins/sync/sync_persist.c
@@ -156,6 +156,17 @@ ignore_op_pl(Slapi_PBlock *pb)
* This is the same for ident
*/
prim_op = get_thread_primary_op();
+ if (prim_op == NULL) {
+ /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
+ * The only known case it happens is with dynamic plugin enabled and an
+ * update that enable the sync_repl plugin. In such case sync_repl registers
+ * the postop (sync_update_persist_op) that is called while the preop was not called
+ */
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
+ "ignore_op_pl - Operation without primary op set (0x%lx)\n",
+ (ulong) op);
+ return;
+ }
ident = sync_persist_get_operation_extension(pb);

if (ident) {
@@ -232,8 +243,18 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber


prim_op = get_thread_primary_op();
+ if (prim_op == NULL) {
+ /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
+ * The only known case it happens is with dynamic plugin enabled and an
+ * update that enable the sync_repl plugin. In such case sync_repl registers
+ * the postop (sync_update_persist_op) that is called while the preop was not called
+ */
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
+ "sync_update_persist_op - Operation without primary op set (0x%lx)\n",
+ (ulong) pb_op);
+ return;
+ }
ident = sync_persist_get_operation_extension(pb);
- PR_ASSERT(prim_op);

if ((ident == NULL) && operation_is_flag_set(pb_op, OP_FLAG_NOOP)) {
/* This happens for URP (add cenotaph, fixup rename, tombstone resurrect)
--
2.43.0
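The removed sync_repl patch above swaps a PR_ASSERT(prim_op) for an early return when the thread's primary operation was never set, i.e. the pre-op did not run because the plugin was enabled by that very update. A compact C sketch of that defensive post-op pattern; get_primary_op, op_state and post_op are hypothetical stand-ins, not 389-ds APIs:

```c
#include <stdio.h>
#include <stddef.h>

struct op_state {
    int prepared;   /* set by the pre-op callback */
};

/* Stand-in for get_thread_primary_op(); returns NULL when no pre-op ran. */
static struct op_state *
get_primary_op(void)
{
    return NULL;
}

static void
post_op(void)
{
    struct op_state *prim_op = get_primary_op();
    if (prim_op == NULL) {
        /* Pre-op never ran for this operation: log and bail out instead of
         * asserting, which is the situation the patch handles. */
        fprintf(stderr, "post_op - operation without primary op set, skipping\n");
        return;
    }
    /* ... normal post-op processing would use prim_op here ... */
}

int
main(void)
{
    post_op();
    return 0;
}
```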
@ -1,840 +0,0 @@
|
||||
From 8dc61a176323f0d41df730abd715ccff3034c2be Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Sun, 27 Nov 2022 09:37:19 -0500
|
||||
Subject: [PATCH] Issue 5547 - automember plugin improvements
|
||||
|
||||
Description:
|
||||
|
||||
Rebuild task has the following improvements:
|
||||
|
||||
- Only one task allowed at a time
|
||||
- Do not cleanup previous members by default. Add new CLI option to intentionally
|
||||
cleanup memberships before rebuilding from scratch.
|
||||
- Add better task logging to show fixup progress
|
||||
|
||||
To prevent automember from being called in a nested be_txn loop thread storage is
|
||||
used to check and skip these loops.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/5547
|
||||
|
||||
Reviewed by: spichugi(Thanks!)
|
||||
---
|
||||
.../automember_plugin/automember_mod_test.py | 43 +++-
|
||||
ldap/servers/plugins/automember/automember.c | 232 ++++++++++++++----
|
||||
ldap/servers/slapd/back-ldbm/ldbm_add.c | 11 +-
|
||||
ldap/servers/slapd/back-ldbm/ldbm_delete.c | 10 +-
|
||||
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 11 +-
|
||||
.../lib389/cli_conf/plugins/automember.py | 10 +-
|
||||
src/lib389/lib389/plugins.py | 7 +-
|
||||
src/lib389/lib389/tasks.py | 9 +-
|
||||
8 files changed, 250 insertions(+), 83 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
index 8d25384bf..7a0ed3275 100644
|
||||
--- a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
+++ b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
@@ -5,12 +5,13 @@
|
||||
# License: GPL (version 3 or any later version).
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
-#
|
||||
+import ldap
|
||||
import logging
|
||||
import pytest
|
||||
import os
|
||||
+import time
|
||||
from lib389.utils import ds_is_older
|
||||
-from lib389._constants import *
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions
|
||||
from lib389.idm.user import UserAccounts
|
||||
from lib389.idm.group import Groups
|
||||
@@ -41,6 +42,11 @@ def automember_fixture(topo, request):
|
||||
user_accts = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
|
||||
user = user_accts.create_test_user()
|
||||
|
||||
+ # Create extra users
|
||||
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
|
||||
+ for i in range(0, 100):
|
||||
+ users.create_test_user(uid=i)
|
||||
+
|
||||
# Create automember definitions and regex rules
|
||||
automember_prop = {
|
||||
'cn': 'testgroup_definition',
|
||||
@@ -59,7 +65,7 @@ def automember_fixture(topo, request):
|
||||
automemberplugin.enable()
|
||||
topo.standalone.restart()
|
||||
|
||||
- return (user, groups)
|
||||
+ return user, groups
|
||||
|
||||
|
||||
def test_mods(automember_fixture, topo):
|
||||
@@ -72,19 +78,21 @@ def test_mods(automember_fixture, topo):
|
||||
2. Update user that should add it to group[1]
|
||||
3. Update user that should add it to group[2]
|
||||
4. Update user that should add it to group[0]
|
||||
- 5. Test rebuild task correctly moves user to group[1]
|
||||
+ 5. Test rebuild task adds user to group[1]
|
||||
+ 6. Test rebuild task cleanups groups and only adds it to group[1]
|
||||
:expectedresults:
|
||||
1. Success
|
||||
2. Success
|
||||
3. Success
|
||||
4. Success
|
||||
5. Success
|
||||
+ 6. Success
|
||||
"""
|
||||
(user, groups) = automember_fixture
|
||||
|
||||
# Update user which should go into group[0]
|
||||
user.replace('cn', 'whatever')
|
||||
- groups[0].is_member(user.dn)
|
||||
+ assert groups[0].is_member(user.dn)
|
||||
if groups[1].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -92,7 +100,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user0 which should go into group[1]
|
||||
user.replace('cn', 'mark')
|
||||
- groups[1].is_member(user.dn)
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -100,7 +108,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user which should go into group[2]
|
||||
user.replace('cn', 'simon')
|
||||
- groups[2].is_member(user.dn)
|
||||
+ assert groups[2].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[1].is_member(user.dn):
|
||||
@@ -108,7 +116,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user which should go back into group[0] (full circle)
|
||||
user.replace('cn', 'whatever')
|
||||
- groups[0].is_member(user.dn)
|
||||
+ assert groups[0].is_member(user.dn)
|
||||
if groups[1].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -128,12 +136,24 @@ def test_mods(automember_fixture, topo):
|
||||
automemberplugin.enable()
|
||||
topo.standalone.restart()
|
||||
|
||||
- # Run rebuild task
|
||||
+ # Run rebuild task (no cleanup)
|
||||
task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount")
|
||||
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
|
||||
+ # test only one fixup task is allowed at a time
|
||||
+ automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=top")
|
||||
task.wait()
|
||||
|
||||
- # Test membership
|
||||
- groups[1].is_member(user.dn)
|
||||
+ # Test membership (user should still be in groups[0])
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
+ if not groups[0].is_member(user.dn):
|
||||
+ assert False
|
||||
+
|
||||
+ # Run rebuild task with cleanup
|
||||
+ task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount", cleanup=True)
|
||||
+ task.wait()
|
||||
+
|
||||
+ # Test membership (user should only be in groups[1])
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -148,4 +168,3 @@ if __name__ == '__main__':
|
||||
# -s for DEBUG mode
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main(["-s", CURRENT_FILE])
|
||||
-
|
||||
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
|
||||
index 3494d0343..419adb052 100644
|
||||
--- a/ldap/servers/plugins/automember/automember.c
|
||||
+++ b/ldap/servers/plugins/automember/automember.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2011 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -14,7 +14,7 @@
|
||||
* Auto Membership Plug-in
|
||||
*/
|
||||
#include "automember.h"
|
||||
-
|
||||
+#include <pthread.h>
|
||||
|
||||
/*
|
||||
* Plug-in globals
|
||||
@@ -22,7 +22,9 @@
|
||||
static PRCList *g_automember_config = NULL;
|
||||
static Slapi_RWLock *g_automember_config_lock = NULL;
|
||||
static uint64_t abort_rebuild_task = 0;
|
||||
-
|
||||
+static pthread_key_t td_automem_block_nested;
|
||||
+static PRBool fixup_running = PR_FALSE;
|
||||
+static PRLock *fixup_lock = NULL;
|
||||
static void *_PluginID = NULL;
|
||||
static Slapi_DN *_PluginDN = NULL;
|
||||
static Slapi_DN *_ConfigAreaDN = NULL;
|
||||
@@ -93,9 +95,43 @@ static void automember_task_export_destructor(Slapi_Task *task);
|
||||
static void automember_task_map_destructor(Slapi_Task *task);
|
||||
|
||||
#define DEFAULT_FILE_MODE PR_IRUSR | PR_IWUSR
|
||||
+#define FIXUP_PROGRESS_LIMIT 1000
|
||||
static uint64_t plugin_do_modify = 0;
|
||||
static uint64_t plugin_is_betxn = 0;
|
||||
|
||||
+/* automember_plugin fixup task and add operations should block other be_txn
|
||||
+ * plugins from calling automember_post_op_mod() */
|
||||
+static int32_t
|
||||
+slapi_td_block_nested_post_op(void)
|
||||
+{
|
||||
+ int32_t val = 12345;
|
||||
+
|
||||
+ if (pthread_setspecific(td_automem_block_nested, (void *)&val) != 0) {
|
||||
+ return PR_FAILURE;
|
||||
+ }
|
||||
+ return PR_SUCCESS;
|
||||
+}
|
||||
+
|
||||
+static int32_t
|
||||
+slapi_td_unblock_nested_post_op(void)
|
||||
+{
|
||||
+ if (pthread_setspecific(td_automem_block_nested, NULL) != 0) {
|
||||
+ return PR_FAILURE;
|
||||
+ }
|
||||
+ return PR_SUCCESS;
|
||||
+}
|
||||
+
|
||||
+static int32_t
|
||||
+slapi_td_is_post_op_nested(void)
|
||||
+{
|
||||
+ int32_t *value = pthread_getspecific(td_automem_block_nested);
|
||||
+
|
||||
+ if (value == NULL) {
|
||||
+ return 0;
|
||||
+ }
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Config cache locking functions
|
||||
*/
|
||||
@@ -317,6 +353,14 @@ automember_start(Slapi_PBlock *pb)
|
||||
return -1;
|
||||
}
|
||||
|
||||
+ if (fixup_lock == NULL) {
|
||||
+ if ((fixup_lock = PR_NewLock()) == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_start - Failed to create fixup lock.\n");
|
||||
+ return -1;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
/*
|
||||
* Get the plug-in target dn from the system
|
||||
* and store it for future use. */
|
||||
@@ -360,6 +404,11 @@ automember_start(Slapi_PBlock *pb)
|
||||
}
|
||||
}
|
||||
|
||||
+ if (pthread_key_create(&td_automem_block_nested, NULL) != 0) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_start - pthread_key_create failed\n");
|
||||
+ }
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_start - ready for service\n");
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
@@ -394,6 +443,8 @@ automember_close(Slapi_PBlock *pb __attribute__((unused)))
|
||||
slapi_sdn_free(&_ConfigAreaDN);
|
||||
slapi_destroy_rwlock(g_automember_config_lock);
|
||||
g_automember_config_lock = NULL;
|
||||
+ PR_DestroyLock(fixup_lock);
|
||||
+ fixup_lock = NULL;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"<-- automember_close\n");
|
||||
@@ -1619,7 +1670,6 @@ out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
-
|
||||
/*
|
||||
* automember_update_member_value()
|
||||
*
|
||||
@@ -1634,7 +1684,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
LDAPMod *mods[2];
|
||||
char *vals[2];
|
||||
char *member_value = NULL;
|
||||
- int rc = 0;
|
||||
+ int rc = LDAP_SUCCESS;
|
||||
Slapi_DN *group_sdn;
|
||||
|
||||
/* First thing check that the group still exists */
|
||||
@@ -1653,7 +1703,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
"automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n",
|
||||
group_dn, rc);
|
||||
}
|
||||
- return rc;
|
||||
+ goto out;
|
||||
}
|
||||
|
||||
/* If grouping_value is dn, we need to fetch the dn instead. */
|
||||
@@ -1879,6 +1929,13 @@ automember_mod_post_op(Slapi_PBlock *pb)
|
||||
PRCList *list = NULL;
|
||||
int rc = SLAPI_PLUGIN_SUCCESS;
|
||||
|
||||
+ if (slapi_td_is_post_op_nested()) {
|
||||
+ /* don't process op twice in the same thread */
|
||||
+ return rc;
|
||||
+ } else {
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ }
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"--> automember_mod_post_op\n");
|
||||
|
||||
@@ -2005,6 +2062,7 @@ automember_mod_post_op(Slapi_PBlock *pb)
|
||||
}
|
||||
}
|
||||
}
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"<-- automember_mod_post_op (%d)\n", rc);
|
||||
@@ -2024,6 +2082,13 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"--> automember_add_post_op\n");
|
||||
|
||||
+ if (slapi_td_is_post_op_nested()) {
|
||||
+ /* don't process op twice in the same thread */
|
||||
+ return rc;
|
||||
+ } else {
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ }
|
||||
+
|
||||
/* Reload config if a config entry was added. */
|
||||
if ((sdn = automember_get_sdn(pb))) {
|
||||
if (automember_dn_is_config(sdn)) {
|
||||
@@ -2039,7 +2104,7 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
|
||||
/* If replication, just bail. */
|
||||
if (automember_isrepl(pb)) {
|
||||
- return SLAPI_PLUGIN_SUCCESS;
|
||||
+ goto bail;
|
||||
}
|
||||
|
||||
/* Get the newly added entry. */
|
||||
@@ -2052,7 +2117,7 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
tombstone);
|
||||
slapi_value_free(&tombstone);
|
||||
if (is_tombstone) {
|
||||
- return SLAPI_PLUGIN_SUCCESS;
|
||||
+ goto bail;
|
||||
}
|
||||
|
||||
/* Check if a config entry applies
|
||||
@@ -2063,21 +2128,19 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
list = PR_LIST_HEAD(g_automember_config);
|
||||
while (list != g_automember_config) {
|
||||
config = (struct configEntry *)list;
|
||||
-
|
||||
/* Does the entry meet scope and filter requirements? */
|
||||
if (slapi_dn_issuffix(slapi_sdn_get_dn(sdn), config->scope) &&
|
||||
- (slapi_filter_test_simple(e, config->filter) == 0)) {
|
||||
+ (slapi_filter_test_simple(e, config->filter) == 0))
|
||||
+ {
|
||||
/* Find out what membership changes are needed and make them. */
|
||||
if (automember_update_membership(config, e, NULL) == SLAPI_PLUGIN_FAILURE) {
|
||||
rc = SLAPI_PLUGIN_FAILURE;
|
||||
break;
|
||||
}
|
||||
}
|
||||
-
|
||||
list = PR_NEXT_LINK(list);
|
||||
}
|
||||
}
|
||||
-
|
||||
automember_config_unlock();
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
@@ -2098,6 +2161,7 @@ bail:
|
||||
slapi_pblock_set(pb, SLAPI_RESULT_CODE, &result);
|
||||
slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, &errtxt);
|
||||
}
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
|
||||
return rc;
|
||||
}
|
||||
@@ -2138,6 +2202,7 @@ typedef struct _task_data
|
||||
Slapi_DN *base_dn;
|
||||
char *bind_dn;
|
||||
int scope;
|
||||
+ PRBool cleanup;
|
||||
} task_data;
|
||||
|
||||
static void
|
||||
@@ -2270,6 +2335,7 @@ automember_task_abort_thread(void *arg)
|
||||
* basedn: dc=example,dc=com
|
||||
* filter: (uid=*)
|
||||
* scope: sub
|
||||
+ * cleanup: yes/on (default is off)
|
||||
*
|
||||
* basedn and filter are required. If scope is omitted, the default is sub
|
||||
*/
|
||||
@@ -2284,9 +2350,22 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
const char *base_dn;
|
||||
const char *filter;
|
||||
const char *scope;
|
||||
+ const char *cleanup_str;
|
||||
+ PRBool cleanup = PR_FALSE;
|
||||
|
||||
*returncode = LDAP_SUCCESS;
|
||||
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ if (fixup_running) {
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_task_add - there is already a fixup task running\n");
|
||||
+ rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
+ goto out;
|
||||
+ }
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+
|
||||
/*
|
||||
* Grab the task params
|
||||
*/
|
||||
@@ -2300,6 +2379,12 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
goto out;
|
||||
}
|
||||
+ if ((cleanup_str = slapi_entry_attr_get_ref(e, "cleanup"))) {
|
||||
+ if (strcasecmp(cleanup_str, "yes") == 0 || strcasecmp(cleanup_str, "on")) {
|
||||
+ cleanup = PR_TRUE;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
scope = slapi_fetch_attr(e, "scope", "sub");
|
||||
/*
|
||||
* setup our task data
|
||||
@@ -2315,6 +2400,7 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
mytaskdata->bind_dn = slapi_ch_strdup(bind_dn);
|
||||
mytaskdata->base_dn = slapi_sdn_new_dn_byval(base_dn);
|
||||
mytaskdata->filter_str = slapi_ch_strdup(filter);
|
||||
+ mytaskdata->cleanup = cleanup;
|
||||
|
||||
if (scope) {
|
||||
if (strcasecmp(scope, "sub") == 0) {
|
||||
@@ -2334,6 +2420,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
task = slapi_plugin_new_task(slapi_entry_get_ndn(e), arg);
|
||||
slapi_task_set_destructor_fn(task, automember_task_destructor);
|
||||
slapi_task_set_data(task, mytaskdata);
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_TRUE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
/*
|
||||
* Start the task as a separate thread
|
||||
*/
|
||||
@@ -2345,6 +2434,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
"automember_task_add - Unable to create task thread!\n");
|
||||
*returncode = LDAP_OPERATIONS_ERROR;
|
||||
slapi_task_finish(task, *returncode);
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_FALSE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
} else {
|
||||
rv = SLAPI_DSE_CALLBACK_OK;
|
||||
@@ -2372,6 +2464,9 @@ automember_rebuild_task_thread(void *arg)
|
||||
PRCList *list = NULL;
|
||||
PRCList *include_list = NULL;
|
||||
int result = 0;
|
||||
+ int64_t fixup_progress_count = 0;
|
||||
+ int64_t fixup_progress_elapsed = 0;
|
||||
+ int64_t fixup_start_time = 0;
|
||||
size_t i = 0;
|
||||
|
||||
/* Reset abort flag */
|
||||
@@ -2380,6 +2475,7 @@ automember_rebuild_task_thread(void *arg)
|
||||
if (!task) {
|
||||
return; /* no task */
|
||||
}
|
||||
+
|
||||
slapi_task_inc_refcount(task);
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_rebuild_task_thread - Refcount incremented.\n");
|
||||
@@ -2393,9 +2489,11 @@ automember_rebuild_task_thread(void *arg)
|
||||
slapi_task_log_status(task, "Automember rebuild task starting (base dn: (%s) filter (%s)...",
|
||||
slapi_sdn_get_dn(td->base_dn), td->filter_str);
|
||||
/*
|
||||
- * Set the bind dn in the local thread data
|
||||
+ * Set the bind dn in the local thread data, and block post op mods
|
||||
*/
|
||||
slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ fixup_start_time = slapi_current_rel_time_t();
|
||||
/*
|
||||
* Take the config lock now and search the database
|
||||
*/
|
||||
@@ -2426,6 +2524,21 @@ automember_rebuild_task_thread(void *arg)
|
||||
* Loop over the entries
|
||||
*/
|
||||
for (i = 0; entries && (entries[i] != NULL); i++) {
|
||||
+ fixup_progress_count++;
|
||||
+ if (fixup_progress_count % FIXUP_PROGRESS_LIMIT == 0 ) {
|
||||
+ slapi_task_log_notice(task,
|
||||
+ "Processed %ld entries in %ld seconds (+%ld seconds)",
|
||||
+ fixup_progress_count,
|
||||
+ slapi_current_rel_time_t() - fixup_start_time,
|
||||
+ slapi_current_rel_time_t() - fixup_progress_elapsed);
|
||||
+ slapi_task_log_status(task,
|
||||
+ "Processed %ld entries in %ld seconds (+%ld seconds)",
|
||||
+ fixup_progress_count,
|
||||
+ slapi_current_rel_time_t() - fixup_start_time,
|
||||
+ slapi_current_rel_time_t() - fixup_progress_elapsed);
|
||||
+ slapi_task_inc_progress(task);
|
||||
+ fixup_progress_elapsed = slapi_current_rel_time_t();
|
||||
+ }
|
||||
if (slapi_atomic_load_64(&abort_rebuild_task, __ATOMIC_ACQUIRE) == 1) {
|
||||
/* The task was aborted */
|
||||
slapi_task_log_notice(task, "Automember rebuild task was intentionally aborted");
|
||||
@@ -2443,48 +2556,66 @@ automember_rebuild_task_thread(void *arg)
|
||||
if (slapi_dn_issuffix(slapi_entry_get_dn(entries[i]), config->scope) &&
|
||||
(slapi_filter_test_simple(entries[i], config->filter) == 0))
|
||||
{
|
||||
- /* First clear out all the defaults groups */
|
||||
- for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
|
||||
- if ((result = automember_update_member_value(entries[i], config->default_groups[ii],
|
||||
- config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
|
||||
- {
|
||||
- slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from default group (%s) error (%d)",
|
||||
- config->default_groups[ii], result);
|
||||
- slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from default group (%s) error (%d)",
|
||||
- config->default_groups[ii], result);
|
||||
- slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
- "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
- config->default_groups[ii], result);
|
||||
- goto out;
|
||||
- }
|
||||
- }
|
||||
-
|
||||
- /* Then clear out the non-default group */
|
||||
- if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
|
||||
- include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
|
||||
- while (include_list != (PRCList *)config->inclusive_rules) {
|
||||
- struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
|
||||
- if ((result = automember_update_member_value(entries[i], slapi_sdn_get_dn(curr_rule->target_group_dn),
|
||||
- config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
|
||||
+ if (td->cleanup) {
|
||||
+
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Cleaning up groups (config %s)\n",
|
||||
+ config->dn);
|
||||
+ /* First clear out all the defaults groups */
|
||||
+ for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
|
||||
+ if ((result = automember_update_member_value(entries[i],
|
||||
+ config->default_groups[ii],
|
||||
+ config->grouping_attr,
|
||||
+ config->grouping_value,
|
||||
+ NULL, DEL_MEMBER)))
|
||||
{
|
||||
slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from group (%s) error (%d)",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ "member from default group (%s) error (%d)",
|
||||
+ config->default_groups[ii], result);
|
||||
slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from group (%s) error (%d)",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ "member from default group (%s) error (%d)",
|
||||
+ config->default_groups[ii], result);
|
||||
slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ config->default_groups[ii], result);
|
||||
goto out;
|
||||
}
|
||||
- include_list = PR_NEXT_LINK(include_list);
|
||||
}
|
||||
+
|
||||
+ /* Then clear out the non-default group */
|
||||
+ if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
|
||||
+ include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
|
||||
+ while (include_list != (PRCList *)config->inclusive_rules) {
|
||||
+ struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
|
||||
+ if ((result = automember_update_member_value(entries[i],
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn),
|
||||
+ config->grouping_attr,
|
||||
+ config->grouping_value,
|
||||
+ NULL, DEL_MEMBER)))
|
||||
+ {
|
||||
+ slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
+ "member from group (%s) error (%d)",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
+ "member from group (%s) error (%d)",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ goto out;
|
||||
+ }
|
||||
+ include_list = PR_NEXT_LINK(include_list);
|
||||
+ }
|
||||
+ }
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Finished cleaning up groups (config %s)\n",
|
||||
+ config->dn);
|
||||
}
|
||||
|
||||
/* Update the memberships for this entries */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Updating membership (config %s)\n",
|
||||
+ config->dn);
|
||||
if (slapi_is_shutting_down() ||
|
||||
automember_update_membership(config, entries[i], NULL) == SLAPI_PLUGIN_FAILURE)
|
||||
{
|
||||
@@ -2508,15 +2639,22 @@ out:
|
||||
slapi_task_log_notice(task, "Automember rebuild task aborted. Error (%d)", result);
|
||||
slapi_task_log_status(task, "Automember rebuild task aborted. Error (%d)", result);
|
||||
} else {
|
||||
- slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
|
||||
- slapi_task_log_status(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
|
||||
+ slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
|
||||
+ (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
|
||||
+ slapi_task_log_status(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
|
||||
+ (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
|
||||
}
|
||||
slapi_task_inc_progress(task);
|
||||
slapi_task_finish(task, result);
|
||||
slapi_task_dec_refcount(task);
|
||||
slapi_atomic_store_64(&abort_rebuild_task, 0, __ATOMIC_RELEASE);
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_FALSE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
- "automember_rebuild_task_thread - Refcount decremented.\n");
|
||||
+ "automember_rebuild_task_thread - task finished, refcount decremented.\n");
|
||||
}
|
||||
|
||||
/*
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
index ba2d73a84..ce4c314a1 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -1264,10 +1264,6 @@ ldbm_back_add(Slapi_PBlock *pb)
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
if (addingentry_id_assigned) {
|
||||
next_id_return(be, addingentry->ep_id);
|
||||
}
|
||||
@@ -1376,6 +1372,11 @@ diskfull_return:
|
||||
if (!not_an_error) {
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
}
|
||||
+
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
}
|
||||
|
||||
common_return:
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
index de23190c3..27f0ac58a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
@@ -1407,11 +1407,6 @@ commit_return:
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
-
|
||||
if (tombstone) {
|
||||
if (cache_is_in_cache(&inst->inst_cache, tombstone)) {
|
||||
tomb_ep_id = tombstone->ep_id; /* Otherwise, tombstone might have been freed. */
|
||||
@@ -1496,6 +1491,11 @@ error_return:
|
||||
conn_id, op_id, parent_modify_c.old_entry, parent_modify_c.new_entry, myrc);
|
||||
}
|
||||
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
+
|
||||
common_return:
|
||||
if (orig_entry) {
|
||||
/* NOTE: #define SLAPI_DELETE_BEPREOP_ENTRY SLAPI_ENTRY_PRE_OP */
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
index 537369055..64b293001 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -1043,11 +1043,6 @@ ldbm_back_modify(Slapi_PBlock *pb)
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
-
|
||||
if (postentry != NULL) {
|
||||
slapi_entry_free(postentry);
|
||||
postentry = NULL;
|
||||
@@ -1103,6 +1098,10 @@ error_return:
|
||||
if (!not_an_error) {
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
}
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
}
|
||||
|
||||
/* if ec is in cache, remove it, then add back e if we still have it */
|
||||
diff --git a/src/lib389/lib389/cli_conf/plugins/automember.py b/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
index 15b00c633..568586ad8 100644
|
||||
--- a/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
+++ b/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
@@ -155,7 +155,7 @@ def fixup(inst, basedn, log, args):
|
||||
log.info('Attempting to add task entry... This will fail if Automembership plug-in is not enabled.')
|
||||
if not plugin.status():
|
||||
log.error("'%s' is disabled. Rebuild membership task can't be executed" % plugin.rdn)
|
||||
- fixup_task = plugin.fixup(args.DN, args.filter)
|
||||
+ fixup_task = plugin.fixup(args.DN, args.filter, args.cleanup)
|
||||
if args.wait:
|
||||
log.info(f'Waiting for fixup task "{fixup_task.dn}" to complete. You can safely exit by pressing Control C ...')
|
||||
fixup_task.wait(timeout=args.timeout)
|
||||
@@ -225,8 +225,8 @@ def create_parser(subparsers):
|
||||
subcommands = automember.add_subparsers(help='action')
|
||||
add_generic_plugin_parsers(subcommands, AutoMembershipPlugin)
|
||||
|
||||
- list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
- subcommands_list = list.add_subparsers(help='action')
+ automember_list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
+ subcommands_list = automember_list.add_subparsers(help='action')
list_definitions = subcommands_list.add_parser('definitions', help='Lists Automembership definitions.')
list_definitions.set_defaults(func=definition_list)
list_regexes = subcommands_list.add_parser('regexes', help='List Automembership regex rules.')
@@ -269,6 +269,8 @@ def create_parser(subparsers):
fixup_task.add_argument('-f', '--filter', required=True, help='Sets the LDAP filter for entries to fix up')
fixup_task.add_argument('-s', '--scope', required=True, choices=['sub', 'base', 'one'], type=str.lower,
help='Sets the LDAP search scope for entries to fix up')
+ fixup_task.add_argument('--cleanup', action='store_true',
+ help="Clean up previous group memberships before rebuilding")
fixup_task.add_argument('--wait', action='store_true',
help="Wait for the task to finish, this could take a long time")
fixup_task.add_argument('--timeout', default=0, type=int,
@@ -279,7 +281,7 @@ def create_parser(subparsers):
fixup_status.add_argument('--dn', help="The task entry's DN")
fixup_status.add_argument('--show-log', action='store_true', help="Display the task log")
fixup_status.add_argument('--watch', action='store_true',
- help="Watch the task's status and wait for it to finish")
+ help="Watch the task's status and wait for it to finish")

abort_fixup = subcommands.add_parser('abort-fixup', help='Abort the rebuild membership task.')
abort_fixup.set_defaults(func=abort)
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
index 52691a44c..a1ad0a45b 100644
--- a/src/lib389/lib389/plugins.py
+++ b/src/lib389/lib389/plugins.py
@@ -1141,13 +1141,15 @@ class AutoMembershipPlugin(Plugin):
def __init__(self, instance, dn="cn=Auto Membership Plugin,cn=plugins,cn=config"):
super(AutoMembershipPlugin, self).__init__(instance, dn)

- def fixup(self, basedn, _filter=None):
+ def fixup(self, basedn, _filter=None, cleanup=False):
"""Create an automember rebuild membership task

:param basedn: Basedn to fix up
:type basedn: str
:param _filter: a filter for entries to fix up
:type _filter: str
+ :param cleanup: cleanup old group memberships
+ :type cleanup: boolean

:returns: an instance of Task(DSLdapObject)
"""
@@ -1156,6 +1158,9 @@ class AutoMembershipPlugin(Plugin):
task_properties = {'basedn': basedn}
if _filter is not None:
task_properties['filter'] = _filter
+ if cleanup:
+ task_properties['cleanup'] = "yes"
+
task.create(properties=task_properties)

return task
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
index 1a16bbb83..193805780 100644
--- a/src/lib389/lib389/tasks.py
+++ b/src/lib389/lib389/tasks.py
@@ -1006,12 +1006,13 @@ class Tasks(object):
return exitCode

def automemberRebuild(self, suffix=DEFAULT_SUFFIX, scope='sub',
- filterstr='objectclass=top', args=None):
+ filterstr='objectclass=top', cleanup=False, args=None):
'''
- @param suffix - The suffix the task should examine - defualt is
+ @param suffix - The suffix the task should examine - default is
"dc=example,dc=com"
@param scope - The scope of the search to find entries
- @param fitlerstr - THe search filter to find entries
+ @param fitlerstr - The search filter to find entries
+ @param cleanup - reset/clear the old group mmeberships prior to rebuilding
@param args - is a dictionary that contains modifier of the task
wait: True/[False] - If True, waits for the completion of
the task before to return
@@ -1027,6 +1028,8 @@ class Tasks(object):
entry.setValues('basedn', suffix)
entry.setValues('filter', filterstr)
entry.setValues('scope', scope)
+ if cleanup:
+ entry.setValues('cleanup', 'yes')

# start the task and possibly wait for task completion
try:
--
2.43.0

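For readers following the lib389 side of this change, here is a minimal sketch of driving the new cleanup behaviour once the patch is applied. The connection setup is elided and the suffix and filter are placeholders; only fixup(..., cleanup=True) and the resulting 'cleanup: yes' task attribute come from the patch itself.

    # Sketch only: 'inst' is assumed to be an already-connected lib389 DirSrv
    # instance; the suffix and filter below are illustrative placeholders.
    from lib389.plugins import AutoMembershipPlugin

    plugin = AutoMembershipPlugin(inst)
    # With this patch, cleanup=True adds 'cleanup: yes' to the rebuild
    # membership task entry, clearing old memberships before rebuilding.
    task = plugin.fixup("dc=example,dc=com",
                        _filter="(objectclass=posixAccount)",
                        cleanup=True)
    task.wait()  # block until the fixup task finishes

The equivalent CLI invocation is the new '--cleanup' option added to the dsconf fixup sub-command above.
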
@ -1,83 +0,0 @@
From 9319d5b022918f14cacb00e3faef85a6ab730a26 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 27 Feb 2024 16:30:47 -0800
Subject: [PATCH] Issue 3527 - Support HAProxy and Instance on the same machine
configuration (#6107)

Description: Improve how we handle HAProxy connections to work better when
the DS and HAProxy are on the same machine.
Ensure the client and header destination IPs are checked against the trusted IP list.

Additionally, this change will also allow configuration having
HAProxy is listening on a different subnet than the one used to forward the request.

Related: https://github.com/389ds/389-ds-base/issues/3527

Reviewed by: @progier389, @jchapma (Thanks!)
---
ldap/servers/slapd/connection.c | 35 +++++++++++++++++++++++++--------
1 file changed, 27 insertions(+), 8 deletions(-)

diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index d28a39bf7..10a8cc577 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1187,6 +1187,8 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
char str_ip[INET6_ADDRSTRLEN + 1] = {0};
char str_haproxy_ip[INET6_ADDRSTRLEN + 1] = {0};
char str_haproxy_destip[INET6_ADDRSTRLEN + 1] = {0};
+ int trusted_matches_ip_found = 0;
+ int trusted_matches_destip_found = 0;
struct berval **bvals = NULL;
int proxy_connection = 0;

@@ -1245,21 +1247,38 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
normalize_IPv4(conn->cin_addr, buf_ip, sizeof(buf_ip), str_ip, sizeof(str_ip));
normalize_IPv4(&pr_netaddr_dest, buf_haproxy_destip, sizeof(buf_haproxy_destip),
str_haproxy_destip, sizeof(str_haproxy_destip));
+ size_t ip_len = strlen(buf_ip);
+ size_t destip_len = strlen(buf_haproxy_destip);

/* Now, reset RC and set it to 0 only if a match is found */
haproxy_rc = -1;

- /* Allow only:
- * Trusted IP == Original Client IP == HAProxy Header Destination IP */
+ /*
+ * We need to allow a configuration where DS instance and HAProxy are on the same machine.
+ * In this case, we need to check if
+ * the HAProxy client IP (which will be a loopback address) matches one of the the trusted IP addresses,
+ * while still checking that
+ * the HAProxy header destination IP address matches one of the trusted IP addresses.
+ * Additionally, this change will also allow configuration having
+ * HAProxy listening on a different subnet than one used to forward the request.
+ */
for (size_t i = 0; bvals[i] != NULL; ++i) {
- if ((strlen(bvals[i]->bv_val) == strlen(buf_ip)) &&
- (strlen(bvals[i]->bv_val) == strlen(buf_haproxy_destip)) &&
- (strncasecmp(bvals[i]->bv_val, buf_ip, strlen(buf_ip)) == 0) &&
- (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, strlen(buf_haproxy_destip)) == 0)) {
- haproxy_rc = 0;
- break;
+ size_t bval_len = strlen(bvals[i]->bv_val);
+
+ /* Check if the Client IP (HAProxy's machine IP) address matches the trusted IP address */
+ if (!trusted_matches_ip_found) {
+ trusted_matches_ip_found = (bval_len == ip_len) && (strncasecmp(bvals[i]->bv_val, buf_ip, ip_len) == 0);
+ }
+ /* Check if the HAProxy header destination IP address matches the trusted IP address */
+ if (!trusted_matches_destip_found) {
+ trusted_matches_destip_found = (bval_len == destip_len) && (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, destip_len) == 0);
}
}
+
+ if (trusted_matches_ip_found && trusted_matches_destip_found) {
+ haproxy_rc = 0;
+ }
+
if (haproxy_rc == -1) {
slapi_log_err(SLAPI_LOG_CONNS, "connection_read_operation", "HAProxy header received from unknown source.\n");
disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_PROXY_UNKNOWN, EPROTO);
--
2.45.0

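The net effect of the hunk above is that the check is split in two: the connecting client IP (HAProxy's own machine, possibly loopback) and the destination IP from the PROXY header must each match some entry in the trusted IP list, instead of all three values having to be identical. A short Python sketch of that predicate, with illustrative names that are not taken from the server code:

    def haproxy_header_trusted(trusted_ips, client_ip, header_dest_ip):
        """Both the connecting client IP and the PROXY header destination IP
        must appear in the trusted list; they no longer have to be equal."""
        trusted = {ip.lower() for ip in trusted_ips}
        return client_ip.lower() in trusted and header_dest_ip.lower() in trusted

    # Same-machine deployment: loopback client, routable destination.
    assert haproxy_header_trusted(["127.0.0.1", "192.0.2.10"], "127.0.0.1", "192.0.2.10")
    # A header arriving from an untrusted source is still rejected.
    assert not haproxy_header_trusted(["127.0.0.1"], "198.51.100.7", "127.0.0.1")
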
@ -1,108 +0,0 @@
|
||||
From 016a2b6bd3e27cbff36609824a75b020dfd24823 Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 1 May 2024 15:01:33 +0100
|
||||
Subject: [PATCH] CVE-2024-2199
|
||||
|
||||
---
|
||||
.../tests/suites/password/password_test.py | 56 +++++++++++++++++++
|
||||
ldap/servers/slapd/modify.c | 8 ++-
|
||||
2 files changed, 62 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py
|
||||
index 38079476a..b3ff08904 100644
|
||||
--- a/dirsrvtests/tests/suites/password/password_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/password_test.py
|
||||
@@ -65,6 +65,62 @@ def test_password_delete_specific_password(topology_st):
|
||||
log.info('test_password_delete_specific_password: PASSED')
|
||||
|
||||
|
||||
+def test_password_modify_non_utf8(topology_st):
|
||||
+ """Attempt a modify of the userPassword attribute with
|
||||
+ an invalid non utf8 value
|
||||
+
|
||||
+ :id: a31af9d5-d665-42b9-8d6e-fea3d0837d36
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Add a user if it doesnt exist and set its password
|
||||
+ 2. Verify password with a bind
|
||||
+ 3. Modify userPassword attr with invalid value
|
||||
+ 4. Attempt a bind with invalid password value
|
||||
+ 5. Verify original password with a bind
|
||||
+ :expectedresults:
|
||||
+ 1. The user with userPassword should be added successfully
|
||||
+ 2. Operation should be successful
|
||||
+ 3. Server returns ldap.UNWILLING_TO_PERFORM
|
||||
+ 4. Server returns ldap.INVALID_CREDENTIALS
|
||||
+ 5. Operation should be successful
|
||||
+ """
|
||||
+
|
||||
+ log.info('Running test_password_modify_non_utf8...')
|
||||
+
|
||||
+ # Create user and set password
|
||||
+ standalone = topology_st.standalone
|
||||
+ users = UserAccounts(standalone, DEFAULT_SUFFIX)
|
||||
+ if not users.exists(TEST_USER_PROPERTIES['uid'][0]):
|
||||
+ user = users.create(properties=TEST_USER_PROPERTIES)
|
||||
+ else:
|
||||
+ user = users.get(TEST_USER_PROPERTIES['uid'][0])
|
||||
+ user.set('userpassword', PASSWORD)
|
||||
+
|
||||
+ # Verify password
|
||||
+ try:
|
||||
+ user.bind(PASSWORD)
|
||||
+ except ldap.LDAPError as e:
|
||||
+ log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
|
||||
+ assert False
|
||||
+
|
||||
+ # Modify userPassword with an invalid value
|
||||
+ password = b'tes\x82t-password' # A non UTF-8 encoded password
|
||||
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
|
||||
+ user.replace('userpassword', password)
|
||||
+
|
||||
+ # Verify a bind fails with invalid pasword
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ user.bind(password)
|
||||
+
|
||||
+ # Verify we can still bind with original password
|
||||
+ try:
|
||||
+ user.bind(PASSWORD)
|
||||
+ except ldap.LDAPError as e:
|
||||
+ log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
|
||||
+ assert False
|
||||
+
|
||||
+ log.info('test_password_modify_non_utf8: PASSED')
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
|
||||
index 5ca78539c..669bb104c 100644
|
||||
--- a/ldap/servers/slapd/modify.c
|
||||
+++ b/ldap/servers/slapd/modify.c
|
||||
@@ -765,8 +765,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
|
||||
* flagged - leave mod attributes alone */
|
||||
if (!repl_op && !skip_modified_attrs && lastmod) {
|
||||
modify_update_last_modified_attr(pb, &smods);
|
||||
+ slapi_pblock_set(pb, SLAPI_MODIFY_MODS, slapi_mods_get_ldapmods_byref(&smods));
|
||||
}
|
||||
|
||||
+
|
||||
if (0 == slapi_mods_get_num_mods(&smods)) {
|
||||
/* nothing to do - no mods - this is not an error - just
|
||||
send back LDAP_SUCCESS */
|
||||
@@ -933,8 +935,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
|
||||
|
||||
/* encode password */
|
||||
if (pw_encodevals_ext(pb, sdn, va)) {
|
||||
- slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s.\n", slapi_entry_get_dn_const(e));
|
||||
- send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to store attribute \"userPassword\" correctly\n", 0, NULL);
|
||||
+ slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s, "
|
||||
+ "check value is utf8 string.\n", slapi_entry_get_dn_const(e));
|
||||
+ send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to hash \"userPassword\" attribute, "
|
||||
+ "check value is utf8 string.\n", 0, NULL);
|
||||
valuearray_free(&va);
|
||||
goto free_and_return;
|
||||
}
|
||||
--
|
||||
2.45.0
|
||||
|
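The modify.c hunk above turns an unhashable (non UTF-8) userPassword value into an explicit LDAP_UNWILLING_TO_PERFORM error rather than a later failure. The same pre-check is easy to express in Python; this sketch is illustrative, not the server implementation:

    def ensure_utf8_password(value: bytes) -> str:
        """Reject password values that cannot be hashed because they are
        not valid UTF-8, mirroring the unwilling-to-perform behaviour."""
        try:
            return value.decode("utf-8")
        except UnicodeDecodeError as err:
            raise ValueError("Unable to hash 'userPassword': value is not a UTF-8 string") from err

    ensure_utf8_password(b"secret12")               # accepted
    try:
        ensure_utf8_password(b"tes\x82t-password")  # the invalid byte used in the test above
    except ValueError as err:
        print(err)
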
@ -1,213 +0,0 @@
|
||||
From d5bbe52fbe84a7d3b5938bf82d5c4af15061a8e2 Mon Sep 17 00:00:00 2001
|
||||
From: Pierre Rogier <progier@redhat.com>
|
||||
Date: Wed, 17 Apr 2024 18:18:04 +0200
|
||||
Subject: [PATCH] CVE-2024-3657
|
||||
|
||||
---
|
||||
.../tests/suites/filter/large_filter_test.py | 34 +++++-
|
||||
ldap/servers/slapd/back-ldbm/index.c | 111 ++++++++++--------
|
||||
2 files changed, 92 insertions(+), 53 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/filter/large_filter_test.py b/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
index ecc7bf979..40526bb16 100644
|
||||
--- a/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
+++ b/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
@@ -13,19 +13,29 @@ verify and testing Filter from a search
|
||||
|
||||
import os
|
||||
import pytest
|
||||
+import ldap
|
||||
|
||||
-from lib389._constants import PW_DM
|
||||
+from lib389._constants import PW_DM, DEFAULT_SUFFIX, ErrorLog
|
||||
from lib389.topologies import topology_st as topo
|
||||
from lib389.idm.user import UserAccounts, UserAccount
|
||||
from lib389.idm.account import Accounts
|
||||
from lib389.backend import Backends
|
||||
from lib389.idm.domain import Domain
|
||||
+from lib389.utils import get_ldapurl_from_serverid
|
||||
|
||||
SUFFIX = 'dc=anuj,dc=com'
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
|
||||
+def open_new_ldapi_conn(dsinstance):
|
||||
+ ldapurl, certdir = get_ldapurl_from_serverid(dsinstance)
|
||||
+ assert 'ldapi://' in ldapurl
|
||||
+ conn = ldap.initialize(ldapurl)
|
||||
+ conn.sasl_interactive_bind_s("", ldap.sasl.external())
|
||||
+ return conn
|
||||
+
|
||||
+
|
||||
@pytest.fixture(scope="module")
|
||||
def _create_entries(request, topo):
|
||||
"""
|
||||
@@ -160,6 +170,28 @@ def test_large_filter(topo, _create_entries, real_value):
|
||||
assert len(Accounts(conn, SUFFIX).filter(real_value)) == 3
|
||||
|
||||
|
||||
+def test_long_filter_value(topo):
|
||||
+ """Exercise large eq filter with dn syntax attributes
|
||||
+
|
||||
+ :id: b069ef72-fcc3-11ee-981c-482ae39447e5
|
||||
+ :setup: Standalone
|
||||
+ :steps:
|
||||
+ 1. Try to pass filter rules as per the condition.
|
||||
+ :expectedresults:
|
||||
+ 1. Pass
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ conn = open_new_ldapi_conn(inst.serverid)
|
||||
+ inst.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE,ErrorLog.SEARCH_FILTER))
|
||||
+ filter_value = "a\x1Edmin" * 1025
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ filter_value = "aAdmin" * 1025
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ filter_value = "*"
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ inst.config.loglevel(vals=(ErrorLog.DEFAULT,))
|
||||
+
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main("-s -v %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c
|
||||
index 410db23d1..30fa09ebb 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/index.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/index.c
|
||||
@@ -71,6 +71,32 @@ typedef struct _index_buffer_handle index_buffer_handle;
|
||||
#define INDEX_BUFFER_FLAG_SERIALIZE 1
|
||||
#define INDEX_BUFFER_FLAG_STATS 2
|
||||
|
||||
+/*
|
||||
+ * space needed to encode a byte:
|
||||
+ * 0x00-0x31 and 0x7f-0xff requires 3 bytes: \xx
|
||||
+ * 0x22 and 0x5C requires 2 bytes: \" and \\
|
||||
+ * other requires 1 byte: c
|
||||
+ */
|
||||
+static char encode_size[] = {
|
||||
+ /* 0x00 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x10 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x20 */ 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1,
|
||||
+ /* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
|
||||
+ /* 0x80 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x90 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xA0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xB0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xC0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xD0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xE0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xF0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+};
|
||||
+
|
||||
+
|
||||
/* Index buffering functions */
|
||||
|
||||
static int
|
||||
@@ -799,65 +825,46 @@ index_add_mods(
|
||||
|
||||
/*
|
||||
* Convert a 'struct berval' into a displayable ASCII string
|
||||
+ * returns the printable string
|
||||
*/
|
||||
-
|
||||
-#define SPECIAL(c) (c < 32 || c > 126 || c == '\\' || c == '"')
|
||||
-
|
||||
const char *
|
||||
encode(const struct berval *data, char buf[BUFSIZ])
|
||||
{
|
||||
- char *s;
|
||||
- char *last;
|
||||
- if (data == NULL || data->bv_len == 0)
|
||||
- return "";
|
||||
- last = data->bv_val + data->bv_len - 1;
|
||||
- for (s = data->bv_val; s < last; ++s) {
|
||||
- if (SPECIAL(*s)) {
|
||||
- char *first = data->bv_val;
|
||||
- char *bufNext = buf;
|
||||
- size_t bufSpace = BUFSIZ - 4;
|
||||
- while (1) {
|
||||
- /* printf ("%lu bytes ASCII\n", (unsigned long)(s - first)); */
|
||||
- if (bufSpace < (size_t)(s - first))
|
||||
- s = first + bufSpace - 1;
|
||||
- if (s != first) {
|
||||
- memcpy(bufNext, first, s - first);
|
||||
- bufNext += (s - first);
|
||||
- bufSpace -= (s - first);
|
||||
- }
|
||||
- do {
|
||||
- if (bufSpace) {
|
||||
- *bufNext++ = '\\';
|
||||
- --bufSpace;
|
||||
- }
|
||||
- if (bufSpace < 2) {
|
||||
- memcpy(bufNext, "..", 2);
|
||||
- bufNext += 2;
|
||||
- goto bail;
|
||||
- }
|
||||
- if (*s == '\\' || *s == '"') {
|
||||
- *bufNext++ = *s;
|
||||
- --bufSpace;
|
||||
- } else {
|
||||
- sprintf(bufNext, "%02x", (unsigned)*(unsigned char *)s);
|
||||
- bufNext += 2;
|
||||
- bufSpace -= 2;
|
||||
- }
|
||||
- } while (++s <= last && SPECIAL(*s));
|
||||
- if (s > last)
|
||||
- break;
|
||||
- first = s;
|
||||
- while (!SPECIAL(*s) && s <= last)
|
||||
- ++s;
|
||||
- }
|
||||
- bail:
|
||||
- *bufNext = '\0';
|
||||
- /* printf ("%lu chars in buffer\n", (unsigned long)(bufNext - buf)); */
|
||||
+ if (!data || !data->bv_val) {
|
||||
+ strcpy(buf, "<NULL>");
|
||||
+ return buf;
|
||||
+ }
|
||||
+ char *endbuff = &buf[BUFSIZ-4]; /* Reserve space to append "...\0" */
|
||||
+ char *ptout = buf;
|
||||
+ unsigned char *ptin = (unsigned char*) data->bv_val;
|
||||
+ unsigned char *endptin = ptin+data->bv_len;
|
||||
+
|
||||
+ while (ptin < endptin) {
|
||||
+ if (ptout >= endbuff) {
|
||||
+ /*
|
||||
+ * BUFSIZ(8K) > SLAPI_LOG_BUFSIZ(2K) so the error log message will be
|
||||
+ * truncated anyway. So there is no real interrest to test if the original
|
||||
+ * data contains no special characters and return it as is.
|
||||
+ */
|
||||
+ strcpy(endbuff, "...");
|
||||
return buf;
|
||||
}
|
||||
+ switch (encode_size[*ptin]) {
|
||||
+ case 1:
|
||||
+ *ptout++ = *ptin++;
|
||||
+ break;
|
||||
+ case 2:
|
||||
+ *ptout++ = '\\';
|
||||
+ *ptout++ = *ptin++;
|
||||
+ break;
|
||||
+ case 3:
|
||||
+ sprintf(ptout, "\\%02x", *ptin++);
|
||||
+ ptout += 3;
|
||||
+ break;
|
||||
+ }
|
||||
}
|
||||
- /* printf ("%lu bytes, all ASCII\n", (unsigned long)(s - data->bv_val)); */
|
||||
- return data->bv_val;
|
||||
+ *ptout = 0;
|
||||
+ return buf;
|
||||
}
|
||||
|
||||
static const char *
|
||||
--
|
||||
2.45.0
|
||||
|
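The rewritten encode() above does a single pass over the berval, spending one output byte for plain printable characters, two for an escaped quote or backslash, and three for a \xx hex escape, and it truncates with "..." once the fixed buffer would overflow. The same bounded-escaping idea in Python, with an explicit budget standing in for BUFSIZ (a sketch, not the C routine):

    def encode_for_log(data: bytes, budget: int = 8192) -> str:
        """Escape a raw value for logging with a hard output-size cap."""
        out, used = [], 0
        for b in data:
            if b in (0x22, 0x5c):                  # '"' and '\\' get a backslash prefix
                piece = "\\" + chr(b)
            elif 0x20 <= b <= 0x7e:                # printable ASCII is copied as-is
                piece = chr(b)
            else:                                  # everything else becomes \xx
                piece = "\\%02x" % b
            if used + len(piece) > budget - 4:     # keep room for the "..." marker
                out.append("...")
                break
            out.append(piece)
            used += len(piece)
        return "".join(out)

    print(encode_for_log(b'a\x1edmin"x'))   # -> a\1edmin\"x
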
@ -1,143 +0,0 @@
|
||||
From 6e5f03d5872129963106024f53765234a282406c Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Fri, 16 Feb 2024 11:13:16 +0000
|
||||
Subject: [PATCH] Issue 6096 - Improve connection timeout error logging (#6097)
|
||||
|
||||
Bug description: When a paged result search is run with a time limit,
|
||||
if the time limit is exceed the server closes the connection with
|
||||
closed IO timeout (nsslapd-ioblocktimeout) - T2. This error message
|
||||
is incorrect as the reason the connection has been closed was because
|
||||
the specified time limit on a paged result search has been exceeded.
|
||||
|
||||
Fix description: Correct error message
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6096
|
||||
|
||||
Reviewed by: @tbordaz (Thank you)
|
||||
---
|
||||
ldap/admin/src/logconv.pl | 24 ++++++++++++++++++-
|
||||
ldap/servers/slapd/daemon.c | 4 ++--
|
||||
ldap/servers/slapd/disconnect_error_strings.h | 1 +
|
||||
ldap/servers/slapd/disconnect_errors.h | 2 +-
|
||||
4 files changed, 27 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
|
||||
index 7698c383a..2a933c4a3 100755
|
||||
--- a/ldap/admin/src/logconv.pl
|
||||
+++ b/ldap/admin/src/logconv.pl
|
||||
@@ -267,7 +267,7 @@ my $optimeAvg = 0;
|
||||
my %cipher = ();
|
||||
my @removefiles = ();
|
||||
|
||||
-my @conncodes = qw(A1 B1 B4 T1 T2 B2 B3 R1 P1 P2 U1);
|
||||
+my @conncodes = qw(A1 B1 B4 T1 T2 T3 B2 B3 R1 P1 P2 U1);
|
||||
my %conn = ();
|
||||
map {$conn{$_} = $_} @conncodes;
|
||||
|
||||
@@ -355,6 +355,7 @@ $connmsg{"B1"} = "Bad Ber Tag Encountered";
|
||||
$connmsg{"B4"} = "Server failed to flush data (response) back to Client";
|
||||
$connmsg{"T1"} = "Idle Timeout Exceeded";
|
||||
$connmsg{"T2"} = "IO Block Timeout Exceeded or NTSSL Timeout";
|
||||
+$connmsg{"T3"} = "Paged Search Time Limit Exceeded";
|
||||
$connmsg{"B2"} = "Ber Too Big";
|
||||
$connmsg{"B3"} = "Ber Peek";
|
||||
$connmsg{"R1"} = "Revents";
|
||||
@@ -1723,6 +1724,10 @@ if ($usage =~ /j/i || $verb eq "yes"){
|
||||
print "\n $recCount. You have some coonections that are being closed by the ioblocktimeout setting. You may want to increase the ioblocktimeout.\n";
|
||||
$recCount++;
|
||||
}
|
||||
+ if (defined($conncount->{"T3"}) and $conncount->{"T3"} > 0){
|
||||
+ print "\n $recCount. You have some connections that are being closed because a paged result search limit has been exceeded. You may want to increase the search time limit.\n";
|
||||
+ $recCount++;
|
||||
+ }
|
||||
# compare binds to unbinds, if the difference is more than 30% of the binds, then report a issue
|
||||
if (($bindCount - $unbindCount) > ($bindCount*.3)){
|
||||
print "\n $recCount. You have a significant difference between binds and unbinds. You may want to investigate this difference.\n";
|
||||
@@ -2366,6 +2371,7 @@ sub parseLineNormal
|
||||
$brokenPipeCount++;
|
||||
if (m/- T1/){ $hashes->{rc}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{rc}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{rc}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{rc}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{rc}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{rc}->{"B4"}++; }
|
||||
@@ -2381,6 +2387,7 @@ sub parseLineNormal
|
||||
$connResetByPeerCount++;
|
||||
if (m/- T1/){ $hashes->{src}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{src}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{src}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{src}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{src}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{src}->{"B4"}++; }
|
||||
@@ -2396,6 +2403,7 @@ sub parseLineNormal
|
||||
$resourceUnavailCount++;
|
||||
if (m/- T1/){ $hashes->{rsrc}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{rsrc}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{rsrc}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{rsrc}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{rsrc}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{rsrc}->{"B4"}++; }
|
||||
@@ -2494,6 +2502,20 @@ sub parseLineNormal
|
||||
}
|
||||
}
|
||||
}
|
||||
+ if (m/- T3/){
|
||||
+ if ($_ =~ /conn= *([0-9A-Z]+)/i) {
|
||||
+ $exc = "no";
|
||||
+ $ip = getIPfromConn($1, $serverRestartCount);
|
||||
+ for (my $xxx = 0; $xxx < $#excludeIP; $xxx++){
|
||||
+ if ($ip eq $excludeIP[$xxx]){$exc = "yes";}
|
||||
+ }
|
||||
+ if ($exc ne "yes"){
|
||||
+ $hashes->{T3}->{$ip}++;
|
||||
+ $hashes->{conncount}->{"T3"}++;
|
||||
+ $connCodeCount++;
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
if (m/- B2/){
|
||||
if ($_ =~ /conn= *([0-9A-Z]+)/i) {
|
||||
$exc = "no";
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 5a48aa66f..bb80dae36 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1599,9 +1599,9 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
int add_fd = 1;
|
||||
/* check timeout for PAGED RESULTS */
|
||||
if (pagedresults_is_timedout_nolock(c)) {
|
||||
- /* Exceeded the timelimit; disconnect the client */
|
||||
+ /* Exceeded the paged search timelimit; disconnect the client */
|
||||
disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
- SLAPD_DISCONNECT_IO_TIMEOUT,
|
||||
+ SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
0);
|
||||
connection_table_move_connection_out_of_active_list(ct,
|
||||
c);
|
||||
diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
index f7a31d728..c2d9e283b 100644
|
||||
--- a/ldap/servers/slapd/disconnect_error_strings.h
|
||||
+++ b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
@@ -27,6 +27,7 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
|
||||
ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
|
||||
ER2(SLAPD_DISCONNECT_REVENTS, "R1")
|
||||
ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
|
||||
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
|
||||
ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
|
||||
ER2(SLAPD_DISCONNECT_UNBIND, "U1")
|
||||
ER2(SLAPD_DISCONNECT_POLL, "P2")
|
||||
diff --git a/ldap/servers/slapd/disconnect_errors.h b/ldap/servers/slapd/disconnect_errors.h
|
||||
index a0484f1c2..e118f674c 100644
|
||||
--- a/ldap/servers/slapd/disconnect_errors.h
|
||||
+++ b/ldap/servers/slapd/disconnect_errors.h
|
||||
@@ -35,6 +35,6 @@
|
||||
#define SLAPD_DISCONNECT_SASL_FAIL SLAPD_DISCONNECT_ERROR_BASE + 12
|
||||
#define SLAPD_DISCONNECT_PROXY_INVALID_HEADER SLAPD_DISCONNECT_ERROR_BASE + 13
|
||||
#define SLAPD_DISCONNECT_PROXY_UNKNOWN SLAPD_DISCONNECT_ERROR_BASE + 14
|
||||
-
|
||||
+#define SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT SLAPD_DISCONNECT_ERROR_BASE + 15
|
||||
|
||||
#endif /* __DISCONNECT_ERRORS_H_ */
|
||||
--
|
||||
2.45.0
|
||||
|
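With the new T3 code in place, logconv.pl simply counts one more disconnect reason per access-log line. The tallying it performs can be sketched compactly in Python; the regular expression below is a simplified placeholder rather than logconv's exact pattern, and the reason strings are the ones added in the patch:

    import re
    from collections import Counter

    REASONS = {
        "T1": "Idle Timeout Exceeded",
        "T2": "IO Block Timeout Exceeded or NTSSL Timeout",
        "T3": "Paged Search Time Limit Exceeded",
    }

    def tally_disconnects(lines):
        """Count '- Tn' disconnect markers found in access-log lines."""
        counts = Counter()
        for line in lines:
            m = re.search(r"- (T[123])\b", line)
            if m:
                counts[m.group(1)] += 1
        return counts

    sample = ["conn=42 op=-1 fd=64 closed - T3",
              "conn=43 op=-1 fd=65 closed - T1"]
    print(tally_disconnects(sample))   # Counter({'T3': 1, 'T1': 1})
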
@ -1,44 +0,0 @@
From a112394af3a20787755029804684d57a9c3ffa9a Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 21 Feb 2024 12:43:03 +0000
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap
(#6104)

Bug description: A recent addition to the connection disconnect error
messaging, conflicts with how errormap.c maps error codes/strings.

Fix description: errormap expects error codes/strings to be in ascending
order. Moved the new error code to the bottom of the list.

Relates: https://github.com/389ds/389-ds-base/issues/6103

Reviewed by: @droideck. @progier389 (Thank you)
---
ldap/servers/slapd/disconnect_error_strings.h | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index c2d9e283b..f603a08ce 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -14,7 +14,8 @@
/* disconnect_error_strings.h
*
* Strings describing the errors used in logging the reason a connection
- * was closed.
+ * was closed. Ensure definitions are in the same order as the error codes
+ * defined in disconnect_errors.h
*/
#ifndef __DISCONNECT_ERROR_STRINGS_H_
#define __DISCONNECT_ERROR_STRINGS_H_
@@ -35,6 +36,6 @@ ER2(SLAPD_DISCONNECT_NTSSL_TIMEOUT, "T2")
ER2(SLAPD_DISCONNECT_SASL_FAIL, "S1")
ER2(SLAPD_DISCONNECT_PROXY_INVALID_HEADER, "P3")
ER2(SLAPD_DISCONNECT_PROXY_UNKNOWN, "P4")
-
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")

#endif /* __DISCONNECT_ERROR_STRINGS_H_ */
--
2.45.0

@ -1,30 +0,0 @@
From edd9abc8901604dde1d739d87ca2906734d53dd3 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Thu, 13 Jun 2024 13:35:09 +0200
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap

Description:
Remove duplicate SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT error code.

Fixes: https://github.com/389ds/389-ds-base/issues/6103

Reviewed by: @tbordaz (Thanks!)
---
ldap/servers/slapd/disconnect_error_strings.h | 1 -
1 file changed, 1 deletion(-)

diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index f603a08ce..d49cc79a2 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -28,7 +28,6 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
ER2(SLAPD_DISCONNECT_REVENTS, "R1")
ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
-ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
ER2(SLAPD_DISCONNECT_UNBIND, "U1")
ER2(SLAPD_DISCONNECT_POLL, "P2")
--
2.45.0

@ -1,220 +0,0 @@
|
||||
From 8cf981c00ae18d3efaeb10819282cd991621e9a2 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Wed, 22 May 2024 11:29:05 +0200
|
||||
Subject: [PATCH] Issue 6172 - RFE: improve the performance of evaluation of
|
||||
filter component when tested against a large valueset (like group members)
|
||||
(#6173)
|
||||
|
||||
Bug description:
|
||||
Before returning an entry (to a SRCH) the server checks that the entry matches the SRCH filter.
|
||||
If a filter component (equality) is testing the value (ava) against a
|
||||
large valueset (like uniquemember values), it takes a long time because
|
||||
of the large number of values and required normalization of the values.
|
||||
This can be improved taking benefit of sorted valueset. Those sorted
|
||||
valueset were created to improve updates of large valueset (groups) but
|
||||
at that time not implemented in SRCH path.
|
||||
|
||||
Fix description:
|
||||
In case of LDAP_FILTER_EQUALITY component, the server can get
|
||||
benefit of the sorted valuearray.
|
||||
To limit the risk of regression, we use the sorted valuearray
|
||||
only for the DN syntax attribute. Indeed the sorted valuearray was
|
||||
designed for those type of attribute.
|
||||
With those two limitations, there is no need of a toggle and
|
||||
the call to plugin_call_syntax_filter_ava can be replaced by
|
||||
a call to slapi_valueset_find.
|
||||
In both cases, sorted valueset and plugin_call_syntax_filter_ava, ava and
|
||||
values are normalized.
|
||||
In sorted valueset, the values have been normalized to insert the index
|
||||
in the sorted array and then comparison is done on normalized values.
|
||||
In plugin_call_syntax_filter_ava, all values in valuearray (of valueset) are normalized
|
||||
before comparison.
|
||||
|
||||
relates: #6172
|
||||
|
||||
Reviewed by: Pierre Rogier, Simon Pichugin (Big Thanks !!!)
|
||||
---
|
||||
.../tests/suites/filter/filter_test.py | 125 ++++++++++++++++++
|
||||
ldap/servers/slapd/filterentry.c | 22 ++-
|
||||
2 files changed, 146 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
index d6bfa5a3b..4baaf04a7 100644
|
||||
--- a/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
+++ b/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
@@ -9,7 +9,11 @@
|
||||
import logging
|
||||
|
||||
import pytest
|
||||
+import time
|
||||
+from lib389.dirsrv_log import DirsrvAccessLog
|
||||
from lib389.tasks import *
|
||||
+from lib389.backend import Backends, Backend
|
||||
+from lib389.dbgen import dbgen_users, dbgen_groups
|
||||
from lib389.topologies import topology_st
|
||||
from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX
|
||||
from lib389.utils import *
|
||||
@@ -304,6 +308,127 @@ def test_extended_search(topology_st):
|
||||
ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
|
||||
assert len(ents) == 1
|
||||
|
||||
+def test_match_large_valueset(topology_st):
|
||||
+ """Test that when returning a big number of entries
|
||||
+ and that we need to match the filter from a large valueset
|
||||
+ we get benefit to use the sorted valueset
|
||||
+
|
||||
+ :id: 7db5aa88-50e0-4c31-85dd-1d2072cb674c
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Create a users and groups backends and tune them
|
||||
+ 2. Generate a test ldif (2k users and 1K groups with all users)
|
||||
+ 3. Import test ldif file using Offline import (ldif2db).
|
||||
+ 4. Prim the 'groups' entrycache with a "fast" search
|
||||
+ 5. Search the 'groups' with a difficult matching value
|
||||
+ 6. check that etime from step 5 is less than a second
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Create a users and groups backends should PASS
|
||||
+ 2. Generate LDIF should PASS.
|
||||
+ 3. Offline import should PASS.
|
||||
+ 4. Priming should PASS.
|
||||
+ 5. Performance search should PASS.
|
||||
+ 6. Etime of performance search should PASS.
|
||||
+ """
|
||||
+
|
||||
+ log.info('Running test_match_large_valueset...')
|
||||
+ #
|
||||
+ # Test online/offline LDIF imports
|
||||
+ #
|
||||
+ inst = topology_st.standalone
|
||||
+ inst.start()
|
||||
+ backends = Backends(inst)
|
||||
+ users_suffix = "ou=users,%s" % DEFAULT_SUFFIX
|
||||
+ users_backend = 'users'
|
||||
+ users_ldif = 'users_import.ldif'
|
||||
+ groups_suffix = "ou=groups,%s" % DEFAULT_SUFFIX
|
||||
+ groups_backend = 'groups'
|
||||
+ groups_ldif = 'groups_import.ldif'
|
||||
+ groups_entrycache = '200000000'
|
||||
+ users_number = 2000
|
||||
+ groups_number = 1000
|
||||
+
|
||||
+
|
||||
+ # For priming the cache we just want to be fast
|
||||
+ # taking the first value in the valueset is good
|
||||
+ # whether the valueset is sorted or not
|
||||
+ priming_user_rdn = "user0001"
|
||||
+
|
||||
+ # For performance testing, this is important to use
|
||||
+ # user1000 rather then user0001
|
||||
+ # Because user0001 is the first value in the valueset
|
||||
+ # whether we use the sorted valuearray or non sorted
|
||||
+ # valuearray the performance will be similar.
|
||||
+ # With middle value user1000, the performance boost of
|
||||
+ # the sorted valuearray will make the difference.
|
||||
+ perf_user_rdn = "user1000"
|
||||
+
|
||||
+ # Step 1. Prepare the backends and tune the groups entrycache
|
||||
+ try:
|
||||
+ be_users = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': users_suffix, 'name': users_backend})
|
||||
+ be_groups = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': groups_suffix, 'name': groups_backend})
|
||||
+
|
||||
+ # set the entry cache to 200Mb as the 1K groups of 2K users require at least 170Mb
|
||||
+ be_groups.replace('nsslapd-cachememsize', groups_entrycache)
|
||||
+ except:
|
||||
+ raise
|
||||
+
|
||||
+ # Step 2. Generate a test ldif (10k users entries)
|
||||
+ log.info("Generating users LDIF...")
|
||||
+ ldif_dir = inst.get_ldif_dir()
|
||||
+ users_import_ldif = "%s/%s" % (ldif_dir, users_ldif)
|
||||
+ groups_import_ldif = "%s/%s" % (ldif_dir, groups_ldif)
|
||||
+ dbgen_users(inst, users_number, users_import_ldif, suffix=users_suffix, generic=True, parent=users_suffix)
|
||||
+
|
||||
+ # Generate a test ldif (800 groups with 10k members) that fit in 700Mb entry cache
|
||||
+ props = {
|
||||
+ "name": "group",
|
||||
+ "suffix": groups_suffix,
|
||||
+ "parent": groups_suffix,
|
||||
+ "number": groups_number,
|
||||
+ "numMembers": users_number,
|
||||
+ "createMembers": False,
|
||||
+ "memberParent": users_suffix,
|
||||
+ "membershipAttr": "uniquemember",
|
||||
+ }
|
||||
+ dbgen_groups(inst, groups_import_ldif, props)
|
||||
+
|
||||
+ # Step 3. Do the both offline imports
|
||||
+ inst.stop()
|
||||
+ if not inst.ldif2db(users_backend, None, None, None, users_import_ldif):
|
||||
+ log.fatal('test_basic_import_export: Offline users import failed')
|
||||
+ assert False
|
||||
+ if not inst.ldif2db(groups_backend, None, None, None, groups_import_ldif):
|
||||
+ log.fatal('test_basic_import_export: Offline groups import failed')
|
||||
+ assert False
|
||||
+ inst.start()
|
||||
+
|
||||
+ # Step 4. first prime the cache
|
||||
+ # Just request the 'DN'. We are interested by the time of matching not by the time of transfert
|
||||
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (priming_user_rdn, users_suffix), ['dn'])
|
||||
+ assert len(entries) == groups_number
|
||||
+
|
||||
+ # Step 5. Now do the real performance checking it should take less than a second
|
||||
+ # Just request the 'DN'. We are interested by the time of matching not by the time of transfert
|
||||
+ search_start = time.time()
|
||||
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (perf_user_rdn, users_suffix), ['dn'])
|
||||
+ duration = time.time() - search_start
|
||||
+ log.info("Duration of the search was %f", duration)
|
||||
+
|
||||
+ # Step 6. Gather the etime from the access log
|
||||
+ inst.stop()
|
||||
+ access_log = DirsrvAccessLog(inst)
|
||||
+ search_result = access_log.match(".*RESULT err=0 tag=101 nentries=%s.*" % groups_number)
|
||||
+ log.info("Found patterns are %s", search_result[0])
|
||||
+ log.info("Found patterns are %s", search_result[1])
|
||||
+ etime = float(search_result[1].split('etime=')[1])
|
||||
+ log.info("Duration of the search from access log was %f", etime)
|
||||
+ assert len(entries) == groups_number
|
||||
+ assert (etime < 1)
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c
|
||||
index fd8fdda9f..cae5c7edc 100644
|
||||
--- a/ldap/servers/slapd/filterentry.c
|
||||
+++ b/ldap/servers/slapd/filterentry.c
|
||||
@@ -296,7 +296,27 @@ test_ava_filter(
|
||||
rc = -1;
|
||||
for (; a != NULL; a = a->a_next) {
|
||||
if (slapi_attr_type_cmp(ava->ava_type, a->a_type, SLAPI_TYPE_CMP_SUBTYPE) == 0) {
|
||||
- rc = plugin_call_syntax_filter_ava(a, ftype, ava);
|
||||
+ if ((ftype == LDAP_FILTER_EQUALITY) &&
|
||||
+ (slapi_attr_is_dn_syntax_type(a->a_type))) {
|
||||
+ /* This path is for a performance improvement */
|
||||
+
|
||||
+ /* In case of equality filter we can get benefit of the
|
||||
+ * sorted valuearray (from valueset).
|
||||
+ * This improvement is limited to DN syntax attributes for
|
||||
+ * which the sorted valueset was designed.
|
||||
+ */
|
||||
+ Slapi_Value *sval = NULL;
|
||||
+ sval = slapi_value_new_berval(&ava->ava_value);
|
||||
+ if (slapi_valueset_find((const Slapi_Attr *)a, &a->a_present_values, sval)) {
|
||||
+ rc = 0;
|
||||
+ }
|
||||
+ slapi_value_free(&sval);
|
||||
+ } else {
|
||||
+ /* When sorted valuearray optimization cannot be used
|
||||
+ * lets filter the value according to its syntax
|
||||
+ */
|
||||
+ rc = plugin_call_syntax_filter_ava(a, ftype, ava);
|
||||
+ }
|
||||
if (rc == 0) {
|
||||
break;
|
||||
}
|
||||
--
|
||||
2.46.0
|
||||
|
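The speedup in the filterentry.c hunk comes from answering an equality test with a lookup in the valueset's already-sorted value array instead of a linear pass through plugin_call_syntax_filter_ava. Under the simplifying assumption that the values are already normalized strings, the lookup is a plain binary search, as in this sketch:

    from bisect import bisect_left

    def valueset_contains(sorted_values, ava, normalize=str.casefold):
        """Equality filter test against a sorted list of normalized values."""
        needle = normalize(ava)
        i = bisect_left(sorted_values, needle)
        return i < len(sorted_values) and sorted_values[i] == needle

    members = sorted("uid=user%04d,ou=users,dc=example,dc=com" % i for i in range(2000))
    print(valueset_contains(members, "uid=user1000,ou=users,dc=example,dc=com"))  # True
    print(valueset_contains(members, "uid=nosuch,ou=users,dc=example,dc=com"))    # False
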
@ -1,163 +0,0 @@
|
||||
From 57051154bafaf50b83fc27dadbd89a49fd1c8c36 Mon Sep 17 00:00:00 2001
|
||||
From: Pierre Rogier <progier@redhat.com>
|
||||
Date: Fri, 14 Jun 2024 13:27:10 +0200
|
||||
Subject: [PATCH] Security fix for CVE-2024-5953
|
||||
|
||||
Description:
|
||||
A denial of service vulnerability was found in the 389 Directory Server.
|
||||
This issue may allow an authenticated user to cause a server denial
|
||||
of service while attempting to log in with a user with a malformed hash
|
||||
in their password.
|
||||
|
||||
Fix Description:
|
||||
To prevent buffer overflow when a bind request is processed, the bind fails
|
||||
if the hash size is not coherent without even attempting to process further
|
||||
the hashed password.
|
||||
|
||||
References:
|
||||
- https://nvd.nist.gov/vuln/detail/CVE-2024-5953
|
||||
- https://access.redhat.com/security/cve/CVE-2024-5953
|
||||
- https://bugzilla.redhat.com/show_bug.cgi?id=2292104
|
||||
---
|
||||
.../tests/suites/password/regression_test.py | 54 ++++++++++++++++++-
|
||||
ldap/servers/plugins/pwdstorage/md5_pwd.c | 9 +++-
|
||||
ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c | 6 +++
|
||||
3 files changed, 66 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py
|
||||
index 8f1facb6d..1fa581643 100644
|
||||
--- a/dirsrvtests/tests/suites/password/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/regression_test.py
|
||||
@@ -7,12 +7,14 @@
|
||||
#
|
||||
import pytest
|
||||
import time
|
||||
+import glob
|
||||
+import base64
|
||||
from lib389._constants import PASSWORD, DN_DM, DEFAULT_SUFFIX
|
||||
from lib389._constants import SUFFIX, PASSWORD, DN_DM, DN_CONFIG, PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DEFAULT_CHANGELOG_DB
|
||||
from lib389 import Entry
|
||||
from lib389.topologies import topology_m1 as topo_supplier
|
||||
-from lib389.idm.user import UserAccounts
|
||||
-from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer
|
||||
+from lib389.idm.user import UserAccounts, UserAccount
|
||||
+from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer, ds_supports_new_changelog
|
||||
from lib389.topologies import topology_st as topo
|
||||
from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
|
||||
@@ -39,6 +41,13 @@ TEST_PASSWORDS += ['CNpwtest1ZZZZ', 'ZZZZZCNpwtest1',
|
||||
TEST_PASSWORDS2 = (
|
||||
'CN12pwtest31', 'SN3pwtest231', 'UID1pwtest123', 'MAIL2pwtest12@redhat.com', '2GN1pwtest123', 'People123')
|
||||
|
||||
+SUPPORTED_SCHEMES = (
|
||||
+ "{SHA}", "{SSHA}", "{SHA256}", "{SSHA256}",
|
||||
+ "{SHA384}", "{SSHA384}", "{SHA512}", "{SSHA512}",
|
||||
+ "{crypt}", "{NS-MTA-MD5}", "{clear}", "{MD5}",
|
||||
+ "{SMD5}", "{PBKDF2_SHA256}", "{PBKDF2_SHA512}",
|
||||
+ "{GOST_YESCRYPT}", "{PBKDF2-SHA256}", "{PBKDF2-SHA512}" )
|
||||
+
|
||||
def _check_unhashed_userpw(inst, user_dn, is_present=False):
|
||||
"""Check if unhashed#user#password attribute is present or not in the changelog"""
|
||||
unhashed_pwd_attribute = 'unhashed#user#password'
|
||||
@@ -319,6 +328,47 @@ def test_unhashed_pw_switch(topo_supplier):
|
||||
# Add debugging steps(if any)...
|
||||
pass
|
||||
|
||||
+@pytest.mark.parametrize("scheme", SUPPORTED_SCHEMES )
|
||||
+def test_long_hashed_password(topo, create_user, scheme):
|
||||
+ """Check that hashed password with very long value does not cause trouble
|
||||
+
|
||||
+ :id: 252a1f76-114b-11ef-8a7a-482ae39447e5
|
||||
+ :setup: standalone Instance
|
||||
+ :parametrized: yes
|
||||
+ :steps:
|
||||
+ 1. Add a test user user
|
||||
+ 2. Set a long password with requested scheme
|
||||
+ 3. Bind on that user using a wrong password
|
||||
+ 4. Check that instance is still alive
|
||||
+ 5. Remove the added user
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Should get ldap.INVALID_CREDENTIALS exception
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ inst.simple_bind_s(DN_DM, PASSWORD)
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ # Make sure that server is started as this test may crash it
|
||||
+ inst.start()
|
||||
+ # Adding Test user (It may already exists if previous test failed)
|
||||
+ user2 = UserAccount(inst, dn='uid=test_user_1002,ou=People,dc=example,dc=com')
|
||||
+ if not user2.exists():
|
||||
+ user2 = users.create_test_user(uid=1002, gid=2002)
|
||||
+ # Setting hashed password
|
||||
+ passwd = 'A'*4000
|
||||
+ hashed_passwd = scheme.encode('utf-8') + base64.b64encode(passwd.encode('utf-8'))
|
||||
+ user2.replace('userpassword', hashed_passwd)
|
||||
+ # Bind on that user using a wrong password
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ conn = user2.bind(PASSWORD)
|
||||
+ # Check that instance is still alive
|
||||
+ assert inst.status()
|
||||
+ # Remove the added user
|
||||
+ user2.delete()
|
||||
+
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/md5_pwd.c b/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
index 1e2cf58e7..b9a48d5ca 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
@@ -37,6 +37,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
unsigned char hash_out[MD5_HASH_LEN];
|
||||
unsigned char b2a_out[MD5_HASH_LEN * 2]; /* conservative */
|
||||
SECItem binary_item;
|
||||
+ size_t dbpwd_len = strlen(dbpwd);
|
||||
|
||||
ctx = PK11_CreateDigestContext(SEC_OID_MD5);
|
||||
if (ctx == NULL) {
|
||||
@@ -45,6 +46,12 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
goto loser;
|
||||
}
|
||||
|
||||
+ if (dbpwd_len >= sizeof b2a_out) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
|
||||
+ "The hashed password stored in the user entry is longer than any valid md5 hash");
|
||||
+ goto loser;
|
||||
+ }
|
||||
+
|
||||
/* create the hash */
|
||||
PK11_DigestBegin(ctx);
|
||||
PK11_DigestOp(ctx, (const unsigned char *)userpwd, strlen(userpwd));
|
||||
@@ -57,7 +64,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
bver = NSSBase64_EncodeItem(NULL, (char *)b2a_out, sizeof b2a_out, &binary_item);
|
||||
/* bver points to b2a_out upon success */
|
||||
if (bver) {
|
||||
- rc = slapi_ct_memcmp(bver, dbpwd, strlen(dbpwd));
|
||||
+ rc = slapi_ct_memcmp(bver, dbpwd, dbpwd_len);
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
|
||||
"Could not base64 encode hashed value for password compare");
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
index dcac4fcdd..82b8c9501 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
@@ -255,6 +255,12 @@ pbkdf2_sha256_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
passItem.data = (unsigned char *)userpwd;
|
||||
passItem.len = strlen(userpwd);
|
||||
|
||||
+ if (pwdstorage_base64_decode_len(dbpwd, dbpwd_len) > sizeof dbhash) {
|
||||
+ /* Hashed value is too long and cannot match any value generated by pbkdf2_sha256_hash */
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value. (hashed value is too long)\n");
|
||||
+ return result;
|
||||
+ }
|
||||
+
|
||||
/* Decode the DBpwd to bytes from b64 */
|
||||
if (PL_Base64Decode(dbpwd, dbpwd_len, dbhash) == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value\n");
|
||||
--
|
||||
2.46.0
|
||||
|
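Both pwdstorage hunks above apply the same guard: before decoding or comparing, check that the stored hash is short enough to possibly be a product of the scheme, and fail the bind instead of writing past a fixed buffer. A Python sketch of that length check, with an illustrative buffer size:

    import base64

    MAX_DECODED_LEN = 256   # stand-in for the scheme's fixed hash buffer

    def decode_stored_hash(stored_b64: str) -> bytes:
        """Refuse to decode a stored hash that could not fit the buffer."""
        decoded_upper_bound = (len(stored_b64) * 3) // 4   # base64 size bound
        if decoded_upper_bound > MAX_DECODED_LEN:
            raise ValueError("stored hash is longer than any valid hash for this scheme")
        return base64.b64decode(stored_b64)

    try:
        decode_stored_hash("A" * 4000)   # the oversized value used in the regression test
    except ValueError as err:
        print(err)
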
@ -1,178 +0,0 @@
|
||||
From e8a5b1deef1b455aafecb71efc029d2407b1b06f Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 16 Jul 2024 08:32:21 -0700
|
||||
Subject: [PATCH] Issue 4778 - Add COMPACT_CL5 task to dsconf replication
|
||||
(#6260)
|
||||
|
||||
Description: In 1.4.3, the changelog is not part of a backend.
|
||||
It can be compacted with nsds5task: CAMPACT_CL5 as part of the replication entry.
|
||||
Add the task as a compact-changelog command under the dsconf replication tool.
|
||||
Add tests for the feature and fix old tests.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/4778
|
||||
|
||||
Reviewed by: @progier389 (Thanks!)
|
||||
---
|
||||
.../tests/suites/config/compact_test.py | 36 ++++++++++++++---
|
||||
src/lib389/lib389/cli_conf/replication.py | 10 +++++
|
||||
src/lib389/lib389/replica.py | 40 +++++++++++++++++++
|
||||
3 files changed, 81 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/config/compact_test.py b/dirsrvtests/tests/suites/config/compact_test.py
|
||||
index 317258d0e..31d98d10c 100644
|
||||
--- a/dirsrvtests/tests/suites/config/compact_test.py
|
||||
+++ b/dirsrvtests/tests/suites/config/compact_test.py
|
||||
@@ -13,14 +13,14 @@ import time
|
||||
import datetime
|
||||
from lib389.tasks import DBCompactTask
|
||||
from lib389.backend import DatabaseConfig
|
||||
-from lib389.replica import Changelog5
|
||||
+from lib389.replica import Changelog5, Replicas
|
||||
from lib389.topologies import topology_m1 as topo
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def test_compact_db_task(topo):
|
||||
- """Specify a test case purpose or name here
|
||||
+ """Test compaction of database
|
||||
|
||||
:id: 1b3222ef-a336-4259-be21-6a52f76e1859
|
||||
:setup: Standalone Instance
|
||||
@@ -48,7 +48,7 @@ def test_compact_db_task(topo):
|
||||
|
||||
|
||||
def test_compaction_interval_and_time(topo):
|
||||
- """Specify a test case purpose or name here
|
||||
+ """Test compaction interval and time for database and changelog
|
||||
|
||||
:id: f361bee9-d7e7-4569-9255-d7b60dd9d92e
|
||||
:setup: Supplier Instance
|
||||
@@ -95,10 +95,36 @@ def test_compaction_interval_and_time(topo):
|
||||
|
||||
# Check compaction occurred as expected
|
||||
time.sleep(45)
|
||||
- assert not inst.searchErrorsLog("Compacting databases")
|
||||
+ assert not inst.searchErrorsLog("compacting replication changelogs")
|
||||
|
||||
time.sleep(90)
|
||||
- assert inst.searchErrorsLog("Compacting databases")
|
||||
+ assert inst.searchErrorsLog("compacting replication changelogs")
|
||||
+ inst.deleteErrorLogs(restart=False)
|
||||
+
|
||||
+
|
||||
+def test_compact_cl5_task(topo):
|
||||
+ """Test compaction of changelog5 database
|
||||
+
|
||||
+ :id: aadfa9f7-73c0-463a-912c-0a29aa1f8167
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Run compaction task
|
||||
+ 2. Check errors log to show task was run
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ """
|
||||
+ inst = topo.ms["supplier1"]
|
||||
+
|
||||
+ replicas = Replicas(inst)
|
||||
+ replicas.compact_changelog(log=log)
|
||||
+
|
||||
+ # Check compaction occurred as expected. But instead of time.sleep(5) check 1 sec in loop
|
||||
+ for _ in range(5):
|
||||
+ time.sleep(1)
|
||||
+ if inst.searchErrorsLog("compacting replication changelogs"):
|
||||
+ break
|
||||
+ assert inst.searchErrorsLog("compacting replication changelogs")
|
||||
inst.deleteErrorLogs(restart=False)
|
||||
|
||||
|
||||
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
|
||||
index 352c0ee5b..ccc394255 100644
|
||||
--- a/src/lib389/lib389/cli_conf/replication.py
|
||||
+++ b/src/lib389/lib389/cli_conf/replication.py
|
||||
@@ -1199,6 +1199,11 @@ def restore_cl_dir(inst, basedn, log, args):
|
||||
replicas.restore_changelog(replica_roots=args.REPLICA_ROOTS, log=log)
|
||||
|
||||
|
||||
+def compact_cl5(inst, basedn, log, args):
|
||||
+ replicas = Replicas(inst)
|
||||
+ replicas.compact_changelog(replica_roots=args.REPLICA_ROOTS, log=log)
|
||||
+
|
||||
+
|
||||
def create_parser(subparsers):
|
||||
|
||||
############################################
|
||||
@@ -1326,6 +1331,11 @@ def create_parser(subparsers):
|
||||
help="Specify one replica root whose changelog you want to restore. "
|
||||
"The replica root will be consumed from the LDIF file name if the option is omitted.")
|
||||
|
||||
+ compact_cl = repl_subcommands.add_parser('compact-changelog', help='Compact the changelog database')
|
||||
+ compact_cl.set_defaults(func=compact_cl5)
|
||||
+ compact_cl.add_argument('REPLICA_ROOTS', nargs="+",
|
||||
+ help="Specify replica roots whose changelog you want to compact.")
|
||||
+
|
||||
restore_changelogdir = restore_subcommands.add_parser('from-changelogdir', help='Restore LDIF files from changelogdir.')
|
||||
restore_changelogdir.set_defaults(func=restore_cl_dir)
|
||||
restore_changelogdir.add_argument('REPLICA_ROOTS', nargs="+",
|
||||
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
|
||||
index 94e1fdad5..1f321972d 100644
|
||||
--- a/src/lib389/lib389/replica.py
|
||||
+++ b/src/lib389/lib389/replica.py
|
||||
@@ -1648,6 +1648,11 @@ class Replica(DSLdapObject):
|
||||
"""
|
||||
self.replace('nsds5task', 'ldif2cl')
|
||||
|
||||
+ def begin_task_compact_cl5(self):
|
||||
+ """Begin COMPACT_CL5 task
|
||||
+ """
|
||||
+ self.replace('nsds5task', 'COMPACT_CL5')
|
||||
+
|
||||
def get_suffix(self):
|
||||
"""Return the suffix
|
||||
"""
|
||||
@@ -1829,6 +1834,41 @@ class Replicas(DSLdapObjects):
|
||||
log.error(f"Changelog LDIF for '{repl_root}' was not found")
|
||||
continue
|
||||
|
||||
+ def compact_changelog(self, replica_roots=[], log=None):
|
||||
+ """Compact Directory Server replication changelog
|
||||
+
|
||||
+ :param replica_roots: Replica suffixes that need to be processed (and optional LDIF file path)
|
||||
+ :type replica_roots: list of str
|
||||
+ :param log: The logger object
|
||||
+ :type log: logger
|
||||
+ """
|
||||
+
|
||||
+ if log is None:
|
||||
+ log = self._log
|
||||
+
|
||||
+ # Check if the changelog entry exists
|
||||
+ try:
|
||||
+ cl = Changelog5(self._instance)
|
||||
+ cl.get_attr_val_utf8_l("nsslapd-changelogdir")
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ raise ValueError("Changelog entry was not found. Probably, the replication is not enabled on this instance")
|
||||
+
|
||||
+ # Get all the replicas on the server if --replica-roots option is not specified
|
||||
+ repl_roots = []
|
||||
+ if not replica_roots:
|
||||
+ for replica in self.list():
|
||||
+ repl_roots.append(replica.get_attr_val_utf8("nsDS5ReplicaRoot"))
|
||||
+ else:
|
||||
+ for repl_root in replica_roots:
|
||||
+ repl_roots.append(repl_root)
|
||||
+
|
||||
+ # Dump the changelog for the replica
|
||||
+
|
||||
+ # Dump the changelog for the replica
|
||||
+ for repl_root in repl_roots:
|
||||
+ replica = self.get(repl_root)
|
||||
+ replica.begin_task_compact_cl5()
|
||||
+
|
||||
|
||||
class BootstrapReplicationManager(DSLdapObject):
|
||||
"""A Replication Manager credential for bootstrapping the repl process.
|
||||
--
|
||||
2.47.0
|
||||
|
@ -1,55 +0,0 @@
From d1cd9a5675e2953b7c8034ebb87a434cdd3ce0c3 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 2 Dec 2024 17:18:32 +0100
Subject: [PATCH] Issue 6417 - If an entry RDN is identical to the suffix, then
Entryrdn gets broken during a reindex (#6418)

Bug description:
During a reindex, the entryrdn index is built at the end from
each entry in the suffix.
If one entry has a RDN that is identical to the suffix DN,
then entryrdn_lookup_dn may erroneously return the suffix DN
as the DN of the entry.

Fix description:
When the lookup entry has no parent (because index is under
work) the loop lookup the entry using the RDN.
If this RDN matches the suffix DN, then it exits from the loop
with the suffix DN.
Before exiting it checks that the original lookup entryID
is equal to suffix entryID. If it does not match
the function fails and then the DN from the entry will be
built from id2enty

fixes: #6417

Reviewed by: Pierre Rogier, Simon Pichugin (Thanks !!!)
---
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
index 5797dd779..83b041192 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
@@ -1224,7 +1224,16 @@ entryrdn_lookup_dn(backend *be,
}
goto bail;
}
- maybesuffix = 1;
+ if (workid == 1) {
+ /* The loop (workid) iterates from the starting 'id'
+ * up to the suffix ID (i.e. '1').
+ * A corner case (#6417) is if an entry, on the path
+ * 'id' -> suffix, has the same RDN than the suffix.
+ * In order to erroneously believe the loop hits the suffix
+ * we need to check that 'workid' is '1' (suffix)
+ */
+ maybesuffix = 1;
+ }
} else {
_entryrdn_cursor_print_error("entryrdn_lookup_dn",
key.data, data.size, data.ulen, rc);
--
2.48.0

@ -1,267 +0,0 @@
|
||||
From 9b2fc77a36156ea987dcea6e2043f8e4c4a6b259 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Tue, 18 Jun 2024 14:21:07 +0200
|
||||
Subject: [PATCH] Issue 6224 - d2entry - Could not open id2entry err 0 - at
|
||||
startup when having sub-suffixes (#6225)
|
||||
|
||||
Problem:: d2entry - Could not open id2entry err 0 is logged at startup when having sub-suffixes
|
||||
Reason: The slapi_exist_referral internal search access a backend that is not yet started.
|
||||
Solution: Limit the internal search to a single backend
|
||||
|
||||
Issue: #6224
|
||||
|
||||
Reviewed by: @droideck Thanks!
|
||||
|
||||
(cherry picked from commit 796f703021e961fdd8cbc53b4ad4e20258af0e96)
|
||||
---
|
||||
.../tests/suites/ds_logs/ds_logs_test.py | 1 +
|
||||
.../suites/mapping_tree/regression_test.py | 161 +++++++++++++++++-
|
||||
ldap/servers/slapd/backend.c | 7 +-
|
||||
3 files changed, 159 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 812936c62..84a9c6ec8 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,6 +1222,7 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+<<<<<<< HEAD
|
||||
def test_referral_subsuffix(topology_st, request):
|
||||
"""Test the results of an inverted parent suffix definition in the configuration.
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/mapping_tree/regression_test.py b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
index 99d4a1d5f..689ff9f59 100644
|
||||
--- a/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
@@ -11,10 +11,14 @@ import ldap
|
||||
import logging
|
||||
import os
|
||||
import pytest
|
||||
+import time
|
||||
from lib389.backend import Backends, Backend
|
||||
+from lib389._constants import HOST_STANDALONE, PORT_STANDALONE, DN_DM, PW_DM
|
||||
from lib389.dbgen import dbgen_users
|
||||
from lib389.mappingTree import MappingTrees
|
||||
from lib389.topologies import topology_st
|
||||
+from lib389.referral import Referrals, Referral
|
||||
+
|
||||
|
||||
try:
|
||||
from lib389.backend import BackendSuffixView
|
||||
@@ -31,14 +35,26 @@ else:
|
||||
logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
+PARENT_SUFFIX = "dc=parent"
|
||||
+CHILD1_SUFFIX = f"dc=child1,{PARENT_SUFFIX}"
|
||||
+CHILD2_SUFFIX = f"dc=child2,{PARENT_SUFFIX}"
|
||||
+
|
||||
+PARENT_REFERRAL_DN = f"cn=ref,ou=People,{PARENT_SUFFIX}"
|
||||
+CHILD1_REFERRAL_DN = f"cn=ref,ou=people,{CHILD1_SUFFIX}"
|
||||
+CHILD2_REFERRAL_DN = f"cn=ref,ou=people,{CHILD2_SUFFIX}"
|
||||
+
|
||||
+REFERRAL_CHECK_PEDIOD = 7
|
||||
+
|
||||
+
|
||||
+
|
||||
BESTRUCT = [
|
||||
- { "bename" : "parent", "suffix": "dc=parent" },
|
||||
- { "bename" : "child1", "suffix": "dc=child1,dc=parent" },
|
||||
- { "bename" : "child2", "suffix": "dc=child2,dc=parent" },
|
||||
+ { "bename" : "parent", "suffix": PARENT_SUFFIX },
|
||||
+ { "bename" : "child1", "suffix": CHILD1_SUFFIX },
|
||||
+ { "bename" : "child2", "suffix": CHILD2_SUFFIX },
|
||||
]
|
||||
|
||||
|
||||
-@pytest.fixture(scope="function")
|
||||
+@pytest.fixture(scope="module")
|
||||
def topo(topology_st, request):
|
||||
bes = []
|
||||
|
||||
@@ -50,6 +66,9 @@ def topo(topology_st, request):
|
||||
request.addfinalizer(fin)
|
||||
|
||||
inst = topology_st.standalone
|
||||
+ # Reduce nsslapd-referral-check-period to accelerate test
|
||||
+ topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK_PEDIOD))
|
||||
+
|
||||
ldif_files = {}
|
||||
for d in BESTRUCT:
|
||||
bename = d['bename']
|
||||
@@ -76,14 +95,13 @@ def topo(topology_st, request):
|
||||
inst.start()
|
||||
return topology_st
|
||||
|
||||
-# Parameters for test_change_repl_passwd
|
||||
-EXPECTED_ENTRIES = (("dc=parent", 39), ("dc=child1,dc=parent", 13), ("dc=child2,dc=parent", 13))
|
||||
+# Parameters for test_sub_suffixes
|
||||
@pytest.mark.parametrize(
|
||||
"orphan_param",
|
||||
[
|
||||
- pytest.param( ( True, { "dc=parent": 2, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-true" ),
|
||||
- pytest.param( ( False, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-false" ),
|
||||
- pytest.param( ( None, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="no-orphan" ),
|
||||
+ pytest.param( ( True, { PARENT_SUFFIX: 2, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="orphan-is-true" ),
|
||||
+ pytest.param( ( False, { PARENT_SUFFIX: 3, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="orphan-is-false" ),
|
||||
+ pytest.param( ( None, { PARENT_SUFFIX: 3, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="no-orphan" ),
|
||||
],
|
||||
)
|
||||
|
||||
@@ -128,3 +146,128 @@ def test_sub_suffixes(topo, orphan_param):
|
||||
log.info('Test PASSED')
|
||||
|
||||
|
||||
+def test_one_level_search_on_sub_suffixes(topo):
|
||||
+ """ Perform one level scoped search accross suffix and sub-suffix
|
||||
+
|
||||
+ :id: 92f3139e-280e-11ef-a989-482ae39447e5
|
||||
+ :feature: mapping-tree
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+ :steps:
|
||||
+ 1. Perform a ONE LEVEL search on dc=parent
|
||||
+ 2. Check that all expected entries have been returned
|
||||
+ 3. Check that only the expected entries have been returned
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. each expected dn should be in the result set
|
||||
+ 3. Number of returned entries should be the same as the number of expected entries
|
||||
+ """
|
||||
+ expected_dns = ( 'dc=child1,dc=parent',
|
||||
+ 'dc=child2,dc=parent',
|
||||
+ 'ou=accounting,dc=parent',
|
||||
+ 'ou=product development,dc=parent',
|
||||
+ 'ou=product testing,dc=parent',
|
||||
+ 'ou=human resources,dc=parent',
|
||||
+ 'ou=payroll,dc=parent',
|
||||
+ 'ou=people,dc=parent',
|
||||
+ 'ou=groups,dc=parent', )
|
||||
+ entries = topo.standalone.search_s("dc=parent", ldap.SCOPE_ONELEVEL, "(objectClass=*)",
|
||||
+ attrlist=("dc","ou"), escapehatch='i am sure')
|
||||
+ log.info(f'one level search on dc=parent returned the following entries: {entries}')
|
||||
+ dns = [ entry.dn for entry in entries ]
|
||||
+ for dn in expected_dns:
|
||||
+ assert dn in dns
|
||||
+ assert len(entries) == len(expected_dns)
|
||||
+
|
||||
+
|
||||
+def test_sub_suffixes_errlog(topo):
|
||||
+ """ check the entries found on suffix/sub-suffix
|
||||
+ used int
|
||||
+
|
||||
+ :id: 1db9d52e-28de-11ef-b286-482ae39447e5
|
||||
+ :feature: mapping-tree
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+ :steps:
|
||||
+ 1. Check that id2entry error message is not in the error log.
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ assert not inst.searchErrorsLog('id2entry - Could not open id2entry err 0')
|
||||
+
|
||||
+
|
||||
+# Parameters for test_referral_subsuffix:
|
||||
+# a tuple pair containing:
|
||||
+# - list of referral dn that must be created
|
||||
+# - dict of searches basedn: expected_number_of_referrals
|
||||
+@pytest.mark.parametrize(
|
||||
+ "parameters",
|
||||
+ [
|
||||
+ pytest.param( ((PARENT_REFERRAL_DN, CHILD1_REFERRAL_DN), {PARENT_SUFFIX: 2, CHILD1_SUFFIX:1, CHILD2_SUFFIX:0}), id="Both"),
|
||||
+ pytest.param( ((PARENT_REFERRAL_DN,), {PARENT_SUFFIX: 1, CHILD1_SUFFIX:0, CHILD2_SUFFIX:0}) , id="Parent"),
|
||||
+ pytest.param( ((CHILD1_REFERRAL_DN,), {PARENT_SUFFIX: 1, CHILD1_SUFFIX:1, CHILD2_SUFFIX:0}) , id="Child"),
|
||||
+ pytest.param( ((), {PARENT_SUFFIX: 0, CHILD1_SUFFIX:0, CHILD2_SUFFIX:0}), id="None"),
|
||||
+ ])
|
||||
+
|
||||
+def test_referral_subsuffix(topo, request, parameters):
|
||||
+ """Test the results of an inverted parent suffix definition in the configuration.
|
||||
+
|
||||
+ For more details see:
|
||||
+ https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html
|
||||
+
|
||||
+ :id: 4e111a22-2a5d-11ef-a890-482ae39447e5
|
||||
+ :feature: referrals
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+ :parametrized: yes
|
||||
+ :steps:
|
||||
+ refs,searches = referrals
|
||||
+
|
||||
+ 1. Create the referrals according to the current parameter
|
||||
+ 2. Wait enough time so they get detected
|
||||
+ 3. For each search base dn, in the current parameter, perform the two following steps
|
||||
+ 4. In 3. loop: Perform a search with provided base dn
|
||||
+ 5. In 3. loop: Check that the number of returned referrals is the expected one.
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ all steps succeeds
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info('Deleting all referrals')
|
||||
+ for ref in Referrals(inst, PARENT_SUFFIX).list():
|
||||
+ ref.delete()
|
||||
+
|
||||
+ # Set cleanup callback
|
||||
+ if DEBUGGING:
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Remove all referrals
|
||||
+ fin()
|
||||
+ # Add requested referrals
|
||||
+ for dn in parameters[0]:
|
||||
+ refs = Referral(inst, dn=dn)
|
||||
+ refs.create(basedn=dn, properties={ 'cn': 'ref', 'ref': f'ldap://remote/{dn}'})
|
||||
+ # Wait that the internal search detects the referrals
|
||||
+ time.sleep(REFERRAL_CHECK_PEDIOD + 1)
|
||||
+ # Open a test connection
|
||||
+ ldc = ldap.initialize(f"ldap://{HOST_STANDALONE}:{PORT_STANDALONE}")
|
||||
+ ldc.set_option(ldap.OPT_REFERRALS,0)
|
||||
+ ldc.simple_bind_s(DN_DM,PW_DM)
|
||||
+
|
||||
+ # For each search base dn:
|
||||
+ for basedn,nbref in parameters[1].items():
|
||||
+ log.info(f"Referrals are: {parameters[0]}")
|
||||
+ # Perform a search with provided base dn
|
||||
+ result = ldc.search_s(basedn, ldap.SCOPE_SUBTREE, filterstr="(ou=People)")
|
||||
+ found_dns = [ dn for dn,entry in result if dn is not None ]
|
||||
+ found_refs = [ entry for dn,entry in result if dn is None ]
|
||||
+ log.info(f"Search on {basedn} returned {found_dns} and {found_refs}")
|
||||
+ # Check that the number of returned referrals is the expected one.
|
||||
+ log.info(f"Search returned {len(found_refs)} referrals. {nbref} are expected.")
|
||||
+ assert len(found_refs) == nbref
|
||||
+ ldc.unbind()
|
||||
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
|
||||
index 498f683b1..f86b0b9b6 100644
|
||||
--- a/ldap/servers/slapd/backend.c
|
||||
+++ b/ldap/servers/slapd/backend.c
|
||||
@@ -230,12 +230,17 @@ slapi_exist_referral(Slapi_Backend *be)
|
||||
|
||||
/* search for ("smart") referral entries */
|
||||
search_pb = slapi_pblock_new();
|
||||
- server_ctrls = (LDAPControl **) slapi_ch_calloc(2, sizeof (LDAPControl *));
|
||||
+ server_ctrls = (LDAPControl **) slapi_ch_calloc(3, sizeof (LDAPControl *));
|
||||
server_ctrls[0] = (LDAPControl *) slapi_ch_malloc(sizeof (LDAPControl));
|
||||
server_ctrls[0]->ldctl_oid = slapi_ch_strdup(LDAP_CONTROL_MANAGEDSAIT);
|
||||
server_ctrls[0]->ldctl_value.bv_val = NULL;
|
||||
server_ctrls[0]->ldctl_value.bv_len = 0;
|
||||
server_ctrls[0]->ldctl_iscritical = '\0';
|
||||
+ server_ctrls[1] = (LDAPControl *) slapi_ch_malloc(sizeof (LDAPControl));
|
||||
+ server_ctrls[1]->ldctl_oid = slapi_ch_strdup(MTN_CONTROL_USE_ONE_BACKEND_EXT_OID);
|
||||
+ server_ctrls[1]->ldctl_value.bv_val = NULL;
|
||||
+ server_ctrls[1]->ldctl_value.bv_len = 0;
|
||||
+ server_ctrls[1]->ldctl_iscritical = '\0';
|
||||
slapi_search_internal_set_pb(search_pb, suffix, LDAP_SCOPE_SUBTREE,
|
||||
filter, NULL, 0, server_ctrls, NULL,
|
||||
(void *) plugin_get_default_component_id(), 0);
|
||||
--
|
||||
2.48.0
|
||||
|
@ -1,32 +0,0 @@
|
||||
From ab06b3cebbe0287ef557c0307ca2ee86fe8cb761 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Thu, 21 Nov 2024 16:26:02 +0100
|
||||
Subject: [PATCH] Issue 6224 - Fix merge issue in 389-ds-base-2.1 for
|
||||
ds_log_test.py (#6414)
|
||||
|
||||
Fix a merge issue during cherry-pick over 389-ds-base-2.1 and 389-ds-base-1.4.3 branches
|
||||
|
||||
Issue: #6224
|
||||
|
||||
Reviewed by: @mreynolds389
|
||||
|
||||
(cherry picked from commit 2b541c64b8317209e4dafa4f82918d714039907c)
|
||||
---
|
||||
dirsrvtests/tests/suites/ds_logs/ds_logs_test.py | 1 -
|
||||
1 file changed, 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 84a9c6ec8..812936c62 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,7 +1222,6 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-<<<<<<< HEAD
|
||||
def test_referral_subsuffix(topology_st, request):
|
||||
"""Test the results of an inverted parent suffix definition in the configuration.
|
||||
|
||||
--
|
||||
2.48.0
|
||||
|
@ -1,214 +0,0 @@
|
||||
From 3fe2cf7cdedcdf5cafb59867e52a1fbe4a643571 Mon Sep 17 00:00:00 2001
|
||||
From: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
Date: Fri, 20 Dec 2024 22:37:15 +0900
|
||||
Subject: [PATCH] Issue 6224 - Remove test_referral_subsuffix from
|
||||
ds_logs_test.py (#6456)
|
||||
|
||||
Bug Description:
|
||||
|
||||
The test_referral_subsuffix test was removed from the main branch and some other
|
||||
branches for higher versions, but it was not removed from 389-ds-base-1.4.3
|
||||
and 389-ds-base-2.1. The test doesn't work anymore with the fix for
|
||||
Issue 6224, because the newly added control limits the internal search
|
||||
to a single backend. The test should be removed.
|
||||
|
||||
Fix Description:
|
||||
|
||||
remove the test from ds_logs_test.py
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/6224
|
||||
---
|
||||
.../tests/suites/ds_logs/ds_logs_test.py | 177 ------------------
|
||||
1 file changed, 177 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 812936c62..84d721756 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,183 +1222,6 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_referral_subsuffix(topology_st, request):
|
||||
- """Test the results of an inverted parent suffix definition in the configuration.
|
||||
-
|
||||
- For more details see:
|
||||
- https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html
|
||||
-
|
||||
- :id: 4faf210a-4fde-4e4f-8834-865bdc8f4d37
|
||||
- :setup: Standalone instance
|
||||
- :steps:
|
||||
- 1. First create two Backends, without mapping trees.
|
||||
- 2. create the mapping trees for these backends
|
||||
- 3. reduce nsslapd-referral-check-period to accelerate test
|
||||
- 4. Remove error log file
|
||||
- 5. Create a referral entry on parent suffix
|
||||
- 6. Check that the server detected the referral
|
||||
- 7. Delete the referral entry
|
||||
- 8. Check that the server detected the deletion of the referral
|
||||
- 9. Remove error log file
|
||||
- 10. Create a referral entry on child suffix
|
||||
- 11. Check that the server detected the referral on both parent and child suffixes
|
||||
- 12. Delete the referral entry
|
||||
- 13. Check that the server detected the deletion of the referral on both parent and child suffixes
|
||||
- 14. Remove error log file
|
||||
- 15. Create a referral entry on parent suffix
|
||||
- 16. Check that the server detected the referral on both parent and child suffixes
|
||||
- 17. Delete the child referral entry
|
||||
- 18. Check that the server detected the deletion of the referral on child suffix but not on parent suffix
|
||||
- 19. Delete the parent referral entry
|
||||
- 20. Check that the server detected the deletion of the referral parent suffix
|
||||
-
|
||||
- :expectedresults:
|
||||
- all steps succeeds
|
||||
- """
|
||||
- inst = topology_st.standalone
|
||||
- # Step 1 First create two Backends, without mapping trees.
|
||||
- PARENT_SUFFIX='dc=parent,dc=com'
|
||||
- CHILD_SUFFIX='dc=child,%s' % PARENT_SUFFIX
|
||||
- be1 = create_backend(inst, 'Parent', PARENT_SUFFIX)
|
||||
- be2 = create_backend(inst, 'Child', CHILD_SUFFIX)
|
||||
- # Step 2 create the mapping trees for these backends
|
||||
- mts = MappingTrees(inst)
|
||||
- mt1 = mts.create(properties={
|
||||
- 'cn': PARENT_SUFFIX,
|
||||
- 'nsslapd-state': 'backend',
|
||||
- 'nsslapd-backend': 'Parent',
|
||||
- })
|
||||
- mt2 = mts.create(properties={
|
||||
- 'cn': CHILD_SUFFIX,
|
||||
- 'nsslapd-state': 'backend',
|
||||
- 'nsslapd-backend': 'Child',
|
||||
- 'nsslapd-parent-suffix': PARENT_SUFFIX,
|
||||
- })
|
||||
-
|
||||
- dc_ex = Domain(inst, dn=PARENT_SUFFIX)
|
||||
- assert dc_ex.exists()
|
||||
-
|
||||
- dc_st = Domain(inst, dn=CHILD_SUFFIX)
|
||||
- assert dc_st.exists()
|
||||
-
|
||||
- # Step 3 reduce nsslapd-referral-check-period to accelerate test
|
||||
- # requires a restart done on step 4
|
||||
- REFERRAL_CHECK=7
|
||||
- topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK))
|
||||
-
|
||||
- # Check that if we create a referral at parent level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is not detected at child backend
|
||||
-
|
||||
- # Step 3 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 4 Create a referral entry on parent suffix
|
||||
- rs_parent = Referrals(topology_st.standalone, PARENT_SUFFIX)
|
||||
-
|
||||
- referral_entry_parent = rs_parent.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 5 Check that the server detected the referral
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Step 6 Delete the referral entry
|
||||
- referral_entry_parent.delete()
|
||||
-
|
||||
- # Step 7 Check that the server detected the deletion of the referral
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Check that if we create a referral at child level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is detected at child backend
|
||||
-
|
||||
- # Step 8 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 9 Create a referral entry on child suffix
|
||||
- rs_child = Referrals(topology_st.standalone, CHILD_SUFFIX)
|
||||
- referral_entry_child = rs_child.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 10 Check that the server detected the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Step 11 Delete the referral entry
|
||||
- referral_entry_child.delete()
|
||||
-
|
||||
- # Step 12 Check that the server detected the deletion of the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Check that if we create a referral at child level and parent level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is detected at child backend
|
||||
-
|
||||
- # Step 13 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 14 Create a referral entry on parent suffix
|
||||
- # Create a referral entry on child suffix
|
||||
- referral_entry_parent = rs_parent.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
- referral_entry_child = rs_child.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 15 Check that the server detected the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Step 16 Delete the child referral entry
|
||||
- referral_entry_child.delete()
|
||||
-
|
||||
- # Step 17 Check that the server detected the deletion of the referral on child suffix but not on parent suffix
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Step 18 Delete the parent referral entry
|
||||
- referral_entry_parent.delete()
|
||||
-
|
||||
- # Step 19 Check that the server detected the deletion of the referral parent suffix
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- def fin():
|
||||
- log.info('Deleting referral')
|
||||
- try:
|
||||
- referral_entry_parent.delete()
|
||||
- referral.entry_child.delete()
|
||||
- except:
|
||||
- pass
|
||||
-
|
||||
- request.addfinalizer(fin)
|
||||
|
||||
def test_missing_backend_suffix(topology_st, request):
|
||||
"""Test that the server does not crash if a backend has no suffix
|
||||
--
|
||||
2.48.0
|
||||
|
@ -1,90 +0,0 @@
|
||||
From 4121ffe7a44fbacf513758661e71e483eb11ee3c Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 6 Jan 2025 14:00:39 +0100
|
||||
Subject: [PATCH] Issue 6417 - (2nd) If an entry RDN is identical to the
|
||||
suffix, then Entryrdn gets broken during a reindex (#6460)
|
||||
|
||||
Bug description:
|
||||
The primary fix has a flaw as it assumes that the
|
||||
suffix ID is '1'.
|
||||
If the RUV entry is the first entry of the database,
|
||||
the server loops indefinitely.
|
||||
|
||||
Fix description:
|
||||
Read the suffix ID from the entryrdn index
|
||||
|
||||
fixes: #6417
|
||||
|
||||
Reviewed by: Pierre Rogier (also reviewed the first fix)
|
||||
---
|
||||
.../suites/replication/regression_m2_test.py | 9 +++++++++
|
||||
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 19 ++++++++++++++++++-
|
||||
2 files changed, 27 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
index abac46ada..72d4b9f89 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
@@ -1010,6 +1010,15 @@ def test_online_reinit_may_hang(topo_with_sigkill):
|
||||
"""
|
||||
M1 = topo_with_sigkill.ms["supplier1"]
|
||||
M2 = topo_with_sigkill.ms["supplier2"]
|
||||
+
|
||||
+ # The RFE 5367 (when enabled) retrieves the DN
|
||||
+ # from the dncache. This hides an issue
|
||||
+ # with primary fix for 6417.
|
||||
+ # We need to disable the RFE to verify that the primary
|
||||
+ # fix is properly fixed.
|
||||
+ if ds_is_newer('2.3.1'):
|
||||
+ M1.config.replace('nsslapd-return-original-entrydn', 'off')
|
||||
+
|
||||
M1.stop()
|
||||
ldif_file = '%s/supplier1.ldif' % M1.get_ldif_dir()
|
||||
M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 83b041192..1bbb6252a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1115,6 +1115,7 @@ entryrdn_lookup_dn(backend *be,
|
||||
rdn_elem *elem = NULL;
|
||||
int maybesuffix = 0;
|
||||
int db_retry = 0;
|
||||
+ ID suffix_id = 1;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "entryrdn_lookup_dn",
|
||||
"--> entryrdn_lookup_dn\n");
|
||||
@@ -1175,6 +1176,22 @@ entryrdn_lookup_dn(backend *be,
|
||||
/* Setting the bulk fetch buffer */
|
||||
data.flags = DB_DBT_MALLOC;
|
||||
|
||||
+ /* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
+ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
+ dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
|
||||
+ rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
|
||||
+ if (rc) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
+ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
+ slapi_sdn_get_ndn(be->be_suffix),
|
||||
+ suffix_id);
|
||||
+ } else {
|
||||
+ elem = (rdn_elem *)data.data;
|
||||
+ suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ }
|
||||
+ dblayer_value_free(be, &data);
|
||||
+ dblayer_value_free(be, &key);
|
||||
+
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
slapi_ch_free_string(&keybuf);
|
||||
@@ -1224,7 +1241,7 @@ entryrdn_lookup_dn(backend *be,
|
||||
}
|
||||
goto bail;
|
||||
}
|
||||
- if (workid == 1) {
|
||||
+ if (workid == suffix_id) {
|
||||
/* The loop (workid) iterates from the starting 'id'
|
||||
* up to the suffix ID (i.e. '1').
|
||||
* A corner case (#6417) is if an entry, on the path
|
||||
--
|
||||
2.48.0
|
||||
|
@ -1,40 +0,0 @@
|
||||
From 1ffcc9aa9a397180fe35283ee61b164471d073fb Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Tue, 7 Jan 2025 10:01:51 +0100
|
||||
Subject: [PATCH] Issue 6417 - (2nd) fix typo
|
||||
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 10 ++++++----
|
||||
1 file changed, 6 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 1bbb6252a..e2b8273a2 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1178,8 +1178,10 @@ entryrdn_lookup_dn(backend *be,
|
||||
|
||||
/* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
- dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
|
||||
- rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
|
||||
+ key.data = keybuf;
|
||||
+ key.size = key.ulen = strlen(keybuf) + 1;
|
||||
+ key.flags = DB_DBT_USERMEM;
|
||||
+ rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
"Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
@@ -1189,8 +1191,8 @@ entryrdn_lookup_dn(backend *be,
|
||||
elem = (rdn_elem *)data.data;
|
||||
suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
}
|
||||
- dblayer_value_free(be, &data);
|
||||
- dblayer_value_free(be, &key);
|
||||
+ slapi_ch_free(&data.data);
|
||||
+ slapi_ch_free_string(&keybuf);
|
||||
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
--
|
||||
2.48.0
|
||||
|
@ -1,75 +0,0 @@
|
||||
From 9e1284122a929fe14633a2aa6e2de4d72891f98f Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 13 Jan 2025 17:41:18 +0100
|
||||
Subject: [PATCH] Issue 6417 - (3rd) If an entry RDN is identical to the
|
||||
suffix, then Entryrdn gets broken during a reindex (#6480)
|
||||
|
||||
Bug description:
|
||||
The previous fix had a flaw.
|
||||
If entryrdn_lookup_dn is called with an undefined suffix,
|
||||
the lookup of the suffix triggers a crash.
|
||||
For example, it can occur during an internal search of a
|
||||
nonexistent map (view plugin).
|
||||
The issue exists in all releases but is hidden since 2.3.
|
||||
|
||||
Fix description:
|
||||
Test that the suffix is defined.
|
||||
|
||||
fixes: #6417
|
||||
|
||||
Reviewed by: Pierre Rogier (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 36 +++++++++++---------
|
||||
1 file changed, 20 insertions(+), 16 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index e2b8273a2..01c77156f 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1176,23 +1176,27 @@ entryrdn_lookup_dn(backend *be,
|
||||
/* Setting the bulk fetch buffer */
|
||||
data.flags = DB_DBT_MALLOC;
|
||||
|
||||
- /* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
- keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
- key.data = keybuf;
|
||||
- key.size = key.ulen = strlen(keybuf) + 1;
|
||||
- key.flags = DB_DBT_USERMEM;
|
||||
- rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
- if (rc) {
|
||||
- slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
- "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
- slapi_sdn_get_ndn(be->be_suffix),
|
||||
- suffix_id);
|
||||
- } else {
|
||||
- elem = (rdn_elem *)data.data;
|
||||
- suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ /* Just in case the suffix ID is not '1' retrieve it from the database
|
||||
+ * if the suffix is not defined suffix_id remains '1'
|
||||
+ */
|
||||
+ if (be->be_suffix) {
|
||||
+ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
+ key.data = keybuf;
|
||||
+ key.size = key.ulen = strlen(keybuf) + 1;
|
||||
+ key.flags = DB_DBT_USERMEM;
|
||||
+ rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
+ if (rc) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
+ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
+ slapi_sdn_get_ndn(be->be_suffix),
|
||||
+ suffix_id);
|
||||
+ } else {
|
||||
+ elem = (rdn_elem *) data.data;
|
||||
+ suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ }
|
||||
+ slapi_ch_free(&data.data);
|
||||
+ slapi_ch_free_string(&keybuf);
|
||||
}
|
||||
- slapi_ch_free(&data.data);
|
||||
- slapi_ch_free_string(&keybuf);
|
||||
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
--
|
||||
2.48.0
|
||||
|
@ -1,297 +0,0 @@
|
||||
From d2f9dd82e3610ee9b73feea981c680c03bb21394 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Thu, 16 Jan 2025 08:42:53 -0500
|
||||
Subject: [PATCH] Issue 6509 - Race condition with Paged Result searches
|
||||
|
||||
Description:
|
||||
|
||||
There is a race condition with Paged Result searches when a new operation comes
|
||||
in while a paged search is finishing. This triggers an invalid time out error
|
||||
and closes the connection with a T3 code.
|
||||
|
||||
The problem is that we do not use the "PagedResult lock" when checking the
|
||||
connection's paged result data for a timeout event. This causes the paged
|
||||
result timeout value to change unexpectedly and trigger a false timeout when a
|
||||
new operation arrives.
|
||||
|
||||
Now we check the timeout without the conn lock; if it is expired, it could
|
||||
be a race condition and a false positive. Try the lock again and test the
|
||||
timeout. This also prevents blocking non-paged result searches from
|
||||
getting held up by the lock when it's not necessary.
|
||||
|
||||
This also fixes some memory leaks that occur when an error happens.
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6509
|
||||
|
||||
Reviewed by: tbordaz & proger (Thanks!!)
|
||||
---
|
||||
ldap/servers/slapd/daemon.c | 61 ++++++++++++++++++-------------
|
||||
ldap/servers/slapd/opshared.c | 58 ++++++++++++++---------------
|
||||
ldap/servers/slapd/pagedresults.c | 9 +++++
|
||||
ldap/servers/slapd/slap.h | 2 +-
|
||||
4 files changed, 75 insertions(+), 55 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index bb80dae36..13dfe250d 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1578,7 +1578,29 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
if (c->c_state == CONN_STATE_FREE) {
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else {
|
||||
- /* we try to acquire the connection mutex, if it is already
|
||||
+ /* Check for a timeout for PAGED RESULTS */
|
||||
+ if (pagedresults_is_timedout_nolock(c)) {
|
||||
+ /*
|
||||
+ * There could be a race condition so lets try again with the
|
||||
+ * right lock
|
||||
+ */
|
||||
+ pthread_mutex_t *pr_mutex = pageresult_lock_get_addr(c);
|
||||
+ if (pthread_mutex_trylock(pr_mutex) == EBUSY) {
|
||||
+ c = next;
|
||||
+ continue;
|
||||
+ }
|
||||
+ if (pagedresults_is_timedout_nolock(c)) {
|
||||
+ pthread_mutex_unlock(pr_mutex);
|
||||
+ disconnect_server(c, c->c_connid, -1,
|
||||
+ SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
+ 0);
|
||||
+ } else {
|
||||
+ pthread_mutex_unlock(pr_mutex);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ /*
|
||||
+ * we try to acquire the connection mutex, if it is already
|
||||
* acquired by another thread, don't wait
|
||||
*/
|
||||
if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
|
||||
@@ -1586,35 +1608,24 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
continue;
|
||||
}
|
||||
if (c->c_flags & CONN_FLAG_CLOSING) {
|
||||
- /* A worker thread has marked that this connection
|
||||
- * should be closed by calling disconnect_server.
|
||||
- * move this connection out of the active list
|
||||
- * the last thread to use the connection will close it
|
||||
+ /*
|
||||
+ * A worker thread, or paged result timeout, has marked that
|
||||
+ * this connection should be closed by calling
|
||||
+ * disconnect_server(). Move this connection out of the active
|
||||
+ * list then the last thread to use the connection will close
|
||||
+ * it.
|
||||
*/
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_sd == SLAPD_INVALID_SOCKET) {
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_prfd != NULL) {
|
||||
if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
|
||||
- int add_fd = 1;
|
||||
- /* check timeout for PAGED RESULTS */
|
||||
- if (pagedresults_is_timedout_nolock(c)) {
|
||||
- /* Exceeded the paged search timelimit; disconnect the client */
|
||||
- disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
- SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
- 0);
|
||||
- connection_table_move_connection_out_of_active_list(ct,
|
||||
- c);
|
||||
- add_fd = 0; /* do not poll on this fd */
|
||||
- }
|
||||
- if (add_fd) {
|
||||
- ct->fd[count].fd = c->c_prfd;
|
||||
- ct->fd[count].in_flags = SLAPD_POLL_FLAGS;
|
||||
- /* slot i of the connection table is mapped to slot
|
||||
- * count of the fds array */
|
||||
- c->c_fdi = count;
|
||||
- count++;
|
||||
- }
|
||||
+ ct->fd[listnum][count].fd = c->c_prfd;
|
||||
+ ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
|
||||
+ /* slot i of the connection table is mapped to slot
|
||||
+ * count of the fds array */
|
||||
+ c->c_fdi = count;
|
||||
+ count++;
|
||||
} else {
|
||||
if (c->c_threadnumber >= c->c_max_threads_per_conn) {
|
||||
c->c_maxthreadsblocked++;
|
||||
@@ -1675,7 +1686,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused
|
||||
continue;
|
||||
}
|
||||
|
||||
- /* Try to get connection mutex, if not available just skip the connection and
|
||||
+ /* Try to get connection mutex, if not available just skip the connection and
|
||||
* process other connections events. May generates cpu load for listening thread
|
||||
* if connection mutex is held for a long time
|
||||
*/
|
||||
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
|
||||
index 7ab4117cd..a29eed052 100644
|
||||
--- a/ldap/servers/slapd/opshared.c
|
||||
+++ b/ldap/servers/slapd/opshared.c
|
||||
@@ -250,7 +250,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
char *errtext = NULL;
|
||||
int nentries, pnentries;
|
||||
int flag_search_base_found = 0;
|
||||
- int flag_no_such_object = 0;
|
||||
+ bool flag_no_such_object = false;
|
||||
int flag_referral = 0;
|
||||
int flag_psearch = 0;
|
||||
int err_code = LDAP_SUCCESS;
|
||||
@@ -315,7 +315,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
rc = -1;
|
||||
goto free_and_return_nolock;
|
||||
}
|
||||
-
|
||||
+
|
||||
/* Set the time we actually started the operation */
|
||||
slapi_operation_set_time_started(operation);
|
||||
|
||||
@@ -798,11 +798,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
}
|
||||
|
||||
/* subtree searches :
|
||||
- * if the search was started above the backend suffix
|
||||
- * - temporarily set the SLAPI_SEARCH_TARGET_SDN to the
|
||||
- * base of the node so that we don't get a NO SUCH OBJECT error
|
||||
- * - do not change the scope
|
||||
- */
|
||||
+ * if the search was started above the backend suffix
|
||||
+ * - temporarily set the SLAPI_SEARCH_TARGET_SDN to the
|
||||
+ * base of the node so that we don't get a NO SUCH OBJECT error
|
||||
+ * - do not change the scope
|
||||
+ */
|
||||
if (scope == LDAP_SCOPE_SUBTREE) {
|
||||
if (slapi_sdn_issuffix(be_suffix, basesdn)) {
|
||||
if (free_sdn) {
|
||||
@@ -825,53 +825,53 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
switch (rc) {
|
||||
case 1:
|
||||
/* if the backend returned LDAP_NO_SUCH_OBJECT for a SEARCH request,
|
||||
- * it will not have sent back a result - otherwise, it will have
|
||||
- * sent a result */
|
||||
+ * it will not have sent back a result - otherwise, it will have
|
||||
+ * sent a result */
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
if (err == LDAP_NO_SUCH_OBJECT) {
|
||||
/* may be the object exist somewhere else
|
||||
- * wait the end of the loop to send back this error
|
||||
- */
|
||||
- flag_no_such_object = 1;
|
||||
+ * wait the end of the loop to send back this error
|
||||
+ */
|
||||
+ flag_no_such_object = true;
|
||||
} else {
|
||||
/* err something other than LDAP_NO_SUCH_OBJECT, so the backend will
|
||||
- * have sent the result -
|
||||
- * Set a flag here so we don't return another result. */
|
||||
+ * have sent the result -
|
||||
+ * Set a flag here so we don't return another result. */
|
||||
sent_result = 1;
|
||||
}
|
||||
- /* fall through */
|
||||
+ /* fall through */
|
||||
|
||||
case -1: /* an error occurred */
|
||||
+ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
/* PAGED RESULTS */
|
||||
if (op_is_pagedresults(operation)) {
|
||||
/* cleanup the slot */
|
||||
pthread_mutex_lock(pagedresults_mutex);
|
||||
+ if (err != LDAP_NO_SUCH_OBJECT && !flag_no_such_object) {
|
||||
+ /* Free the results if not "no_such_object" */
|
||||
+ void *sr = NULL;
|
||||
+ slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
|
||||
+ be->be_search_results_release(&sr);
|
||||
+ }
|
||||
pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
|
||||
rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1);
|
||||
pthread_mutex_unlock(pagedresults_mutex);
|
||||
}
|
||||
- if (1 == flag_no_such_object) {
|
||||
- break;
|
||||
- }
|
||||
- slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
- if (err == LDAP_NO_SUCH_OBJECT) {
|
||||
- /* may be the object exist somewhere else
|
||||
- * wait the end of the loop to send back this error
|
||||
- */
|
||||
- flag_no_such_object = 1;
|
||||
+
|
||||
+ if (err == LDAP_NO_SUCH_OBJECT || flag_no_such_object) {
|
||||
+ /* Maybe the object exists somewhere else, wait to the end
|
||||
+ * of the loop to send back this error */
|
||||
+ flag_no_such_object = true;
|
||||
break;
|
||||
} else {
|
||||
- /* for error other than LDAP_NO_SUCH_OBJECT
|
||||
- * the error has already been sent
|
||||
- * stop the search here
|
||||
- */
|
||||
+ /* For error other than LDAP_NO_SUCH_OBJECT the error has
|
||||
+ * already been sent stop the search here */
|
||||
cache_return_target_entry(pb, be, operation);
|
||||
goto free_and_return;
|
||||
}
|
||||
|
||||
/* when rc == SLAPI_FAIL_DISKFULL this case is executed */
|
||||
-
|
||||
case SLAPI_FAIL_DISKFULL:
|
||||
operation_out_of_disk_space();
|
||||
cache_return_target_entry(pb, be, operation);
|
||||
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
|
||||
index db87e486e..4aa1fa3e5 100644
|
||||
--- a/ldap/servers/slapd/pagedresults.c
|
||||
+++ b/ldap/servers/slapd/pagedresults.c
|
||||
@@ -121,12 +121,15 @@ pagedresults_parse_control_value(Slapi_PBlock *pb,
|
||||
if (ber_scanf(ber, "{io}", pagesize, &cookie) == LBER_ERROR) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
|
||||
"<= corrupted control value\n");
|
||||
+ ber_free(ber, 1);
|
||||
return LDAP_PROTOCOL_ERROR;
|
||||
}
|
||||
if (!maxreqs) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
|
||||
"Simple paged results requests per conn exceeded the limit: %d\n",
|
||||
maxreqs);
|
||||
+ ber_free(ber, 1);
|
||||
+ slapi_ch_free_string(&cookie.bv_val);
|
||||
return LDAP_UNWILLING_TO_PERFORM;
|
||||
}
|
||||
|
||||
@@ -376,6 +379,10 @@ pagedresults_free_one_msgid(Connection *conn, ber_int_t msgid, pthread_mutex_t *
|
||||
}
|
||||
prp->pr_flags |= CONN_FLAG_PAGEDRESULTS_ABANDONED;
|
||||
prp->pr_flags &= ~CONN_FLAG_PAGEDRESULTS_PROCESSING;
|
||||
+ if (conn->c_pagedresults.prl_count > 0) {
|
||||
+ _pr_cleanup_one_slot(prp);
|
||||
+ conn->c_pagedresults.prl_count--;
|
||||
+ }
|
||||
rc = 0;
|
||||
break;
|
||||
}
|
||||
@@ -940,7 +947,9 @@ pagedresults_is_timedout_nolock(Connection *conn)
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "<-- pagedresults_is_timedout", "<= false 2\n");
|
||||
+
|
||||
return 0;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
|
||||
index 072f6f962..469874fd1 100644
|
||||
--- a/ldap/servers/slapd/slap.h
|
||||
+++ b/ldap/servers/slapd/slap.h
|
||||
@@ -74,7 +74,7 @@ static char ptokPBE[34] = "Internal (Software) Token ";
|
||||
#include <sys/stat.h>
|
||||
#include <sys/socket.h>
|
||||
#include <netinet/in.h>
|
||||
-
|
||||
+#include <stdbool.h>
|
||||
#include <time.h> /* For timespec definitions */
|
||||
|
||||
/* Provides our int types and platform specific requirements. */
|
||||
--
|
||||
2.48.0
|
||||
|
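The locking change above follows a common double-check pattern: test the timeout without the lock, and only if it looks expired take the lock with a non-blocking trylock and test again. Below is a minimal Python sketch of that pattern with invented names; it is not the server's connection structure or its actual API.

import threading
import time

class ConnSketch:
    """Illustrative connection state for the pattern above; the names are invented."""
    def __init__(self):
        self.pr_lock = threading.Lock()   # stands in for the paged-results lock
        self.pr_expiry = 0.0              # 0 means "no paged search in progress"

    def _timed_out_nolock(self, now):
        # Best-effort check without the lock; it may race with a writer.
        return self.pr_expiry != 0 and now > self.pr_expiry

    def timed_out_confirmed(self):
        """Return True only when the timeout is confirmed under the lock."""
        if not self._timed_out_nolock(time.time()):
            return False                  # fast path: no lock taken at all
        # Possibly a race / false positive: re-check while holding the lock.
        if not self.pr_lock.acquire(blocking=False):
            return False                  # a writer is active; retry on the next poll cycle
        try:
            return self._timed_out_nolock(time.time())
        finally:
            self.pr_lock.release()

Only a confirmed timeout leads to disconnecting the client, and connections that are not running a paged search never touch the lock at all.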
@ -1,29 +0,0 @@
|
||||
From 27cd055197bc3cae458a1f86621aa5410c66dd2c Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 20 Jan 2025 15:51:24 -0500
|
||||
Subject: [PATCH] Issue 6509 - Fix cherry pick issue (race condition in Paged
|
||||
results)
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6509
|
||||
---
|
||||
ldap/servers/slapd/daemon.c | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 13dfe250d..57e07e5f5 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1620,8 +1620,8 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_prfd != NULL) {
|
||||
if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
|
||||
- ct->fd[listnum][count].fd = c->c_prfd;
|
||||
- ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
|
||||
+ ct->fd[count].fd = c->c_prfd;
|
||||
+ ct->fd[count].in_flags = SLAPD_POLL_FLAGS;
|
||||
/* slot i of the connection table is mapped to slot
|
||||
* count of the fds array */
|
||||
c->c_fdi = count;
|
||||
--
|
||||
2.48.0
|
||||
|
File diff suppressed because it is too large
@ -1,236 +0,0 @@
|
||||
From 1845aed98becaba6b975342229cb5e0de79d208d Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 29 Jan 2025 17:41:55 +0000
|
||||
Subject: [PATCH] Issue 6436 - MOD on a large group slow if substring index is
|
||||
present (#6437)
|
||||
|
||||
Bug Description: If the substring index is configured for the group
|
||||
membership attribute (member or uniqueMember), the removal of a
|
||||
member from a large static group is pretty slow.
|
||||
|
||||
Fix Description: A solution to this issue would be to introduce
|
||||
a new index to track a membership attribute index. In the interim,
|
||||
we add a check to healthcheck to inform the user of the implications
|
||||
of this configuration.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6436
|
||||
|
||||
Reviewed by: @Firstyear, @tbordaz, @droideck (Thanks)
|
||||
---
|
||||
.../suites/healthcheck/health_config_test.py | 89 ++++++++++++++++++-
|
||||
src/lib389/lib389/lint.py | 15 ++++
|
||||
src/lib389/lib389/plugins.py | 37 +++++++-
|
||||
3 files changed, 137 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
index 6d3d08bfa..747699486 100644
|
||||
--- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
+++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
@@ -212,6 +212,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
|
||||
MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
|
||||
|
||||
standalone = topology_st.standalone
|
||||
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
|
||||
|
||||
log.info('Enable RI plugin')
|
||||
plugin = ReferentialIntegrityPlugin(standalone)
|
||||
@@ -233,7 +234,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
|
||||
|
||||
|
||||
def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
- """Check if HealthCheck returns DSMOLE0002 code
|
||||
+ """Check if HealthCheck returns DSMOLE0001 code
|
||||
|
||||
:id: 236b0ec2-13da-48fb-b65a-db7406d56d5d
|
||||
:setup: Standalone instance
|
||||
@@ -248,8 +249,8 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
:expectedresults:
|
||||
1. Success
|
||||
2. Success
|
||||
- 3. Healthcheck reports DSMOLE0002 code and related details
|
||||
- 4. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 3. Healthcheck reports DSMOLE0001 code and related details
|
||||
+ 4. Healthcheck reports DSMOLE0001 code and related details
|
||||
5. Success
|
||||
6. Healthcheck reports no issue found
|
||||
7. Healthcheck reports no issue found
|
||||
@@ -259,6 +260,7 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
MO_GROUP_ATTR = 'creatorsname'
|
||||
|
||||
standalone = topology_st.standalone
|
||||
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
|
||||
|
||||
log.info('Enable MO plugin')
|
||||
plugin = MemberOfPlugin(standalone)
|
||||
@@ -279,6 +281,87 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
|
||||
|
||||
|
||||
+def test_healthcheck_MO_plugin_substring_index(topology_st):
|
||||
+ """Check if HealthCheck returns DSMOLE0002 code when the
|
||||
+ member, uniquemember attribute contains a substring index type
|
||||
+
|
||||
+ :id: 10954811-24ac-4886-8183-e30892f8e02d
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Create DS instance
|
||||
+ 2. Configure the instance with MO Plugin
|
||||
+ 3. Change index type to substring for member attribute
|
||||
+ 4. Use HealthCheck without --json option
|
||||
+ 5. Use HealthCheck with --json option
|
||||
+ 6. Change index type back to equality for member attribute
|
||||
+ 7. Use HealthCheck without --json option
|
||||
+ 8. Use HealthCheck with --json option
|
||||
+ 9. Change index type to substring for uniquemember attribute
|
||||
+ 10. Use HealthCheck without --json option
|
||||
+ 11. Use HealthCheck with --json option
|
||||
+ 12. Change index type back to equality for uniquemember attribute
|
||||
+ 13. Use HealthCheck without --json option
|
||||
+ 14. Use HealthCheck with --json option
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 5. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 6. Success
|
||||
+ 7. Healthcheck reports no issue found
|
||||
+ 8. Healthcheck reports no issue found
|
||||
+ 9. Success
|
||||
+ 10. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 11. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 12. Success
|
||||
+ 13. Healthcheck reports no issue found
|
||||
+ 14. Healthcheck reports no issue found
|
||||
+ """
|
||||
+
|
||||
+ RET_CODE = 'DSMOLE0002'
|
||||
+ MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
|
||||
+ UNIQUE_MEMBER_DN = 'cn=uniquemember,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
|
||||
+
|
||||
+ log.info('Enable MO plugin')
|
||||
+ plugin = MemberOfPlugin(standalone)
|
||||
+ plugin.disable()
|
||||
+ plugin.enable()
|
||||
+
|
||||
+ log.info('Change the index type of the member attribute index to substring')
|
||||
+ index = Index(topology_st.standalone, MEMBER_DN)
|
||||
+ index.replace('nsIndexType', 'sub')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE)
|
||||
+
|
||||
+ log.info('Set the index type of the member attribute index back to eq')
|
||||
+ index.replace('nsIndexType', 'eq')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
|
||||
+
|
||||
+ log.info('Change the index type of the uniquemember attribute index to substring')
|
||||
+ index = Index(topology_st.standalone, UNIQUE_MEMBER_DN)
|
||||
+ index.replace('nsIndexType', 'sub')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE)
|
||||
+
|
||||
+ log.info('Set the index type of the uniquemember attribute index back to eq')
|
||||
+ index.replace('nsIndexType', 'eq')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
|
||||
+
|
||||
+ # Restart the instance after changing the plugin to avoid breaking the other tests
|
||||
+ standalone.restart()
|
||||
+
|
||||
+
|
||||
@pytest.mark.ds50873
|
||||
@pytest.mark.bz1685160
|
||||
@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented")
|
||||
diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
|
||||
index 4d9cbb666..3d3c79ea3 100644
|
||||
--- a/src/lib389/lib389/lint.py
|
||||
+++ b/src/lib389/lib389/lint.py
|
||||
@@ -231,6 +231,21 @@ database after adding the missing index type. Here is an example using dsconf:
|
||||
"""
|
||||
}
|
||||
|
||||
+DSMOLE0002 = {
|
||||
+ 'dsle': 'DSMOLE0002',
|
||||
+ 'severity': 'LOW',
|
||||
+ 'description': 'Removal of a member can be slow ',
|
||||
+ 'items': ['cn=memberof plugin,cn=plugins,cn=config', ],
|
||||
+ 'detail': """If the substring index is configured for a membership attribute. The removal of a member
|
||||
+from the large group can be slow.
|
||||
+
|
||||
+""",
|
||||
+ 'fix': """If not required, you can remove the substring index type using dsconf:
|
||||
+
|
||||
+ # dsconf slapd-YOUR_INSTANCE backend index set --attr=ATTR BACKEND --del-type=sub
|
||||
+"""
|
||||
+}
|
||||
+
|
||||
# Disk Space check. Note - PARTITION is replaced by the calling function
|
||||
DSDSLE0001 = {
|
||||
'dsle': 'DSDSLE0001',
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 6bf1843ad..185398e5b 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -12,7 +12,7 @@ import copy
|
||||
import os.path
|
||||
from lib389 import tasks
|
||||
from lib389._mapped_object import DSLdapObjects, DSLdapObject
|
||||
-from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001
|
||||
+from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001, DSMOLE0002
|
||||
from lib389.utils import ensure_str, ensure_list_bytes
|
||||
from lib389.schema import Schema
|
||||
from lib389._constants import (
|
||||
@@ -827,6 +827,41 @@ class MemberOfPlugin(Plugin):
|
||||
report['check'] = f'memberof:attr_indexes'
|
||||
yield report
|
||||
|
||||
+ def _lint_member_substring_index(self):
|
||||
+ if self.status():
|
||||
+ from lib389.backend import Backends
|
||||
+ backends = Backends(self._instance).list()
|
||||
+ membership_attrs = ['member', 'uniquemember']
|
||||
+ container = self.get_attr_val_utf8_l("nsslapd-plugincontainerscope")
|
||||
+ for backend in backends:
|
||||
+ suffix = backend.get_attr_val_utf8_l('nsslapd-suffix')
|
||||
+ if suffix == "cn=changelog":
|
||||
+ # Always skip retro changelog
|
||||
+ continue
|
||||
+ if container is not None:
|
||||
+ # Check if this backend is in the scope
|
||||
+ if not container.endswith(suffix):
|
||||
+ # skip this backend that is not in the scope
|
||||
+ continue
|
||||
+ indexes = backend.get_indexes()
|
||||
+ for attr in membership_attrs:
|
||||
+ report = copy.deepcopy(DSMOLE0002)
|
||||
+ try:
|
||||
+ index = indexes.get(attr)
|
||||
+ types = index.get_attr_vals_utf8_l("nsIndexType")
|
||||
+ if "sub" in types:
|
||||
+ report['detail'] = report['detail'].replace('ATTR', attr)
|
||||
+ report['detail'] = report['detail'].replace('BACKEND', suffix)
|
||||
+ report['fix'] = report['fix'].replace('ATTR', attr)
|
||||
+ report['fix'] = report['fix'].replace('BACKEND', suffix)
|
||||
+ report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
|
||||
+ report['items'].append(suffix)
|
||||
+ report['items'].append(attr)
|
||||
+ report['check'] = f'attr:substring_index'
|
||||
+ yield report
|
||||
+ except KeyError:
|
||||
+ continue
|
||||
+
|
||||
def get_attr(self):
|
||||
"""Get memberofattr attribute"""
|
||||
|
||||
--
|
||||
2.48.1
|
||||
|
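The new DSMOLE0002 lint can also be approximated from a standalone script; the sketch below reuses the lib389 calls visible in the patch (Backends, get_indexes, get_attr_vals_utf8_l). The 'inst' handle and the plain KeyError handling are assumptions carried over from the patch, not a documented API contract.

# Minimal sketch of the DSMOLE0002 check, mirroring the lib389 calls used in
# the patch above. 'inst' is assumed to be an already-connected DirSrv handle.
from lib389.backend import Backends

MEMBERSHIP_ATTRS = ['member', 'uniquemember']

def membership_substring_indexes(inst):
    """Yield (suffix, attr) pairs whose index includes the 'sub' type."""
    for backend in Backends(inst).list():
        suffix = backend.get_attr_val_utf8_l('nsslapd-suffix')
        if suffix == 'cn=changelog':
            continue  # the lint always skips the retro changelog
        indexes = backend.get_indexes()
        for attr in MEMBERSHIP_ATTRS:
            try:
                index = indexes.get(attr)
            except KeyError:
                continue  # attribute not indexed in this backend
            if 'sub' in index.get_attr_vals_utf8_l('nsIndexType'):
                yield suffix, attr

Each (suffix, attr) pair yielded corresponds to an index the healthcheck would flag, and the suggested remediation is the dsconf command shown in the lint's fix text.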
@ -1,651 +0,0 @@
|
||||
From dba27e56161943fbcf54ecbc28337e2c81b07979 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Mon, 13 Jan 2025 18:03:07 +0100
|
||||
Subject: [PATCH] Issue 6494 - Various errors when using extended matching rule
|
||||
on vlv sort filter (#6495)
|
||||
|
||||
* Issue 6494 - Various errors when using extended matching rule on vlv sort filter
|
||||
|
||||
Various issues when configuring and using extended matching rule within a vlv sort filter:
|
||||
|
||||
Race condition around the key storage while indexing, leading to various heap and data corruption (lmdb only).
|
||||
Crash while indexing if vlv is misconfigured, because a NULL key is not checked.
|
||||
Read after the end of a block because of a data type mismatch between SlapiValue and berval.
|
||||
Memory leaks
|
||||
Solution:
|
||||
|
||||
Serialize the vlv index key generation if vlv filter has an extended matching rule.
|
||||
Check for NULL keys.
|
||||
Always provide SlapiValue even if we want to get keys as bervals.
|
||||
Free the resources properly.
|
||||
Issue: #6494
|
||||
|
||||
Reviewed by: @mreynolds389 (Thanks!)
|
||||
|
||||
(cherry picked from commit 4bd27ecc4e1d21c8af5ab8cad795d70477179a98)
|
||||
(cherry picked from commit 223a20250cbf29a546dcb398cfc76024d2f91347)
|
||||
(cherry picked from commit 280043740a525eaf0438129fd8b99ca251c62366)
|
||||
---
|
||||
.../tests/suites/indexes/regression_test.py | 29 +++
|
||||
.../tests/suites/vlv/regression_test.py | 183 ++++++++++++++++++
|
||||
ldap/servers/slapd/back-ldbm/cleanup.c | 8 +
|
||||
ldap/servers/slapd/back-ldbm/dblayer.c | 22 ++-
|
||||
ldap/servers/slapd/back-ldbm/ldbm_attr.c | 2 +-
|
||||
ldap/servers/slapd/back-ldbm/matchrule.c | 8 +-
|
||||
.../servers/slapd/back-ldbm/proto-back-ldbm.h | 3 +-
|
||||
ldap/servers/slapd/back-ldbm/sort.c | 37 ++--
|
||||
ldap/servers/slapd/back-ldbm/vlv.c | 26 +--
|
||||
ldap/servers/slapd/back-ldbm/vlv_srch.c | 4 +-
|
||||
ldap/servers/slapd/generation.c | 5 +
|
||||
ldap/servers/slapd/plugin_mr.c | 12 +-
|
||||
src/lib389/lib389/backend.py | 10 +
|
||||
13 files changed, 292 insertions(+), 57 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
index fc6db727f..2196fb2ed 100644
|
||||
--- a/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
@@ -227,6 +227,35 @@ def test_reject_virtual_attr_for_indexing(topo):
|
||||
break
|
||||
|
||||
|
||||
+def test_reindex_extended_matching_rule(topo, add_backend_and_ldif_50K_users):
|
||||
+ """Check that index with extended matching rule are reindexed properly.
|
||||
+
|
||||
+ :id: 8a3198e8-cc5a-11ef-a3e7-482ae39447e5
|
||||
+ :setup: Standalone instance + a second backend with 50K users
|
||||
+ :steps:
|
||||
+ 1. Configure uid with 2.5.13.2 matching rule
|
||||
+ 1. Configure cn with 2.5.13.2 matching rule
|
||||
+ 2. Reindex
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topo.standalone
|
||||
+ tasks = Tasks(inst)
|
||||
+ be2 = Backends(topo.standalone).get_backend(SUFFIX2)
|
||||
+ index = be2.get_index('uid')
|
||||
+ index.replace('nsMatchingRule', '2.5.13.2')
|
||||
+ index = be2.get_index('cn')
|
||||
+ index.replace('nsMatchingRule', '2.5.13.2')
|
||||
+
|
||||
+ assert tasks.reindex(
|
||||
+ suffix=SUFFIX2,
|
||||
+ args={TASK_WAIT: True}
|
||||
+ ) == 0
|
||||
+
|
||||
+
|
||||
+
|
||||
if __name__ == "__main__":
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
index 3b66de8b5..6ab709bd3 100644
|
||||
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
@@ -22,6 +22,146 @@ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
+class BackendHandler:
|
||||
+ def __init__(self, inst, bedict, scope=ldap.SCOPE_ONELEVEL):
|
||||
+ self.inst = inst
|
||||
+ self.bedict = bedict
|
||||
+ self.bes = Backends(inst)
|
||||
+ self.scope = scope
|
||||
+ self.data = {}
|
||||
+
|
||||
+ def find_backend(self, bename):
|
||||
+ for be in self.bes.list():
|
||||
+ if be.get_attr_val_utf8_l('cn') == bename:
|
||||
+ return be
|
||||
+ return None
|
||||
+
|
||||
+ def cleanup(self):
|
||||
+ benames = list(self.bedict.keys())
|
||||
+ benames.reverse()
|
||||
+ for bename in benames:
|
||||
+ be = self.find_backend(bename)
|
||||
+ if be:
|
||||
+ be.delete()
|
||||
+
|
||||
+ def setup(self):
|
||||
+ # Create backends, add vlv index and populate the backends.
|
||||
+ for bename,suffix in self.bedict.items():
|
||||
+ be = self.bes.create(properties={
|
||||
+ 'cn': bename,
|
||||
+ 'nsslapd-suffix': suffix,
|
||||
+ })
|
||||
+ # Add suffix entry
|
||||
+ Organization(self.inst, dn=suffix).create(properties={ 'o': bename, })
|
||||
+ # Configure vlv
|
||||
+ vlv_search, vlv_index = create_vlv_search_and_index(
|
||||
+ self.inst, basedn=suffix,
|
||||
+ bename=bename, scope=self.scope,
|
||||
+ prefix=f'vlv_1lvl_{bename}')
|
||||
+ # Reindex
|
||||
+ reindex_task = Tasks(self.inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=suffix,
|
||||
+ attrname=vlv_index.rdn,
|
||||
+ args={TASK_WAIT: True},
|
||||
+ vlv=True
|
||||
+ ) == 0
|
||||
+ # Add ou=People entry
|
||||
+ OrganizationalUnits(self.inst, suffix).create(properties={'ou': 'People'})
|
||||
+ # Add another ou that will be deleted before the export
|
||||
+ # so that import will change the vlv search basedn entryid
|
||||
+ ou2 = OrganizationalUnits(self.inst, suffix).create(properties={'ou': 'dummy ou'})
|
||||
+ # Add a demo user so that vlv_check is happy
|
||||
+ dn = f'uid=demo_user,ou=people,{suffix}'
|
||||
+ UserAccount(self.inst, dn=dn).create( properties= {
|
||||
+ 'uid': 'demo_user',
|
||||
+ 'cn': 'Demo user',
|
||||
+ 'sn': 'Demo user',
|
||||
+ 'uidNumber': '99998',
|
||||
+ 'gidNumber': '99998',
|
||||
+ 'homeDirectory': '/var/empty',
|
||||
+ 'loginShell': '/bin/false',
|
||||
+ 'userpassword': DEMO_PW })
|
||||
+ # Add regular user
|
||||
+ add_users(self.inst, 10, suffix=suffix)
|
||||
+ # Removing ou2
|
||||
+ ou2.delete()
|
||||
+ # And export
|
||||
+ tasks = Tasks(self.inst)
|
||||
+ ldif = f'{self.inst.get_ldif_dir()}/db-{bename}.ldif'
|
||||
+ assert tasks.exportLDIF(suffix=suffix,
|
||||
+ output_file=ldif,
|
||||
+ args={TASK_WAIT: True}) == 0
|
||||
+ # Add the various parameters in topology_st.belist
|
||||
+ self.data[bename] = { 'be': be,
|
||||
+ 'suffix': suffix,
|
||||
+ 'ldif': ldif,
|
||||
+ 'vlv_search' : vlv_search,
|
||||
+ 'vlv_index' : vlv_index,
|
||||
+ 'dn' : dn}
|
||||
+
|
||||
+
|
||||
+def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
|
||||
+ scope=ldap.SCOPE_SUBTREE, prefix="vlv", vlvsort="cn"):
|
||||
+ vlv_searches = VLVSearch(inst)
|
||||
+ vlv_search_properties = {
|
||||
+ "objectclass": ["top", "vlvSearch"],
|
||||
+ "cn": f"{prefix}Srch",
|
||||
+ "vlvbase": basedn,
|
||||
+ "vlvfilter": "(uid=*)",
|
||||
+ "vlvscope": str(scope),
|
||||
+ }
|
||||
+ vlv_searches.create(
|
||||
+ basedn=f"cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
+ properties=vlv_search_properties
|
||||
+ )
|
||||
+
|
||||
+ vlv_index = VLVIndex(inst)
|
||||
+ vlv_index_properties = {
|
||||
+ "objectclass": ["top", "vlvIndex"],
|
||||
+ "cn": f"{prefix}Idx",
|
||||
+ "vlvsort": vlvsort,
|
||||
+ }
|
||||
+ vlv_index.create(
|
||||
+ basedn=f"cn={prefix}Srch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
+ properties=vlv_index_properties
|
||||
+ )
|
||||
+ return vlv_searches, vlv_index
|
||||
+
|
||||
+
|
||||
+@pytest.fixture
|
||||
+def vlv_setup_with_uid_mr(topology_st, request):
|
||||
+ inst = topology_st.standalone
|
||||
+ bename = 'be1'
|
||||
+ besuffix = f'o={bename}'
|
||||
+ beh = BackendHandler(inst, { bename: besuffix })
|
||||
+
|
||||
+ def fin():
|
||||
+ # Cleanup function
|
||||
+ if not DEBUGGING and inst.exists() and inst.status():
|
||||
+ beh.cleanup()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Make sure that our backend are not already present.
|
||||
+ beh.cleanup()
|
||||
+
|
||||
+ # Then add the new backend
|
||||
+ beh.setup()
|
||||
+
|
||||
+ index = Index(inst, f'cn=uid,cn=index,cn={bename},cn=ldbm database,cn=plugins,cn=config')
|
||||
+ index.add('nsMatchingRule', '2.5.13.2')
|
||||
+ reindex_task = Tasks(inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=besuffix,
|
||||
+ attrname='uid',
|
||||
+ args={TASK_WAIT: True}
|
||||
+ ) == 0
|
||||
+
|
||||
+ topology_st.beh = beh
|
||||
+ return topology_st
|
||||
+
|
||||
+
|
||||
@pytest.mark.DS47966
|
||||
def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
|
||||
"""
|
||||
@@ -105,6 +245,49 @@ def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
|
||||
entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")
|
||||
|
||||
|
||||
+def test_vlv_with_mr(vlv_setup_with_uid_mr):
|
||||
+ """
|
||||
+ Testing vlv having specific matching rule
|
||||
+
|
||||
+ :id: 5e04afe2-beec-11ef-aa84-482ae39447e5
|
||||
+ :setup: Standalone with uid have a matching rule index
|
||||
+ :steps:
|
||||
+ 1. Append vlvIndex entries then vlvSearch entry in the dse.ldif
|
||||
+ 2. Restart the server
|
||||
+ :expectedresults:
|
||||
+ 1. Should Success.
|
||||
+ 2. Should Success.
|
||||
+ """
|
||||
+ inst = vlv_setup_with_uid_mr.standalone
|
||||
+ beh = vlv_setup_with_uid_mr.beh
|
||||
+ bename, besuffix = next(iter(beh.bedict.items()))
|
||||
+ vlv_searches, vlv_index = create_vlv_search_and_index(
|
||||
+ inst, basedn=besuffix, bename=bename,
|
||||
+ vlvsort="uid:2.5.13.2")
|
||||
+ # Reindex the vlv
|
||||
+ reindex_task = Tasks(inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=besuffix,
|
||||
+ attrname=vlv_index.rdn,
|
||||
+ args={TASK_WAIT: True},
|
||||
+ vlv=True
|
||||
+ ) == 0
|
||||
+
|
||||
+ inst.restart()
|
||||
+ users = UserAccounts(inst, besuffix)
|
||||
+ user_properties = {
|
||||
+ 'uid': f'a new testuser',
|
||||
+ 'cn': f'a new testuser',
|
||||
+ 'sn': 'user',
|
||||
+ 'uidNumber': '0',
|
||||
+ 'gidNumber': '0',
|
||||
+ 'homeDirectory': 'foo'
|
||||
+ }
|
||||
+ user = users.create(properties=user_properties)
|
||||
+ user.delete()
|
||||
+ assert inst.status()
|
||||
+
|
||||
+
|
||||
if __name__ == "__main__":
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/cleanup.c b/ldap/servers/slapd/back-ldbm/cleanup.c
|
||||
index 6b2e9faef..939d8bc4f 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/cleanup.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/cleanup.c
|
||||
@@ -15,12 +15,14 @@
|
||||
|
||||
#include "back-ldbm.h"
|
||||
#include "dblayer.h"
|
||||
+#include "vlv_srch.h"
|
||||
|
||||
int
|
||||
ldbm_back_cleanup(Slapi_PBlock *pb)
|
||||
{
|
||||
struct ldbminfo *li;
|
||||
Slapi_Backend *be;
|
||||
+ struct vlvSearch *nextp;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_cleanup", "ldbm backend cleaning up\n");
|
||||
slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
|
||||
@@ -45,6 +47,12 @@ ldbm_back_cleanup(Slapi_PBlock *pb)
|
||||
return 0;
|
||||
}
|
||||
|
||||
+ /* Release the vlv list */
|
||||
+ for (struct vlvSearch *p=be->vlvSearchList; p; p=nextp) {
|
||||
+ nextp = p->vlv_next;
|
||||
+ vlvSearch_delete(&p);
|
||||
+ }
|
||||
+
|
||||
/*
|
||||
* We check if li is NULL. Because of an issue in how we create backends
|
||||
* we share the li and plugin info between many unique backends. This causes
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
|
||||
index 05cc5b891..6b8ce0016 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
|
||||
@@ -494,8 +494,12 @@ int
|
||||
dblayer_close(struct ldbminfo *li, int dbmode)
|
||||
{
|
||||
dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
|
||||
-
|
||||
- return priv->dblayer_close_fn(li, dbmode);
|
||||
+ int rc = priv->dblayer_close_fn(li, dbmode);
|
||||
+ if (rc == 0) {
|
||||
+ /* Clean thread specific data */
|
||||
+ dblayer_destroy_txn_stack();
|
||||
+ }
|
||||
+ return rc;
|
||||
}
|
||||
|
||||
/* Routines for opening and closing random files in the DB_ENV.
|
||||
@@ -621,6 +625,9 @@ dblayer_erase_index_file(backend *be, struct attrinfo *a, PRBool use_lock, int n
|
||||
return 0;
|
||||
}
|
||||
struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
|
||||
+ if (NULL == li) {
|
||||
+ return 0;
|
||||
+ }
|
||||
dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
|
||||
|
||||
return priv->dblayer_rm_db_file_fn(be, a, use_lock, no_force_chkpt);
|
||||
@@ -1382,3 +1389,14 @@ dblayer_pop_pvt_txn(void)
|
||||
}
|
||||
return;
|
||||
}
|
||||
+
|
||||
+void
|
||||
+dblayer_destroy_txn_stack(void)
|
||||
+{
|
||||
+ /*
|
||||
+ * Cleanup for the main thread to avoid false/positive leaks from libasan
|
||||
+ * Note: data is freed because PR_SetThreadPrivate calls the
|
||||
+ * dblayer_cleanup_txn_stack callback
|
||||
+ */
|
||||
+ PR_SetThreadPrivate(thread_private_txn_stack, NULL);
|
||||
+}
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
|
||||
index 708756d3e..70700ca1d 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
|
||||
@@ -54,7 +54,7 @@ attrinfo_delete(struct attrinfo **pp)
|
||||
idl_release_private(*pp);
|
||||
(*pp)->ai_key_cmp_fn = NULL;
|
||||
slapi_ch_free((void **)&((*pp)->ai_type));
|
||||
- slapi_ch_free((void **)(*pp)->ai_index_rules);
|
||||
+ charray_free((*pp)->ai_index_rules);
|
||||
slapi_ch_free((void **)&((*pp)->ai_attrcrypt));
|
||||
attr_done(&((*pp)->ai_sattr));
|
||||
attrinfo_delete_idlistinfo(&(*pp)->ai_idlistinfo);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/matchrule.c b/ldap/servers/slapd/back-ldbm/matchrule.c
|
||||
index 5d516b9f8..5365e8acf 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/matchrule.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/matchrule.c
|
||||
@@ -107,7 +107,7 @@ destroy_matchrule_indexer(Slapi_PBlock *pb)
|
||||
* is destroyed
|
||||
*/
|
||||
int
|
||||
-matchrule_values_to_keys(Slapi_PBlock *pb, struct berval **input_values, struct berval ***output_values)
|
||||
+matchrule_values_to_keys(Slapi_PBlock *pb, Slapi_Value **input_values, struct berval ***output_values)
|
||||
{
|
||||
IFP mrINDEX = NULL;
|
||||
|
||||
@@ -135,10 +135,8 @@ matchrule_values_to_keys_sv(Slapi_PBlock *pb, Slapi_Value **input_values, Slapi_
|
||||
slapi_pblock_get(pb, SLAPI_PLUGIN_MR_INDEX_SV_FN, &mrINDEX);
|
||||
if (NULL == mrINDEX) { /* old school - does not have SV function */
|
||||
int rc;
|
||||
- struct berval **bvi = NULL, **bvo = NULL;
|
||||
- valuearray_get_bervalarray(input_values, &bvi);
|
||||
- rc = matchrule_values_to_keys(pb, bvi, &bvo);
|
||||
- ber_bvecfree(bvi);
|
||||
+ struct berval **bvo = NULL;
|
||||
+ rc = matchrule_values_to_keys(pb, input_values, &bvo);
|
||||
/* note - the indexer owns bvo and will free it when destroyed */
|
||||
valuearray_init_bervalarray(bvo, output_values);
|
||||
/* store output values in SV form - caller expects SLAPI_PLUGIN_MR_KEYS is Slapi_Value** */
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
|
||||
index d93ff9239..157788fa4 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
|
||||
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
|
||||
@@ -84,6 +84,7 @@ int dblayer_release_index_file(backend *be, struct attrinfo *a, DB *pDB);
|
||||
int dblayer_erase_index_file(backend *be, struct attrinfo *a, PRBool use_lock, int no_force_chkpt);
|
||||
int dblayer_get_id2entry(backend *be, DB **ppDB);
|
||||
int dblayer_release_id2entry(backend *be, DB *pDB);
|
||||
+void dblayer_destroy_txn_stack(void);
|
||||
int dblayer_txn_init(struct ldbminfo *li, back_txn *txn);
|
||||
int dblayer_txn_begin(backend *be, back_txnid parent_txn, back_txn *txn);
|
||||
int dblayer_txn_begin_ext(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock);
|
||||
@@ -560,7 +561,7 @@ int compute_allids_limit(Slapi_PBlock *pb, struct ldbminfo *li);
|
||||
*/
|
||||
int create_matchrule_indexer(Slapi_PBlock **pb, char *matchrule, char *type);
|
||||
int destroy_matchrule_indexer(Slapi_PBlock *pb);
|
||||
-int matchrule_values_to_keys(Slapi_PBlock *pb, struct berval **input_values, struct berval ***output_values);
|
||||
+int matchrule_values_to_keys(Slapi_PBlock *pb, Slapi_Value **input_values, struct berval ***output_values);
|
||||
int matchrule_values_to_keys_sv(Slapi_PBlock *pb, Slapi_Value **input_values, Slapi_Value ***output_values);
|
||||
|
||||
/*
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/sort.c b/ldap/servers/slapd/back-ldbm/sort.c
|
||||
index 70ac60803..196af753f 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/sort.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/sort.c
|
||||
@@ -536,30 +536,18 @@ compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s, baggage_carrier *bc, int *e
|
||||
valuearray_get_bervalarray(valueset_get_valuearray(&attr_b->a_present_values), &value_b);
|
||||
} else {
|
||||
/* Match rule case */
|
||||
- struct berval **actual_value_a = NULL;
|
||||
- struct berval **actual_value_b = NULL;
|
||||
- struct berval **temp_value = NULL;
|
||||
-
|
||||
- valuearray_get_bervalarray(valueset_get_valuearray(&attr_a->a_present_values), &actual_value_a);
|
||||
- valuearray_get_bervalarray(valueset_get_valuearray(&attr_b->a_present_values), &actual_value_b);
|
||||
- matchrule_values_to_keys(this_one->mr_pb, actual_value_a, &temp_value);
|
||||
- /* Now copy it, so the second call doesn't crap on it */
|
||||
- value_a = slapi_ch_bvecdup(temp_value); /* Really, we'd prefer to not call the chXXX variant...*/
|
||||
- matchrule_values_to_keys(this_one->mr_pb, actual_value_b, &value_b);
|
||||
-
|
||||
- if ((actual_value_a && !value_a) ||
|
||||
- (actual_value_b && !value_b)) {
|
||||
- ber_bvecfree(actual_value_a);
|
||||
- ber_bvecfree(actual_value_b);
|
||||
- CACHE_RETURN(&inst->inst_cache, &a);
|
||||
- CACHE_RETURN(&inst->inst_cache, &b);
|
||||
- *error = 1;
|
||||
- return 0;
|
||||
+ Slapi_Value **va_a = valueset_get_valuearray(&attr_a->a_present_values);
|
||||
+ Slapi_Value **va_b = valueset_get_valuearray(&attr_b->a_present_values);
|
||||
+
|
||||
+ matchrule_values_to_keys(this_one->mr_pb, va_a, &value_a);
|
||||
+ /* Plugin owns the memory ==> duplicate the key before the next call garbles it */
|
||||
+ value_a = slapi_ch_bvecdup(value_a);
|
||||
+ matchrule_values_to_keys(this_one->mr_pb, va_b, &value_b);
|
||||
+
|
||||
+ if ((va_a && !value_a) || (va_b && !value_b)) {
|
||||
+ result = 0;
|
||||
+ goto bail;
|
||||
}
|
||||
- if (actual_value_a)
|
||||
- ber_bvecfree(actual_value_a);
|
||||
- if (actual_value_b)
|
||||
- ber_bvecfree(actual_value_b);
|
||||
}
|
||||
/* Compare them */
|
||||
if (!order) {
|
||||
@@ -582,9 +570,10 @@ compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s, baggage_carrier *bc, int *e
|
||||
}
|
||||
/* If so, proceed to the next attribute for comparison */
|
||||
}
|
||||
+ *error = 0;
|
||||
+bail:
|
||||
CACHE_RETURN(&inst->inst_cache, &a);
|
||||
CACHE_RETURN(&inst->inst_cache, &b);
|
||||
- *error = 0;
|
||||
return result;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c
|
||||
index 121fb3667..70e0bac85 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/vlv.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/vlv.c
|
||||
@@ -605,7 +605,7 @@ vlv_getindices(IFP callback_fn, void *param, backend *be)
|
||||
* generate the same composite key, so we append the EntryID
|
||||
* to ensure the uniqueness of the key.
|
||||
*
|
||||
- * Always creates a key. Never returns NULL.
|
||||
+ * May return NULL in case of errors (typically in some configuration error cases)
|
||||
*/
|
||||
static struct vlv_key *
|
||||
vlv_create_key(struct vlvIndex *p, struct backentry *e)
|
||||
@@ -659,10 +659,8 @@ vlv_create_key(struct vlvIndex *p, struct backentry *e)
|
||||
/* Matching rule. Do the magic mangling. Plugin owns the memory. */
|
||||
if (p->vlv_mrpb[sortattr] != NULL) {
|
||||
/* xxxPINAKI */
|
||||
- struct berval **bval = NULL;
|
||||
Slapi_Value **va = valueset_get_valuearray(&attr->a_present_values);
|
||||
- valuearray_get_bervalarray(va, &bval);
|
||||
- matchrule_values_to_keys(p->vlv_mrpb[sortattr], bval, &value);
|
||||
+ matchrule_values_to_keys(p->vlv_mrpb[sortattr], va, &value);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -779,6 +777,13 @@ do_vlv_update_index(back_txn *txn, struct ldbminfo *li __attribute__((unused)),
|
||||
}
|
||||
|
||||
key = vlv_create_key(pIndex, entry);
|
||||
+ if (key == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "vlv_create_key", "Unable to generate vlv %s index key."
|
||||
+ " There may be a configuration issue.\n", pIndex->vlv_name);
|
||||
+ dblayer_release_index_file(be, pIndex->vlv_attrinfo, db);
|
||||
+ return rc;
|
||||
+ }
|
||||
+
|
||||
if (NULL != txn) {
|
||||
db_txn = txn->back_txn_txn;
|
||||
} else {
|
||||
@@ -949,11 +954,11 @@ vlv_create_matching_rule_value(Slapi_PBlock *pb, struct berval *original_value)
|
||||
struct berval **value = NULL;
|
||||
if (pb != NULL) {
|
||||
struct berval **outvalue = NULL;
|
||||
- struct berval *invalue[2];
|
||||
- invalue[0] = original_value; /* jcm: cast away const */
|
||||
- invalue[1] = NULL;
|
||||
+ Slapi_Value v_in = {0};
|
||||
+ Slapi_Value *va_in[2] = { &v_in, NULL };
|
||||
+ slapi_value_init_berval(&v_in, original_value);
|
||||
/* The plugin owns the memory it returns in outvalue */
|
||||
- matchrule_values_to_keys(pb, invalue, &outvalue);
|
||||
+ matchrule_values_to_keys(pb, va_in, &outvalue);
|
||||
if (outvalue != NULL) {
|
||||
value = slapi_ch_bvecdup(outvalue);
|
||||
}
|
||||
@@ -1610,11 +1615,8 @@ retry:
|
||||
PRBool needFree = PR_FALSE;
|
||||
|
||||
if (sort_control->mr_pb != NULL) {
|
||||
- struct berval **tmp_entry_value = NULL;
|
||||
-
|
||||
- valuearray_get_bervalarray(csn_value, &tmp_entry_value);
|
||||
/* Matching rule. Do the magic mangling. Plugin owns the memory. */
|
||||
- matchrule_values_to_keys(sort_control->mr_pb, /* xxxPINAKI needs modification attr->a_vals */ tmp_entry_value, &entry_value);
|
||||
+ matchrule_values_to_keys(sort_control->mr_pb, csn_value, &entry_value);
|
||||
} else {
|
||||
valuearray_get_bervalarray(csn_value, &entry_value);
|
||||
needFree = PR_TRUE; /* entry_value is a copy */
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/vlv_srch.c b/ldap/servers/slapd/back-ldbm/vlv_srch.c
|
||||
index fe1208d59..11d1c715b 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/vlv_srch.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/vlv_srch.c
|
||||
@@ -203,6 +203,9 @@ vlvSearch_delete(struct vlvSearch **ppvs)
|
||||
{
|
||||
if (ppvs != NULL && *ppvs != NULL) {
|
||||
struct vlvIndex *pi, *ni;
|
||||
+ if ((*ppvs)->vlv_e) {
|
||||
+ slapi_entry_free((struct slapi_entry *)((*ppvs)->vlv_e));
|
||||
+ }
|
||||
slapi_sdn_free(&((*ppvs)->vlv_dn));
|
||||
slapi_ch_free((void **)&((*ppvs)->vlv_name));
|
||||
slapi_sdn_free(&((*ppvs)->vlv_base));
|
||||
@@ -217,7 +220,6 @@ vlvSearch_delete(struct vlvSearch **ppvs)
|
||||
pi = ni;
|
||||
}
|
||||
slapi_ch_free((void **)ppvs);
|
||||
- *ppvs = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/generation.c b/ldap/servers/slapd/generation.c
|
||||
index c4f20f793..89f097322 100644
|
||||
--- a/ldap/servers/slapd/generation.c
|
||||
+++ b/ldap/servers/slapd/generation.c
|
||||
@@ -93,9 +93,13 @@ get_server_dataversion()
|
||||
lenstr *l = NULL;
|
||||
Slapi_Backend *be;
|
||||
char *cookie;
|
||||
+ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
+ /* Serialize to avoid race condition */
|
||||
+ pthread_mutex_lock(&mutex);
|
||||
/* we already cached the copy - just return it */
|
||||
if (server_dataversion_id != NULL) {
|
||||
+ pthread_mutex_unlock(&mutex);
|
||||
return server_dataversion_id;
|
||||
}
|
||||
|
||||
@@ -130,5 +134,6 @@ get_server_dataversion()
|
||||
server_dataversion_id = slapi_ch_strdup(l->ls_buf);
|
||||
}
|
||||
lenstr_free(&l);
|
||||
+ pthread_mutex_unlock(&mutex);
|
||||
return server_dataversion_id;
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c
|
||||
index 13f76fe52..6cf88b7de 100644
|
||||
--- a/ldap/servers/slapd/plugin_mr.c
|
||||
+++ b/ldap/servers/slapd/plugin_mr.c
|
||||
@@ -391,28 +391,18 @@ mr_wrap_mr_index_sv_fn(Slapi_PBlock *pb)
|
||||
return rc;
|
||||
}
|
||||
|
||||
-/* this function takes SLAPI_PLUGIN_MR_VALUES as struct berval ** and
|
||||
+/* this function takes SLAPI_PLUGIN_MR_VALUES as Slapi_Value ** and
|
||||
returns SLAPI_PLUGIN_MR_KEYS as struct berval **
|
||||
*/
|
||||
static int
|
||||
mr_wrap_mr_index_fn(Slapi_PBlock *pb)
|
||||
{
|
||||
int rc = -1;
|
||||
- struct berval **in_vals = NULL;
|
||||
struct berval **out_vals = NULL;
|
||||
struct mr_private *mrpriv = NULL;
|
||||
- Slapi_Value **in_vals_sv = NULL;
|
||||
Slapi_Value **out_vals_sv = NULL;
|
||||
|
||||
- slapi_pblock_get(pb, SLAPI_PLUGIN_MR_VALUES, &in_vals); /* get bervals */
|
||||
- /* convert bervals to sv ary */
|
||||
- valuearray_init_bervalarray(in_vals, &in_vals_sv);
|
||||
- slapi_pblock_set(pb, SLAPI_PLUGIN_MR_VALUES, in_vals_sv); /* use sv */
|
||||
rc = mr_wrap_mr_index_sv_fn(pb);
|
||||
- /* clean up in_vals_sv */
|
||||
- valuearray_free(&in_vals_sv);
|
||||
- /* restore old in_vals */
|
||||
- slapi_pblock_set(pb, SLAPI_PLUGIN_MR_VALUES, in_vals);
|
||||
/* get result sv keys */
|
||||
slapi_pblock_get(pb, SLAPI_PLUGIN_MR_KEYS, &out_vals_sv);
|
||||
/* convert to bvec */
|
||||
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
|
||||
index 9acced205..cee073ea7 100644
|
||||
--- a/src/lib389/lib389/backend.py
|
||||
+++ b/src/lib389/lib389/backend.py
|
||||
@@ -1029,6 +1029,16 @@ class Backends(DSLdapObjects):
|
||||
for be in sorted(self.list(), key=lambda be: len(be.get_suffix()), reverse=True):
|
||||
be.delete()
|
||||
|
||||
+ def get_backend(self, suffix):
|
||||
+ """
|
||||
+ Return the backend associated with the provided suffix.
|
||||
+ """
|
||||
+ suffix_l = suffix.lower()
|
||||
+ for be in self.list():
|
||||
+ if be.get_attr_val_utf8_l('nsslapd-suffix') == suffix_l:
|
||||
+ return be
|
||||
+ return None
|
||||
+
|
||||
|
||||
class DatabaseConfig(DSLdapObject):
|
||||
"""Backend Database configuration
|
||||
--
|
||||
2.48.1
|
||||
|
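Editor's note: the regression test added above reduces to a short recipe for putting an extended matching rule (OID 2.5.13.2) on an index and rebuilding it. A hedged sketch built only from the lib389 calls that appear in this patch; the import location of TASK_WAIT and the `inst`/`suffix` names are assumptions:

    from lib389.backend import Backends
    from lib389.tasks import Tasks
    from lib389.properties import TASK_WAIT  # assumed location of the constant

    def add_mr_and_reindex(inst, suffix, attr='uid', oid='2.5.13.2'):
        """Attach an extended matching rule to an index and reindex the suffix."""
        be = Backends(inst).get_backend(suffix)   # helper added by this patch
        index = be.get_index(attr)
        index.replace('nsMatchingRule', oid)
        # reindex must rebuild every configured index type for the attribute
        assert Tasks(inst).reindex(suffix=suffix, args={TASK_WAIT: True}) == 0
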
@ -1,230 +0,0 @@
|
||||
From bd2829d04491556c35a0b36b591c09a69baf6546 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Mon, 11 Dec 2023 11:58:40 +0100
|
||||
Subject: [PATCH] Issue 6004 - idletimeout may be ignored (#6005)
|
||||
|
||||
* Issue 6004 - idletimeout may be ignored
|
||||
|
||||
Problem: idletimeout is still not handled when binding as non-root (unless there is some activity
|
||||
on another connection)
|
||||
Fix:
|
||||
Add a slapi_eq_repeat_rel handler that walks all active connections every second and checks whether the timeout has expired.
|
||||
Note about CI test:
|
||||
Notice that idletimeout is never enforced for connections bound as root (i.e. cn=directory manager).
|
||||
|
||||
Issue #6004
|
||||
|
||||
Reviewed by: @droideck, @tbordaz (Thanks!)
|
||||
|
||||
(cherry picked from commit 86b5969acbe124eec8c89bcf1ab2156b2b140c17)
|
||||
(cherry picked from commit bdb0a72b4953678e5418406b3c202dfa2c7469a2)
|
||||
(cherry picked from commit 61cebc191cd4090072dda691b9956dbde4cf7c48)
|
||||
---
|
||||
.../tests/suites/config/regression_test.py | 82 ++++++++++++++++++-
|
||||
ldap/servers/slapd/daemon.c | 52 +++++++++++-
|
||||
2 files changed, 128 insertions(+), 6 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/config/regression_test.py b/dirsrvtests/tests/suites/config/regression_test.py
|
||||
index 0000dd82d..8dbba8cd2 100644
|
||||
--- a/dirsrvtests/tests/suites/config/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/config/regression_test.py
|
||||
@@ -6,20 +6,49 @@
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
#
|
||||
+import os
|
||||
import logging
|
||||
import pytest
|
||||
+import time
|
||||
from lib389.utils import *
|
||||
from lib389.dseldif import DSEldif
|
||||
-from lib389.config import LDBMConfig
|
||||
+from lib389.config import BDB_LDBMConfig, LDBMConfig, Config
|
||||
from lib389.backend import Backends
|
||||
from lib389.topologies import topology_st as topo
|
||||
+from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
|
||||
+from lib389._constants import DEFAULT_SUFFIX, PASSWORD, DN_DM
|
||||
|
||||
pytestmark = pytest.mark.tier0
|
||||
|
||||
logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
CUSTOM_MEM = '9100100100'
|
||||
+IDLETIMEOUT = 5
|
||||
+DN_TEST_USER = f'uid={TEST_USER_PROPERTIES["uid"]},ou=People,{DEFAULT_SUFFIX}'
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="module")
|
||||
+def idletimeout_topo(topo, request):
|
||||
+ """Create an instance with a test user and set idletimeout"""
|
||||
+ inst = topo.standalone
|
||||
+ config = Config(inst)
|
||||
+
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ user = users.create(properties={
|
||||
+ **TEST_USER_PROPERTIES,
|
||||
+ 'userpassword' : PASSWORD,
|
||||
+ })
|
||||
+ config.replace('nsslapd-idletimeout', str(IDLETIMEOUT))
|
||||
+
|
||||
+ def fin():
|
||||
+ if not DEBUGGING:
|
||||
+ config.reset('nsslapd-idletimeout')
|
||||
+ user.delete()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+ return topo
|
||||
|
||||
|
||||
# Function to return value of available memory in kb
|
||||
@@ -79,7 +108,7 @@ def test_maxbersize_repl(topo):
|
||||
nsslapd-errorlog-logmaxdiskspace are set in certain order
|
||||
|
||||
:id: 743e912c-2be4-4f5f-9c2a-93dcb18f51a0
|
||||
- :setup: MMR with two suppliers
|
||||
+ :setup: Standalone Instance
|
||||
:steps:
|
||||
1. Stop the instance
|
||||
2. Set nsslapd-errorlog-maxlogsize before/after
|
||||
@@ -112,3 +141,52 @@ def test_maxbersize_repl(topo):
|
||||
log.info("Assert no init_dse_file errors in the error log")
|
||||
assert not inst.ds_error_log.match('.*ERR - init_dse_file.*')
|
||||
|
||||
+
|
||||
+def test_bdb_config(topo):
|
||||
+ """Check that bdb config entry exists
|
||||
+
|
||||
+ :id: edbc6f54-7c98-11ee-b1c0-482ae39447e5
|
||||
+ :setup: standalone
|
||||
+ :steps:
|
||||
+ 1. Check that bdb config instance exists.
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topo.standalone
|
||||
+ assert BDB_LDBMConfig(inst).exists()
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("dn,expected_result", [(DN_TEST_USER, True), (DN_DM, False)])
|
||||
+def test_idletimeout(idletimeout_topo, dn, expected_result):
|
||||
+ """Check that an idle connection bound as the given dn is closed once idletimeout expires
|
||||
+
|
||||
+ :id: b20f2826-942a-11ee-827b-482ae39447e5
|
||||
+ :parametrized: yes
|
||||
+ :setup: Standalone Instance with test user and idletimeout
|
||||
+ :steps:
|
||||
+ 1. Open new ldap connection
|
||||
+ 2. Bind with the provided dn
|
||||
+ 3. Wait longer than idletimeout
|
||||
+ 4. Try to bind again the provided dn and check if
|
||||
+ connection is closed or not.
|
||||
+ 5. Check if result is the expected one.
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = idletimeout_topo.standalone
|
||||
+
|
||||
+ l = ldap.initialize(f'ldap://localhost:{inst.port}')
|
||||
+ l.bind_s(dn, PASSWORD)
|
||||
+ time.sleep(IDLETIMEOUT+1)
|
||||
+ try:
|
||||
+ l.bind_s(dn, PASSWORD)
|
||||
+ result = False
|
||||
+ except ldap.SERVER_DOWN:
|
||||
+ result = True
|
||||
+ assert expected_result == result
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 57e07e5f5..6df109760 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -68,6 +68,8 @@
|
||||
#define SLAPD_ACCEPT_WAKEUP_TIMER 250
|
||||
#endif
|
||||
|
||||
+#define MILLISECONDS_PER_SECOND 1000
|
||||
+
|
||||
int slapd_wakeup_timer = SLAPD_WAKEUP_TIMER; /* time in ms to wakeup */
|
||||
int slapd_accept_wakeup_timer = SLAPD_ACCEPT_WAKEUP_TIMER; /* time in ms to wakeup */
|
||||
#ifdef notdef /* GGOODREPL */
|
||||
@@ -1045,6 +1047,48 @@ slapd_sockets_ports_free(daemon_ports_t *ports_info)
|
||||
#endif
|
||||
}
|
||||
|
||||
+/*
|
||||
+ * Tells if idle timeout has expired
|
||||
+ */
|
||||
+static inline int __attribute__((always_inline))
|
||||
+has_idletimeout_expired(Connection *c, time_t curtime)
|
||||
+{
|
||||
+ return (c->c_state != CONN_STATE_FREE && !c->c_gettingber &&
|
||||
+ c->c_idletimeout > 0 && NULL == c->c_ops &&
|
||||
+ curtime - c->c_idlesince >= c->c_idletimeout);
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * slapi_eq_repeat_rel callback that checks that idletimeout has not expired.
|
||||
+ */
|
||||
+void
|
||||
+check_idletimeout(time_t when __attribute__((unused)), void *arg __attribute__((unused)) )
|
||||
+{
|
||||
+ Connection_Table *ct = the_connection_table;
|
||||
+ time_t curtime = slapi_current_rel_time_t();
|
||||
+ /* Walk all active connections of all connection listeners */
|
||||
+ for (int list_num = 0; list_num < ct->list_num; list_num++) {
|
||||
+ for (Connection *c = connection_table_get_first_active_connection(ct, list_num);
|
||||
+ c != NULL; c = connection_table_get_next_active_connection(ct, c)) {
|
||||
+ if (!has_idletimeout_expired(c, curtime)) {
|
||||
+ continue;
|
||||
+ }
|
||||
+ /* Looks like idletimeout has expired, lets acquire the lock
|
||||
+ * and double check.
|
||||
+ */
|
||||
+ if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
|
||||
+ continue;
|
||||
+ }
|
||||
+ if (has_idletimeout_expired(c, curtime)) {
|
||||
+ /* idle timeout has expired */
|
||||
+ disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
+ SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
|
||||
+ }
|
||||
+ pthread_mutex_unlock(&(c->c_mutex));
|
||||
+ }
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
void
|
||||
slapd_daemon(daemon_ports_t *ports)
|
||||
{
|
||||
@@ -1258,7 +1302,9 @@ slapd_daemon(daemon_ports_t *ports)
|
||||
"MAINPID=%lu",
|
||||
(unsigned long)getpid());
|
||||
#endif
|
||||
-
|
||||
+ slapi_eq_repeat_rel(check_idletimeout, NULL,
|
||||
+ slapi_current_rel_time_t(),
|
||||
+ MILLISECONDS_PER_SECOND);
|
||||
/* The meat of the operation is in a loop on a call to select */
|
||||
while (!g_get_shutdown()) {
|
||||
int select_return = 0;
|
||||
@@ -1734,9 +1780,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused
|
||||
disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
SLAPD_DISCONNECT_POLL, EPIPE);
|
||||
}
|
||||
- } else if (c->c_idletimeout > 0 &&
|
||||
- (curtime - c->c_idlesince) >= c->c_idletimeout &&
|
||||
- NULL == c->c_ops) {
|
||||
+ } else if (has_idletimeout_expired(c, curtime)) {
|
||||
/* idle timeout */
|
||||
disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
|
||||
--
|
||||
2.48.1
|
||||
|
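Editor's note: the behaviour the new CI test verifies can be reproduced by hand: set nsslapd-idletimeout, bind on a fresh connection as a non-root DN, stay idle past the timeout, and the next operation should fail with SERVER_DOWN (cn=Directory Manager is never disconnected). A sketch assuming an existing user DN and password, mirroring the test in the patch:

    import time
    import ldap
    from lib389.config import Config

    def idletimeout_closes_connection(inst, dn, password, timeout=5):
        """Return True if the server drops an idle connection bound as dn."""
        Config(inst).replace('nsslapd-idletimeout', str(timeout))
        conn = ldap.initialize(f'ldap://localhost:{inst.port}')
        conn.bind_s(dn, password)
        time.sleep(timeout + 1)              # stay idle longer than the timeout
        try:
            conn.bind_s(dn, password)        # any operation on the idle connection
            return False                     # still alive (expected for Directory Manager)
        except ldap.SERVER_DOWN:
            return True                      # closed by the idle-timeout check
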
@ -1,69 +0,0 @@
|
||||
From e9fe6e074130406328b8e932a5c2efa814d190a0 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Wed, 5 Feb 2025 09:41:30 +0100
|
||||
Subject: [PATCH] Issue 6004 - (2nd) idletimeout may be ignored (#6569)
|
||||
|
||||
Problem:
|
||||
multiple listener threads were implemented in 2.x and later
|
||||
This is missing in 1.4.3, so the cherry-pick has to be adapted
|
||||
Fix:
|
||||
skip the loop over the listeners
|
||||
|
||||
Issue #6004
|
||||
|
||||
Reviewed by: Jamie Chapman (Thanks !)
|
||||
---
|
||||
ldap/servers/slapd/daemon.c | 36 +++++++++++++++++-------------------
|
||||
1 file changed, 17 insertions(+), 19 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 6df109760..bef75e4a3 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1066,26 +1066,24 @@ check_idletimeout(time_t when __attribute__((unused)), void *arg __attribute__((
|
||||
{
|
||||
Connection_Table *ct = the_connection_table;
|
||||
time_t curtime = slapi_current_rel_time_t();
|
||||
- /* Walk all active connections of all connection listeners */
|
||||
- for (int list_num = 0; list_num < ct->list_num; list_num++) {
|
||||
- for (Connection *c = connection_table_get_first_active_connection(ct, list_num);
|
||||
- c != NULL; c = connection_table_get_next_active_connection(ct, c)) {
|
||||
- if (!has_idletimeout_expired(c, curtime)) {
|
||||
- continue;
|
||||
- }
|
||||
- /* Looks like idletimeout has expired, lets acquire the lock
|
||||
- * and double check.
|
||||
- */
|
||||
- if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
|
||||
- continue;
|
||||
- }
|
||||
- if (has_idletimeout_expired(c, curtime)) {
|
||||
- /* idle timeout has expired */
|
||||
- disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
- SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
|
||||
- }
|
||||
- pthread_mutex_unlock(&(c->c_mutex));
|
||||
+ /* Walk all active connections */
|
||||
+ for (Connection *c = connection_table_get_first_active_connection(ct);
|
||||
+ c != NULL; c = connection_table_get_next_active_connection(ct, c)) {
|
||||
+ if (!has_idletimeout_expired(c, curtime)) {
|
||||
+ continue;
|
||||
+ }
|
||||
+ /* Looks like idletimeout has expired, lets acquire the lock
|
||||
+ * and double check.
|
||||
+ */
|
||||
+ if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
|
||||
+ continue;
|
||||
+ }
|
||||
+ if (has_idletimeout_expired(c, curtime)) {
|
||||
+ /* idle timeout has expired */
|
||||
+ disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
+ SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
|
||||
}
|
||||
+ pthread_mutex_unlock(&(c->c_mutex));
|
||||
}
|
||||
}
|
||||
|
||||
--
|
||||
2.48.1
|
||||
|
@ -1,52 +0,0 @@
|
||||
From b2edc371c5ca4fd24ef469c64829c48824098e7f Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 8 Jan 2025 12:57:52 -0500
|
||||
Subject: [PATCH] Issue 6485 - Fix double free in USN cleanup task
|
||||
|
||||
Description:
|
||||
|
||||
ASAN report shows double free of bind dn in the USN cleanup task data. The bind
|
||||
dn was passed as a reference so it should never have to be freed by the cleanup
|
||||
task.
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6485
|
||||
|
||||
Reviewed by: tbordaz(Thanks!)
|
||||
---
|
||||
ldap/servers/plugins/usn/usn_cleanup.c | 6 ++----
|
||||
1 file changed, 2 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/usn/usn_cleanup.c b/ldap/servers/plugins/usn/usn_cleanup.c
|
||||
index bdb55e6b1..7eaf0f88f 100644
|
||||
--- a/ldap/servers/plugins/usn/usn_cleanup.c
|
||||
+++ b/ldap/servers/plugins/usn/usn_cleanup.c
|
||||
@@ -240,7 +240,7 @@ usn_cleanup_add(Slapi_PBlock *pb,
|
||||
char *suffix = NULL;
|
||||
char *backend = NULL;
|
||||
char *maxusn = NULL;
|
||||
- char *bind_dn;
|
||||
+ char *bind_dn = NULL;
|
||||
struct usn_cleanup_data *cleanup_data = NULL;
|
||||
int rv = SLAPI_DSE_CALLBACK_OK;
|
||||
Slapi_Task *task = NULL;
|
||||
@@ -323,8 +323,7 @@ usn_cleanup_add(Slapi_PBlock *pb,
|
||||
suffix = NULL; /* don't free in this function */
|
||||
cleanup_data->maxusn_to_delete = maxusn;
|
||||
maxusn = NULL; /* don't free in this function */
|
||||
- cleanup_data->bind_dn = bind_dn;
|
||||
- bind_dn = NULL; /* don't free in this function */
|
||||
+ cleanup_data->bind_dn = slapi_ch_strdup(bind_dn);
|
||||
slapi_task_set_data(task, cleanup_data);
|
||||
|
||||
/* start the USN tombstone cleanup task as a separate thread */
|
||||
@@ -363,7 +362,6 @@ usn_cleanup_task_destructor(Slapi_Task *task)
|
||||
slapi_ch_free_string(&mydata->suffix);
|
||||
slapi_ch_free_string(&mydata->maxusn_to_delete);
|
||||
slapi_ch_free_string(&mydata->bind_dn);
|
||||
- /* Need to cast to avoid a compiler warning */
|
||||
slapi_ch_free((void **)&mydata);
|
||||
}
|
||||
}
|
||||
--
|
||||
2.48.1
|
||||
|
@ -1,38 +0,0 @@
|
||||
From 679262c0c292413851d2d004b588ecfd7d91c85a Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Tue, 11 Feb 2025 18:06:34 +0000
|
||||
Subject: [PATCH] Issue 5841 - dsconf incorrectly setting up Pass-Through
|
||||
Authentication (#6601)
|
||||
|
||||
Bug description:
|
||||
During init, PAMPassThroughAuthConfigs defines an "objectclass=nsslapdplugin"
|
||||
plugin object. During filter creation, dsconf fails as objectclass=nsslapdplugin
|
||||
is not present in the PAM PT config entry. This objectclass has been removed in
|
||||
all other branches, branch 1.4.3 was skipped as there are cherry pick conflicts.
|
||||
|
||||
Fix description:
|
||||
Remove nsslapdplugin from the plugin object's objectclass list.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/5841
|
||||
|
||||
Reviewed by: @progier389 (Thank you)
|
||||
---
|
||||
src/lib389/lib389/plugins.py | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 185398e5b..25b49dae4 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -1579,7 +1579,7 @@ class PAMPassThroughAuthConfigs(DSLdapObjects):
|
||||
|
||||
def __init__(self, instance, basedn="cn=PAM Pass Through Auth,cn=plugins,cn=config"):
|
||||
super(PAMPassThroughAuthConfigs, self).__init__(instance)
|
||||
- self._objectclasses = ['top', 'extensibleObject', 'nsslapdplugin', 'pamConfig']
|
||||
+ self._objectclasses = ['top', 'extensibleObject', 'pamConfig']
|
||||
self._filterattrs = ['cn']
|
||||
self._scope = ldap.SCOPE_ONELEVEL
|
||||
self._childobject = PAMPassThroughAuthConfig
|
||||
--
|
||||
2.48.1
|
||||
|
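Editor's note: the dsconf failure came from the search filter that DSLdapObjects derives from `_objectclasses`; with nsslapdplugin dropped, the filter matches the actual PAM PT config entries again. A minimal, hypothetical usage sketch (only the class and methods shown in the patch and standard lib389 object listing are assumed):

    from lib389.plugins import PAMPassThroughAuthConfigs

    def list_pam_pta_configs(inst):
        """List PAM Pass Through Auth config entries under the plugin entry."""
        configs = PAMPassThroughAuthConfigs(inst)
        # The filter is now built from top/extensibleObject/pamConfig only.
        return [cfg.dn for cfg in configs.list()]
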
File diff suppressed because it is too large
@ -1,319 +0,0 @@
|
||||
From 7d534efdcd96b13524dae587c3c5994ed01924ab Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Fri, 16 Feb 2024 13:52:36 -0800
|
||||
Subject: [PATCH] Issue 6067 - Improve dsidm CLI No Such Entry handling (#6079)
|
||||
|
||||
Description: Add additional error processing to dsidm CLI tool for when basedn
|
||||
or OU subentries are absent.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/6067
|
||||
|
||||
Reviewed by: @vashirov (Thanks!)
|
||||
---
|
||||
src/lib389/cli/dsidm | 21 ++++++++-------
|
||||
src/lib389/lib389/cli_idm/__init__.py | 38 ++++++++++++++++++++++++++-
|
||||
src/lib389/lib389/cli_idm/account.py | 4 +--
|
||||
src/lib389/lib389/cli_idm/service.py | 4 ++-
|
||||
src/lib389/lib389/idm/group.py | 10 ++++---
|
||||
src/lib389/lib389/idm/posixgroup.py | 5 ++--
|
||||
src/lib389/lib389/idm/services.py | 5 ++--
|
||||
src/lib389/lib389/idm/user.py | 5 ++--
|
||||
8 files changed, 67 insertions(+), 25 deletions(-)
|
||||
|
||||
diff --git a/src/lib389/cli/dsidm b/src/lib389/cli/dsidm
|
||||
index 1b739b103..970973f4f 100755
|
||||
--- a/src/lib389/cli/dsidm
|
||||
+++ b/src/lib389/cli/dsidm
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2023 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -19,6 +19,7 @@ import argparse
|
||||
import argcomplete
|
||||
from lib389.utils import get_instance_list, instance_choices
|
||||
from lib389._constants import DSRC_HOME
|
||||
+from lib389.cli_idm import _get_basedn_arg
|
||||
from lib389.cli_idm import account as cli_account
|
||||
from lib389.cli_idm import initialise as cli_init
|
||||
from lib389.cli_idm import organizationalunit as cli_ou
|
||||
@@ -117,14 +118,6 @@ if __name__ == '__main__':
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
||||
|
||||
- if dsrc_inst['basedn'] is None:
|
||||
- errmsg = "Must provide a basedn!"
|
||||
- if args.json:
|
||||
- sys.stderr.write('{"desc": "%s"}\n' % errmsg)
|
||||
- else:
|
||||
- log.error(errmsg)
|
||||
- sys.exit(1)
|
||||
-
|
||||
if not args.verbose:
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
|
||||
@@ -135,7 +128,15 @@ if __name__ == '__main__':
|
||||
result = False
|
||||
try:
|
||||
inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose, args=args)
|
||||
- result = args.func(inst, dsrc_inst['basedn'], log, args)
|
||||
+ basedn = _get_basedn_arg(inst, args, log, msg="Enter basedn")
|
||||
+ if basedn is None:
|
||||
+ errmsg = "Must provide a basedn!"
|
||||
+ if args.json:
|
||||
+ sys.stderr.write('{"desc": "%s"}\n' % errmsg)
|
||||
+ else:
|
||||
+ log.error(errmsg)
|
||||
+ sys.exit(1)
|
||||
+ result = args.func(inst, basedn, log, args)
|
||||
if args.verbose:
|
||||
log.info("Command successful.")
|
||||
except Exception as e:
|
||||
diff --git a/src/lib389/lib389/cli_idm/__init__.py b/src/lib389/lib389/cli_idm/__init__.py
|
||||
index 0dab54847..e3622246d 100644
|
||||
--- a/src/lib389/lib389/cli_idm/__init__.py
|
||||
+++ b/src/lib389/lib389/cli_idm/__init__.py
|
||||
@@ -1,15 +1,30 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2023 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
|
||||
+import sys
|
||||
import ldap
|
||||
from getpass import getpass
|
||||
import json
|
||||
+from lib389._mapped_object import DSLdapObject
|
||||
+from lib389.cli_base import _get_dn_arg
|
||||
+from lib389.idm.user import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_USER
|
||||
+from lib389.idm.group import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_GROUP
|
||||
+from lib389.idm.posixgroup import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_POSIXGROUP
|
||||
+from lib389.idm.services import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_SERVICES
|
||||
+
|
||||
+# The key is module name, the value is default RDN
|
||||
+BASEDN_RDNS = {
|
||||
+ 'user': DEFAULT_BASEDN_RDN_USER,
|
||||
+ 'group': DEFAULT_BASEDN_RDN_GROUP,
|
||||
+ 'posixgroup': DEFAULT_BASEDN_RDN_POSIXGROUP,
|
||||
+ 'service': DEFAULT_BASEDN_RDN_SERVICES,
|
||||
+}
|
||||
|
||||
|
||||
def _get_arg(args, msg=None):
|
||||
@@ -37,6 +52,27 @@ def _get_args(args, kws):
|
||||
return kwargs
|
||||
|
||||
|
||||
+def _get_basedn_arg(inst, args, log, msg=None):
|
||||
+ basedn_arg = _get_dn_arg(args.basedn, msg="Enter basedn")
|
||||
+ if not DSLdapObject(inst, basedn_arg).exists():
|
||||
+ raise ValueError(f'The base DN "{basedn_arg}" does not exist.')
|
||||
+
|
||||
+ # Get the RDN based on the last part of the module name if applicable
|
||||
+ # (lib389.cli_idm.user -> user)
|
||||
+ try:
|
||||
+ command_name = args.func.__module__.split('.')[-1]
|
||||
+ object_rdn = BASEDN_RDNS[command_name]
|
||||
+ # Check if the DN for our command exists
|
||||
+ command_basedn = f'{object_rdn},{basedn_arg}'
|
||||
+ if not DSLdapObject(inst, command_basedn).exists():
|
||||
+ errmsg = f'The DN "{command_basedn}" does not exist.'
|
||||
+ errmsg += f' It is required for "{command_name}" subcommand. Please create it first.'
|
||||
+ raise ValueError(errmsg)
|
||||
+ except KeyError:
|
||||
+ pass
|
||||
+ return basedn_arg
|
||||
+
|
||||
+
|
||||
# This is really similar to get_args, but generates from an array
|
||||
def _get_attributes(args, attrs):
|
||||
kwargs = {}
|
||||
diff --git a/src/lib389/lib389/cli_idm/account.py b/src/lib389/lib389/cli_idm/account.py
|
||||
index 5d7b9cc77..15f766588 100644
|
||||
--- a/src/lib389/lib389/cli_idm/account.py
|
||||
+++ b/src/lib389/lib389/cli_idm/account.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2023, Red Hat inc,
|
||||
+# Copyright (C) 2024, Red Hat inc,
|
||||
# Copyright (C) 2018, William Brown <william@blackhats.net.au>
|
||||
# All rights reserved.
|
||||
#
|
||||
@@ -91,7 +91,6 @@ def entry_status(inst, basedn, log, args):
|
||||
|
||||
|
||||
def subtree_status(inst, basedn, log, args):
|
||||
- basedn = _get_dn_arg(args.basedn, msg="Enter basedn to check")
|
||||
filter = ""
|
||||
scope = ldap.SCOPE_SUBTREE
|
||||
epoch_inactive_time = None
|
||||
@@ -121,7 +120,6 @@ def subtree_status(inst, basedn, log, args):
|
||||
|
||||
|
||||
def bulk_update(inst, basedn, log, args):
|
||||
- basedn = _get_dn_arg(args.basedn, msg="Enter basedn to search")
|
||||
search_filter = "(objectclass=*)"
|
||||
scope = ldap.SCOPE_SUBTREE
|
||||
scope_str = "sub"
|
||||
diff --git a/src/lib389/lib389/cli_idm/service.py b/src/lib389/lib389/cli_idm/service.py
|
||||
index c62fc12d1..c2b2c8c84 100644
|
||||
--- a/src/lib389/lib389/cli_idm/service.py
|
||||
+++ b/src/lib389/lib389/cli_idm/service.py
|
||||
@@ -57,7 +57,9 @@ def rename(inst, basedn, log, args, warn=True):
|
||||
_generic_rename(inst, basedn, log.getChild('_generic_rename'), MANY, rdn, args)
|
||||
|
||||
def create_parser(subparsers):
|
||||
- service_parser = subparsers.add_parser('service', help='Manage service accounts', formatter_class=CustomHelpFormatter)
|
||||
+ service_parser = subparsers.add_parser('service',
|
||||
+ help='Manage service accounts. The organizationalUnit (by default "ou=Services") '
|
||||
+ 'needs to exist prior to managing service accounts.', formatter_class=CustomHelpFormatter)
|
||||
|
||||
subcommands = service_parser.add_subparsers(help='action')
|
||||
|
||||
diff --git a/src/lib389/lib389/idm/group.py b/src/lib389/lib389/idm/group.py
|
||||
index 1b60a1f51..2cf2c7b23 100644
|
||||
--- a/src/lib389/lib389/idm/group.py
|
||||
+++ b/src/lib389/lib389/idm/group.py
|
||||
@@ -1,6 +1,6 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2023 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -16,6 +16,8 @@ MUST_ATTRIBUTES = [
|
||||
'cn',
|
||||
]
|
||||
RDN = 'cn'
|
||||
+DEFAULT_BASEDN_RDN = 'ou=Groups'
|
||||
+DEFAULT_BASEDN_RDN_ADMIN_GROUPS = 'ou=People'
|
||||
|
||||
|
||||
class Group(DSLdapObject):
|
||||
@@ -93,7 +95,7 @@ class Groups(DSLdapObjects):
|
||||
:type basedn: str
|
||||
"""
|
||||
|
||||
- def __init__(self, instance, basedn, rdn='ou=Groups'):
|
||||
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
|
||||
super(Groups, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'groupOfNames',
|
||||
@@ -140,7 +142,7 @@ class UniqueGroup(DSLdapObject):
|
||||
class UniqueGroups(DSLdapObjects):
|
||||
# WARNING!!!
|
||||
# Use group, not unique group!!!
|
||||
- def __init__(self, instance, basedn, rdn='ou=Groups'):
|
||||
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
|
||||
super(UniqueGroups, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'groupOfUniqueNames',
|
||||
@@ -203,7 +205,7 @@ class nsAdminGroups(DSLdapObjects):
|
||||
:type rdn: str
|
||||
"""
|
||||
|
||||
- def __init__(self, instance, basedn, rdn='ou=People'):
|
||||
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN_ADMIN_GROUPS):
|
||||
super(nsAdminGroups, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'nsAdminGroup'
|
||||
diff --git a/src/lib389/lib389/idm/posixgroup.py b/src/lib389/lib389/idm/posixgroup.py
|
||||
index d1debcf12..45735c579 100644
|
||||
--- a/src/lib389/lib389/idm/posixgroup.py
|
||||
+++ b/src/lib389/lib389/idm/posixgroup.py
|
||||
@@ -1,6 +1,6 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2023 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -17,6 +17,7 @@ MUST_ATTRIBUTES = [
|
||||
'gidNumber',
|
||||
]
|
||||
RDN = 'cn'
|
||||
+DEFAULT_BASEDN_RDN = 'ou=Groups'
|
||||
|
||||
|
||||
class PosixGroup(DSLdapObject):
|
||||
@@ -72,7 +73,7 @@ class PosixGroups(DSLdapObjects):
|
||||
:type basedn: str
|
||||
"""
|
||||
|
||||
- def __init__(self, instance, basedn, rdn='ou=Groups'):
|
||||
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
|
||||
super(PosixGroups, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'groupOfNames',
|
||||
diff --git a/src/lib389/lib389/idm/services.py b/src/lib389/lib389/idm/services.py
|
||||
index d1e5b4693..e750a32c4 100644
|
||||
--- a/src/lib389/lib389/idm/services.py
|
||||
+++ b/src/lib389/lib389/idm/services.py
|
||||
@@ -1,6 +1,6 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2021 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -16,6 +16,7 @@ RDN = 'cn'
|
||||
MUST_ATTRIBUTES = [
|
||||
'cn',
|
||||
]
|
||||
+DEFAULT_BASEDN_RDN = 'ou=Services'
|
||||
|
||||
class ServiceAccount(Account):
|
||||
"""A single instance of Service entry
|
||||
@@ -59,7 +60,7 @@ class ServiceAccounts(DSLdapObjects):
|
||||
:type basedn: str
|
||||
"""
|
||||
|
||||
- def __init__(self, instance, basedn, rdn='ou=Services'):
|
||||
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
|
||||
super(ServiceAccounts, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'applicationProcess',
|
||||
diff --git a/src/lib389/lib389/idm/user.py b/src/lib389/lib389/idm/user.py
|
||||
index 1206a6e08..3b21ccf1c 100644
|
||||
--- a/src/lib389/lib389/idm/user.py
|
||||
+++ b/src/lib389/lib389/idm/user.py
|
||||
@@ -1,6 +1,6 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2023 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -23,6 +23,7 @@ MUST_ATTRIBUTES = [
|
||||
'homeDirectory',
|
||||
]
|
||||
RDN = 'uid'
|
||||
+DEFAULT_BASEDN_RDN = 'ou=People'
|
||||
|
||||
TEST_USER_PROPERTIES = {
|
||||
'uid': 'testuser',
|
||||
@@ -201,7 +202,7 @@ class UserAccounts(DSLdapObjects):
|
||||
:type rdn: str
|
||||
"""
|
||||
|
||||
- def __init__(self, instance, basedn, rdn='ou=People'):
|
||||
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
|
||||
super(UserAccounts, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'account',
|
||||
--
|
||||
2.48.1
|
||||
|
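Editor's note: the new `_get_basedn_arg` helper refuses to run a subcommand when either the base DN or the container that subcommand needs (ou=People for user, ou=Groups for group, and so on) is missing. A stripped-down sketch of the same existence check, reusing only names and calls that appear in the patch:

    from lib389._mapped_object import DSLdapObject

    BASEDN_RDNS = {'user': 'ou=People', 'group': 'ou=Groups',
                   'posixgroup': 'ou=Groups', 'service': 'ou=Services'}

    def check_basedn(inst, basedn, command_name):
        """Raise ValueError if basedn or the subcommand's container is absent."""
        if not DSLdapObject(inst, basedn).exists():
            raise ValueError(f'The base DN "{basedn}" does not exist.')
        rdn = BASEDN_RDNS.get(command_name)
        if rdn and not DSLdapObject(inst, f'{rdn},{basedn}').exists():
            raise ValueError(f'The DN "{rdn},{basedn}" does not exist. '
                             f'It is required for "{command_name}" subcommand.')
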
@ -1,52 +0,0 @@
|
||||
From ee03e8443a108cff0cc4c7a03962fdc3a1fbf94d Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Wed, 16 Oct 2024 19:24:55 -0700
|
||||
Subject: [PATCH] Issue 6067 - Update dsidm to prioritize basedn from .dsrc
|
||||
over interactive input (#6362)
|
||||
|
||||
Description: Modifies dsidm CLI tool to check for the basedn in the .dsrc configuration file
|
||||
when the -b option is not provided.
|
||||
Previously, users were required to always specify the basedn interactively if -b was omitted,
|
||||
even if it was available in .dsrc.
|
||||
Now, the basedn is determined by first checking the -b option, then the .dsrc file, and finally
|
||||
prompting the user if neither is set.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/6067
|
||||
|
||||
Reviewed by: @Firstyear (Thanks!)
|
||||
---
|
||||
src/lib389/cli/dsidm | 2 +-
|
||||
src/lib389/lib389/cli_idm/__init__.py | 4 ++--
|
||||
2 files changed, 3 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/src/lib389/cli/dsidm b/src/lib389/cli/dsidm
|
||||
index 970973f4f..d318664bc 100755
|
||||
--- a/src/lib389/cli/dsidm
|
||||
+++ b/src/lib389/cli/dsidm
|
||||
@@ -128,7 +128,7 @@ if __name__ == '__main__':
|
||||
result = False
|
||||
try:
|
||||
inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose, args=args)
|
||||
- basedn = _get_basedn_arg(inst, args, log, msg="Enter basedn")
|
||||
+ basedn = _get_basedn_arg(inst, args, dsrc_inst['basedn'], log, msg="Enter basedn")
|
||||
if basedn is None:
|
||||
errmsg = "Must provide a basedn!"
|
||||
if args.json:
|
||||
diff --git a/src/lib389/lib389/cli_idm/__init__.py b/src/lib389/lib389/cli_idm/__init__.py
|
||||
index e3622246d..1f3e2dc86 100644
|
||||
--- a/src/lib389/lib389/cli_idm/__init__.py
|
||||
+++ b/src/lib389/lib389/cli_idm/__init__.py
|
||||
@@ -52,8 +52,8 @@ def _get_args(args, kws):
|
||||
return kwargs
|
||||
|
||||
|
||||
-def _get_basedn_arg(inst, args, log, msg=None):
|
||||
- basedn_arg = _get_dn_arg(args.basedn, msg="Enter basedn")
|
||||
+def _get_basedn_arg(inst, args, basedn, log, msg=None):
|
||||
+ basedn_arg = _get_dn_arg(basedn, msg="Enter basedn")
|
||||
if not DSLdapObject(inst, basedn_arg).exists():
|
||||
raise ValueError(f'The base DN "{basedn_arg}" does not exist.')
|
||||
|
||||
--
|
||||
2.48.1
|
||||
|
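Editor's note: after this change the basedn is resolved in a fixed order: the -b option, then the basedn key from .dsrc, and only then an interactive prompt. A small sketch of that precedence; `prompt_for_dn` is a hypothetical stand-in for lib389's prompting helper:

    def resolve_basedn(args_basedn, dsrc_basedn, prompt_for_dn):
        """Pick the basedn: CLI -b option first, then .dsrc, then ask the user."""
        if args_basedn:
            return args_basedn
        if dsrc_basedn:
            return dsrc_basedn
        return prompt_for_dn("Enter basedn")
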
@ -1,520 +0,0 @@
|
||||
From b8c079c770d3eaa4de49e997d42e1501c28a153b Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Mon, 8 Jul 2024 11:19:09 +0200
|
||||
Subject: [PATCH] Issue 6155 - ldap-agent fails to start because of permission
|
||||
error (#6179)
|
||||
|
||||
Issue: the dirsrv-snmp service fails to start when SELinux is enforcing because AVC denials prevent it from opening some files
|
||||
One workaround is to use the dac_override capability but it is a bad practice.
|
||||
Fix: Setting proper permissions:
|
||||
|
||||
Running ldap-agent with uid=root and gid=dirsrv to be able to access both snmp and dirsrv resources.
|
||||
Setting read permission on the group for the dse.ldif file
|
||||
Setting r/w permissions on the group for the snmp semaphore and mmap file
|
||||
For that one, special care is needed because the ns-slapd umask overrides the file creation permissions.
Since it is better to avoid changing the umask (changing the umask within the code is not thread safe,
and the current 0022 umask value is correct for most of the files), the safest way is to chmod the snmp file
if the needed permissions are not set.
|
||||
Issue: #6155
|
||||
|
||||
Reviewed by: @droideck , @vashirov (Thanks ! )
|
||||
|
||||
(cherry picked from commit eb7e57d77b557b63c65fdf38f9069893b021f049)
|
||||
---
|
||||
.github/scripts/generate_matrix.py | 4 +-
|
||||
dirsrvtests/tests/suites/snmp/snmp.py | 214 ++++++++++++++++++++++++++
|
||||
ldap/servers/slapd/agtmmap.c | 72 ++++++++-
|
||||
ldap/servers/slapd/agtmmap.h | 13 ++
|
||||
ldap/servers/slapd/dse.c | 6 +-
|
||||
ldap/servers/slapd/slap.h | 6 +
|
||||
ldap/servers/slapd/snmp_collator.c | 4 +-
|
||||
src/lib389/lib389/instance/setup.py | 5 +
|
||||
wrappers/systemd-snmp.service.in | 1 +
|
||||
9 files changed, 313 insertions(+), 12 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/snmp/snmp.py
|
||||
|
||||
diff --git a/.github/scripts/generate_matrix.py b/.github/scripts/generate_matrix.py
|
||||
index 584374597..8d67a1dc7 100644
|
||||
--- a/.github/scripts/generate_matrix.py
|
||||
+++ b/.github/scripts/generate_matrix.py
|
||||
@@ -21,8 +21,8 @@ else:
|
||||
# Use tests from the source
|
||||
suites = next(os.walk('dirsrvtests/tests/suites/'))[1]
|
||||
|
||||
- # Filter out snmp as it is an empty directory:
|
||||
- suites.remove('snmp')
|
||||
+ # Filter out webui because of broken tests
|
||||
+ suites.remove('webui')
|
||||
|
||||
# Run each replication test module separately to speed things up
|
||||
suites.remove('replication')
|
||||
diff --git a/dirsrvtests/tests/suites/snmp/snmp.py b/dirsrvtests/tests/suites/snmp/snmp.py
|
||||
new file mode 100644
|
||||
index 000000000..0952deb40
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/snmp/snmp.py
|
||||
@@ -0,0 +1,214 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import os
|
||||
+import pytest
|
||||
+import logging
|
||||
+import subprocess
|
||||
+import ldap
|
||||
+from datetime import datetime
|
||||
+from shutil import copyfile
|
||||
+from lib389.topologies import topology_m2 as topo_m2
|
||||
+from lib389.utils import selinux_present
|
||||
+
|
||||
+
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
+if DEBUGGING:
|
||||
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+else:
|
||||
+ logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+
|
||||
+SNMP_USER = 'user_name'
|
||||
+SNMP_PASSWORD = 'authentication_password'
|
||||
+SNMP_PRIVATE = 'private_password'
|
||||
+
|
||||
+# LDAP OID in MIB
|
||||
+LDAP_OID = '.1.3.6.1.4.1.2312.6.1.1'
|
||||
+LDAPCONNECTIONS_OID = f'{LDAP_OID}.21'
|
||||
+
|
||||
+
|
||||
+def run_cmd(cmd, check_returncode=True):
|
||||
+ """Run a command"""
|
||||
+
|
||||
+ log.info(f'Run: {cmd}')
|
||||
+ result = subprocess.run(cmd, capture_output=True, universal_newlines=True)
|
||||
+ log.info(f'STDOUT of {cmd} is:\n{result.stdout}')
|
||||
+ log.info(f'STDERR of {cmd} is:\n{result.stderr}')
|
||||
+ if check_returncode:
|
||||
+ result.check_returncode()
|
||||
+ return result
|
||||
+
|
||||
+
|
||||
+def add_lines(lines, filename):
|
||||
+ """Add lines that are not already present at the end of a file"""
|
||||
+
|
||||
+ log.info(f'add_lines({lines}, {filename})')
|
||||
+ try:
|
||||
+ with open(filename, 'r') as fd:
|
||||
+ for line in fd:
|
||||
+ try:
|
||||
+ lines.remove(line.strip())
|
||||
+ except ValueError:
|
||||
+ pass
|
||||
+ except FileNotFoundError:
|
||||
+ pass
|
||||
+ if lines:
|
||||
+ with open(filename, 'a') as fd:
|
||||
+ for line in lines:
|
||||
+ fd.write(f'{line}\n')
|
||||
+
|
||||
+
|
||||
+def remove_lines(lines, filename):
|
||||
+ """Remove lines in a file"""
|
||||
+
|
||||
+ log.info(f'remove_lines({lines}, {filename})')
|
||||
+ file_lines = []
|
||||
+ with open(filename, 'r') as fd:
|
||||
+ for line in fd:
|
||||
+ if not line.strip() in lines:
|
||||
+ file_lines.append(line)
|
||||
+ with open(filename, 'w') as fd:
|
||||
+ for line in file_lines:
|
||||
+ fd.write(line)
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="module")
|
||||
+def setup_snmp(topo_m2, request):
|
||||
+ """Install snmp and configure it
|
||||
+
|
||||
+ Returns the time just before dirsrv-snmp get restarted
|
||||
+ """
|
||||
+
|
||||
+ inst1 = topo_m2.ms["supplier1"]
|
||||
+ inst2 = topo_m2.ms["supplier2"]
|
||||
+
|
||||
+ # Check for the test prerequisites
|
||||
+ if os.getuid() != 0:
|
||||
+ pytest.skip('This test should be run by root superuser')
|
||||
+ return None
|
||||
+ if not inst1.with_systemd_running():
|
||||
+ pytest.skip('This test requires systemd')
|
||||
+ return None
|
||||
+ required_packages = {
|
||||
+ '389-ds-base-snmp': os.path.join(inst1.get_sbin_dir(), 'ldap-agent'),
|
||||
+ 'net-snmp': '/etc/snmp/snmpd.conf', }
|
||||
+ skip_msg = ""
|
||||
+ for package,file in required_packages.items():
|
||||
+ if not os.path.exists(file):
|
||||
+ skip_msg += f"Package {package} is not installed ({file} is missing).\n"
|
||||
+ if skip_msg != "":
|
||||
+ pytest.skip(f'This test requires the following package(s): {skip_msg}')
|
||||
+ return None
|
||||
+
|
||||
+ # Install snmp
|
||||
+ # run_cmd(['/usr/bin/dnf', 'install', '-y', 'net-snmp', 'net-snmp-utils', '389-ds-base-snmp'])
|
||||
+
|
||||
+ # Prepare the lines to add/remove in files:
|
||||
+ # master agentx
|
||||
+ # snmp user (user_name - authentication_password - private_password)
|
||||
+ # ldap_agent ds instances
|
||||
+ #
|
||||
+ # Adding rwuser and createUser lines is the same as running:
|
||||
+ # net-snmp-create-v3-user -A authentication_password -a SHA -X private_password -x AES user_name
|
||||
+ # but has the advantage of removing the user at cleanup phase
|
||||
+ #
|
||||
+ agent_cfg = '/etc/dirsrv/config/ldap-agent.conf'
|
||||
+ lines_dict = { '/etc/snmp/snmpd.conf' : ['master agentx', f'rwuser {SNMP_USER}'],
|
||||
+ '/var/lib/net-snmp/snmpd.conf' : [
|
||||
+ f'createUser {SNMP_USER} SHA "{SNMP_PASSWORD}" AES "{SNMP_PRIVATE}"',],
|
||||
+ agent_cfg : [] }
|
||||
+ for inst in topo_m2:
|
||||
+ lines_dict[agent_cfg].append(f'server slapd-{inst.serverid}')
|
||||
+
|
||||
+ # Prepare the cleanup
|
||||
+ def fin():
|
||||
+ run_cmd(['systemctl', 'stop', 'dirsrv-snmp'])
|
||||
+ if not DEBUGGING:
|
||||
+ run_cmd(['systemctl', 'stop', 'snmpd'])
|
||||
+ try:
|
||||
+ os.remove('/usr/share/snmp/mibs/redhat-directory.mib')
|
||||
+ except FileNotFoundError:
|
||||
+ pass
|
||||
+ for filename,lines in lines_dict.items():
|
||||
+ remove_lines(lines, filename)
|
||||
+ run_cmd(['systemctl', 'start', 'snmpd'])
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Copy RHDS MIB in default MIB search path (Ugly because I have not found how to change the search path)
|
||||
+ copyfile('/usr/share/dirsrv/mibs/redhat-directory.mib', '/usr/share/snmp/mibs/redhat-directory.mib')
|
||||
+
|
||||
+ run_cmd(['systemctl', 'stop', 'snmpd'])
|
||||
+ for filename,lines in lines_dict.items():
|
||||
+ add_lines(lines, filename)
|
||||
+
|
||||
+ run_cmd(['systemctl', 'start', 'snmpd'])
|
||||
+
|
||||
+ curtime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
||||
+
|
||||
+ run_cmd(['systemctl', 'start', 'dirsrv-snmp'])
|
||||
+ return curtime
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(not os.path.exists('/usr/bin/snmpwalk'), reason="net-snmp-utils package is not installed")
|
||||
+def test_snmpwalk(topo_m2, setup_snmp):
|
||||
+ """snmp smoke tests.
|
||||
+
|
||||
+ :id: e5d29998-1c21-11ef-a654-482ae39447e5
|
||||
+ :setup: Two suppliers replication setup, snmp
|
||||
+ :steps:
|
||||
+ 1. use snmpwalk to display LDAP statistics
|
||||
+ 2. use snmpwalk to get the number of open connections
|
||||
+ :expectedresults:
|
||||
+ 1. Success and no messages in stderr
|
||||
+ 2. The number of open connections should be positive
|
||||
+ """
|
||||
+
|
||||
+ inst1 = topo_m2.ms["supplier1"]
|
||||
+ inst2 = topo_m2.ms["supplier2"]
|
||||
+
|
||||
+
|
||||
+ cmd = [ '/usr/bin/snmpwalk', '-v3', '-u', SNMP_USER, '-l', 'AuthPriv',
|
||||
+ '-m', '+RHDS-MIB', '-A', SNMP_PASSWORD, '-a', 'SHA',
|
||||
+ '-X', SNMP_PRIVATE, '-x', 'AES', 'localhost',
|
||||
+ LDAP_OID ]
|
||||
+ result = run_cmd(cmd)
|
||||
+ assert not result.stderr
|
||||
+
|
||||
+ cmd = [ '/usr/bin/snmpwalk', '-v3', '-u', SNMP_USER, '-l', 'AuthPriv',
|
||||
+ '-m', '+RHDS-MIB', '-A', SNMP_PASSWORD, '-a', 'SHA',
|
||||
+ '-X', SNMP_PRIVATE, '-x', 'AES', 'localhost',
|
||||
+ f'{LDAPCONNECTIONS_OID}.{inst1.port}', '-Ov' ]
|
||||
+ result = run_cmd(cmd)
|
||||
+ nbconns = int(result.stdout.split()[1])
|
||||
+ log.info(f'There are {nbconns} open connections on {inst1.serverid}')
|
||||
+ assert nbconns > 0
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(not selinux_present(), reason="SELinux is not enabled")
|
||||
+def test_snmp_avc(topo_m2, setup_snmp):
|
||||
+ """snmp smoke tests.
|
||||
+
|
||||
+ :id: fb79728e-1d0d-11ef-9213-482ae39447e5
|
||||
+ :setup: Two suppliers replication setup, snmp
|
||||
+ :steps:
|
||||
+ 1. Get the system journal about ldap-agent
|
||||
+ :expectedresults:
|
||||
+ 1. No AVC should be present
|
||||
+ """
|
||||
+ result = run_cmd(['journalctl', '-S', setup_snmp, '-g', 'ldap-agent'])
|
||||
+ assert not 'AVC' in result.stdout
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main("-s %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/slapd/agtmmap.c b/ldap/servers/slapd/agtmmap.c
|
||||
index bc5fe1ee1..4dc67dcfb 100644
|
||||
--- a/ldap/servers/slapd/agtmmap.c
|
||||
+++ b/ldap/servers/slapd/agtmmap.c
|
||||
@@ -34,6 +34,70 @@
|
||||
agt_mmap_context_t mmap_tbl[2] = {{AGT_MAP_UNINIT, -1, (caddr_t)-1},
|
||||
{AGT_MAP_UNINIT, -1, (caddr_t)-1}};
|
||||
|
||||
+#define CHECK_MAP_FAILURE(addr) ((addr)==NULL || (addr) == (caddr_t) -1)
|
||||
+
|
||||
+
|
||||
+/****************************************************************************
|
||||
+ *
|
||||
+ * agt_set_fmode () - try to increase file mode if some flags are missing.
|
||||
+ *
|
||||
+ *
|
||||
+ * Inputs:
|
||||
+ * fd -> The file descriptor.
|
||||
+ *
|
||||
+ * mode -> the wanted mode
|
||||
+ *
|
||||
+ * Outputs: None
|
||||
+ * Return Values: None
|
||||
+ *
|
||||
+ ****************************************************************************/
|
||||
+static void
|
||||
+agt_set_fmode(int fd, mode_t mode)
|
||||
+{
|
||||
+ /* ns-slapd umask is 0022 which is usually fine.
|
||||
+ * but ldap-agen needs S_IWGRP permission on snmp semaphore and mmap file
|
||||
+ * ( when SELinux is enforced process with uid=0 does not bypass the file permission
|
||||
+ * (unless the unfamous dac_override capability is set)
|
||||
+ * Changing umask could lead to race conditions so it is better to check the
|
||||
+ * file permission and change them if needed and if the process own the file.
|
||||
+ */
|
||||
+ struct stat fileinfo = {0};
|
||||
+ if (fstat(fd, &fileinfo) == 0 && fileinfo.st_uid == getuid() &&
|
||||
+ (fileinfo.st_mode & mode) != mode) {
|
||||
+ (void) fchmod(fd, fileinfo.st_mode | mode);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+/****************************************************************************
|
||||
+ *
|
||||
+ * agt_sem_open () - Like sem_open but ignores umask
|
||||
+ *
|
||||
+ *
|
||||
+ * Inputs: see sem_open man page.
|
||||
+ * Outputs: see sem_open man page.
|
||||
+ * Return Values: see sem_open man page.
|
||||
+ *
|
||||
+ ****************************************************************************/
|
||||
+sem_t *
|
||||
+agt_sem_open(const char *name, int oflag, mode_t mode, unsigned int value)
|
||||
+{
|
||||
+ sem_t *sem = sem_open(name, oflag, mode, value);
|
||||
+ char *semname = NULL;
|
||||
+
|
||||
+ if (sem != NULL) {
|
||||
+ if (asprintf(&semname, "/dev/shm/sem.%s", name+1) > 0) {
|
||||
+ int fd = open(semname, O_RDONLY);
|
||||
+ if (fd >= 0) {
|
||||
+ agt_set_fmode(fd, mode);
|
||||
+ (void) close(fd);
|
||||
+ }
|
||||
+ free(semname);
|
||||
+ semname = NULL;
|
||||
+ }
|
||||
+ }
|
||||
+ return sem;
|
||||
+}
|
||||
+
|
||||
/****************************************************************************
|
||||
*
|
||||
* agt_mopen_stats () - open and Memory Map the stats file. agt_mclose_stats()
|
||||
@@ -52,7 +116,6 @@ agt_mmap_context_t mmap_tbl[2] = {{AGT_MAP_UNINIT, -1, (caddr_t)-1},
|
||||
* as defined in <errno.h>, otherwise.
|
||||
*
|
||||
****************************************************************************/
|
||||
-
|
||||
int
|
||||
agt_mopen_stats(char *statsfile, int mode, int *hdl)
|
||||
{
|
||||
@@ -64,6 +127,7 @@ agt_mopen_stats(char *statsfile, int mode, int *hdl)
|
||||
int err;
|
||||
size_t sz;
|
||||
struct stat fileinfo;
|
||||
+ mode_t rw_mode = S_IWUSR | S_IRUSR | S_IRGRP | S_IWGRP | S_IROTH;
|
||||
|
||||
switch (mode) {
|
||||
case O_RDONLY:
|
||||
@@ -128,10 +192,7 @@ agt_mopen_stats(char *statsfile, int mode, int *hdl)
|
||||
break;
|
||||
|
||||
case O_RDWR:
|
||||
- fd = open(path,
|
||||
- O_RDWR | O_CREAT,
|
||||
- S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH);
|
||||
-
|
||||
+ fd = open(path, O_RDWR | O_CREAT, rw_mode);
|
||||
if (fd < 0) {
|
||||
err = errno;
|
||||
#if (0)
|
||||
@@ -140,6 +201,7 @@ agt_mopen_stats(char *statsfile, int mode, int *hdl)
|
||||
rc = err;
|
||||
goto bail;
|
||||
}
|
||||
+ agt_set_fmode(fd, rw_mode);
|
||||
|
||||
if (fstat(fd, &fileinfo) != 0) {
|
||||
close(fd);
|
||||
diff --git a/ldap/servers/slapd/agtmmap.h b/ldap/servers/slapd/agtmmap.h
|
||||
index fb27ab2c4..99a8584a3 100644
|
||||
--- a/ldap/servers/slapd/agtmmap.h
|
||||
+++ b/ldap/servers/slapd/agtmmap.h
|
||||
@@ -28,6 +28,7 @@
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <fcntl.h>
|
||||
+#include <semaphore.h>
|
||||
#include <errno.h>
|
||||
#include "nspr.h"
|
||||
|
||||
@@ -188,6 +189,18 @@ int agt_mclose_stats(int hdl);
|
||||
|
||||
int agt_mread_stats(int hdl, struct hdr_stats_t *, struct ops_stats_t *, struct entries_stats_t *);
|
||||
|
||||
+/****************************************************************************
|
||||
+ *
|
||||
+ * agt_sem_open () - Like sem_open but ignores umask
|
||||
+ *
|
||||
+ *
|
||||
+ * Inputs: see sem_open man page.
|
||||
+ * Outputs: see sem_open man page.
|
||||
+ * Return Values: see sem_open man page.
|
||||
+ *
|
||||
+ ****************************************************************************/
|
||||
+sem_t *agt_sem_open(const char *name, int oflag, mode_t mode, unsigned int value);
|
||||
+
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
|
||||
index b04fafde6..f1e48c6b1 100644
|
||||
--- a/ldap/servers/slapd/dse.c
|
||||
+++ b/ldap/servers/slapd/dse.c
|
||||
@@ -683,7 +683,7 @@ dse_read_one_file(struct dse *pdse, const char *filename, Slapi_PBlock *pb, int
|
||||
"The configuration file %s could not be accessed, error %d\n",
|
||||
filename, rc);
|
||||
rc = 0; /* Fail */
|
||||
- } else if ((prfd = PR_Open(filename, PR_RDONLY, SLAPD_DEFAULT_FILE_MODE)) == NULL) {
|
||||
+ } else if ((prfd = PR_Open(filename, PR_RDONLY, SLAPD_DEFAULT_DSE_FILE_MODE)) == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "dse_read_one_file",
|
||||
"The configuration file %s could not be read. " SLAPI_COMPONENT_NAME_NSPR " %d (%s)\n",
|
||||
filename,
|
||||
@@ -871,7 +871,7 @@ dse_rw_permission_to_one_file(const char *name, int loglevel)
|
||||
PRFileDesc *prfd;
|
||||
|
||||
prfd = PR_Open(name, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE,
|
||||
- SLAPD_DEFAULT_FILE_MODE);
|
||||
+ SLAPD_DEFAULT_DSE_FILE_MODE);
|
||||
if (NULL == prfd) {
|
||||
prerr = PR_GetError();
|
||||
accesstype = "create";
|
||||
@@ -970,7 +970,7 @@ dse_write_file_nolock(struct dse *pdse)
|
||||
fpw.fpw_prfd = NULL;
|
||||
|
||||
if (NULL != pdse->dse_filename) {
|
||||
- if ((fpw.fpw_prfd = PR_Open(pdse->dse_tmpfile, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, SLAPD_DEFAULT_FILE_MODE)) == NULL) {
|
||||
+ if ((fpw.fpw_prfd = PR_Open(pdse->dse_tmpfile, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, SLAPD_DEFAULT_DSE_FILE_MODE)) == NULL) {
|
||||
rc = PR_GetOSError();
|
||||
slapi_log_err(SLAPI_LOG_ERR, "dse_write_file_nolock", "Cannot open "
|
||||
"temporary DSE file \"%s\" for update: OS error %d (%s)\n",
|
||||
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
|
||||
index 469874fd1..927576b70 100644
|
||||
--- a/ldap/servers/slapd/slap.h
|
||||
+++ b/ldap/servers/slapd/slap.h
|
||||
@@ -238,6 +238,12 @@ typedef void (*VFPV)(); /* takes undefined arguments */
|
||||
*/
|
||||
|
||||
#define SLAPD_DEFAULT_FILE_MODE S_IRUSR | S_IWUSR
|
||||
+/* ldap_agent run as uid=root gid=dirsrv and requires S_IRGRP | S_IWGRP
|
||||
+ * on semaphore and mmap file if SELinux is enforced.
|
||||
+ */
|
||||
+#define SLAPD_DEFAULT_SNMP_FILE_MODE S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP
|
||||
+/* ldap_agent run as uid=root gid=dirsrv and requires S_IRGRP on dse.ldif if SELinux is enforced. */
|
||||
+#define SLAPD_DEFAULT_DSE_FILE_MODE S_IRUSR | S_IWUSR | S_IRGRP
|
||||
#define SLAPD_DEFAULT_DIR_MODE S_IRWXU
|
||||
#define SLAPD_DEFAULT_IDLE_TIMEOUT 3600 /* seconds - 0 == never */
|
||||
#define SLAPD_DEFAULT_IDLE_TIMEOUT_STR "3600"
|
||||
diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c
|
||||
index c998d4262..bd7020585 100644
|
||||
--- a/ldap/servers/slapd/snmp_collator.c
|
||||
+++ b/ldap/servers/slapd/snmp_collator.c
|
||||
@@ -474,7 +474,7 @@ static void
|
||||
snmp_collator_create_semaphore(void)
|
||||
{
|
||||
/* First just try to create the semaphore. This should usually just work. */
|
||||
- if ((stats_sem = sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_FILE_MODE, 1)) == SEM_FAILED) {
|
||||
+ if ((stats_sem = agt_sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_SNMP_FILE_MODE, 1)) == SEM_FAILED) {
|
||||
if (errno == EEXIST) {
|
||||
/* It appears that we didn't exit cleanly last time and left the semaphore
|
||||
* around. Recreate it since we don't know what state it is in. */
|
||||
@@ -486,7 +486,7 @@ snmp_collator_create_semaphore(void)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
- if ((stats_sem = sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_FILE_MODE, 1)) == SEM_FAILED) {
|
||||
+ if ((stats_sem = agt_sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_SNMP_FILE_MODE, 1)) == SEM_FAILED) {
|
||||
/* No dice */
|
||||
slapi_log_err(SLAPI_LOG_EMERG, "snmp_collator_create_semaphore",
|
||||
"Failed to create semaphore for stats file (/dev/shm/sem.%s). Error %d (%s).\n",
|
||||
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
|
||||
index 036664447..fca03383e 100644
|
||||
--- a/src/lib389/lib389/instance/setup.py
|
||||
+++ b/src/lib389/lib389/instance/setup.py
|
||||
@@ -10,6 +10,7 @@
|
||||
import os
|
||||
import sys
|
||||
import shutil
|
||||
+import stat
|
||||
import pwd
|
||||
import grp
|
||||
import re
|
||||
@@ -773,6 +774,10 @@ class SetupDs(object):
|
||||
ldapi_autobind="on",
|
||||
)
|
||||
file_dse.write(dse_fmt)
|
||||
+ # Set minimum permission required by snmp ldap-agent
|
||||
+ status = os.fstat(file_dse.fileno())
|
||||
+ os.fchmod(file_dse.fileno(), status.st_mode | stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP)
|
||||
+ os.chown(os.path.join(slapd['config_dir'], 'dse.ldif'), slapd['user_uid'], slapd['group_gid'])
|
||||
|
||||
self.log.info("Create file system structures ...")
|
||||
# Create all the needed paths
|
||||
diff --git a/wrappers/systemd-snmp.service.in b/wrappers/systemd-snmp.service.in
|
||||
index f18766cb4..d344367a0 100644
|
||||
--- a/wrappers/systemd-snmp.service.in
|
||||
+++ b/wrappers/systemd-snmp.service.in
|
||||
@@ -9,6 +9,7 @@ After=network.target
|
||||
|
||||
[Service]
|
||||
Type=forking
|
||||
+Group=@defaultgroup@
|
||||
PIDFile=/run/dirsrv/ldap-agent.pid
|
||||
ExecStart=@sbindir@/ldap-agent @configdir@/ldap-agent.conf
|
||||
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,60 +0,0 @@
|
||||
From 12870f410545fb055f664b588df2a2b7ab1c228e Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Mon, 4 Mar 2024 07:22:00 +0100
|
||||
Subject: [PATCH] Issue 5305 - OpenLDAP version autodetection doesn't work
|
||||
|
||||
Bug Description:
|
||||
An error is logged during a build in `mock` with Bash 4.4:
|
||||
|
||||
```
|
||||
checking for --with-libldap-r... ./configure: command substitution: line 22848: syntax error near unexpected token `>'
|
||||
./configure: command substitution: line 22848: `ldapsearch -VV 2> >(sed -n '/ldapsearch/ s/.*ldapsearch \([0-9]\+\.[0-9]\+\.[0-9]\+\) .*/\1/p')'
|
||||
no
|
||||
```
|
||||
|
||||
`mock` runs Bash as `sh` (POSIX mode). Support for process substitution
|
||||
in POSIX mode was added in version 5.1:
|
||||
https://lists.gnu.org/archive/html/bug-bash/2020-12/msg00002.html
|
||||
|
||||
> Process substitution is now available in posix mode.
|
||||
|
||||
Fix Description:
|
||||
* Add missing `BuildRequires` for openldap-clients
|
||||
* Replace process substitution with a pipe
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/5305
|
||||
|
||||
Reviewed by: @progier389, @tbordaz (Thanks!)
|
||||
---
|
||||
configure.ac | 2 +-
|
||||
rpm/389-ds-base.spec.in | 1 +
|
||||
2 files changed, 2 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/configure.ac b/configure.ac
|
||||
index ffc2aac14..a690765a3 100644
|
||||
--- a/configure.ac
|
||||
+++ b/configure.ac
|
||||
@@ -912,7 +912,7 @@ AC_ARG_WITH(libldap-r, AS_HELP_STRING([--with-libldap-r],[Use lldap_r shared lib
|
||||
AC_SUBST(with_libldap_r)
|
||||
fi
|
||||
],
|
||||
-OPENLDAP_VERSION=`ldapsearch -VV 2> >(sed -n '/ldapsearch/ s/.*ldapsearch \([[[0-9]]]\+\.[[[0-9]]]\+\.[[[0-9]]]\+\) .*/\1/p')`
|
||||
+OPENLDAP_VERSION=`ldapsearch -VV 2>&1 | sed -n '/ldapsearch/ s/.*ldapsearch \([[[0-9]]]\+\.[[[0-9]]]\+\.[[[0-9]]]\+\) .*/\1/p'`
|
||||
AX_COMPARE_VERSION([$OPENLDAP_VERSION], [lt], [2.5], [ with_libldap_r=yes ], [ with_libldap_r=no ])
|
||||
AC_MSG_RESULT($with_libldap_r))
|
||||
|
||||
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
|
||||
index cd86138ea..b8c14cd14 100644
|
||||
--- a/rpm/389-ds-base.spec.in
|
||||
+++ b/rpm/389-ds-base.spec.in
|
||||
@@ -65,6 +65,7 @@ Provides: ldif2ldbm
|
||||
# Attach the buildrequires to the top level package:
|
||||
BuildRequires: nspr-devel
|
||||
BuildRequires: nss-devel >= 3.34
|
||||
+BuildRequires: openldap-clients
|
||||
BuildRequires: openldap-devel
|
||||
BuildRequires: libdb-devel
|
||||
BuildRequires: cyrus-sasl-devel
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,245 +0,0 @@
|
||||
From eca6f5fe18f768fd407d38c85624a5212bcf16ab Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Wed, 27 Sep 2023 15:40:33 -0700
|
||||
Subject: [PATCH] Issue 1925 - Add a CI test (#5936)
|
||||
|
||||
Description: Verify that the issue is not present. Cover the scenario when
|
||||
we remove existing VLVs, create new VLVs (with the same name) and then
|
||||
we do online re-indexing.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/1925
|
||||
|
||||
Reviewed by: @progier389 (Thanks!)
|
||||
|
||||
(cherry picked from commit 9633e8d32d28345409680f8e462fb4a53d3b4f83)
|
||||
---
|
||||
.../tests/suites/vlv/regression_test.py | 175 +++++++++++++++---
|
||||
1 file changed, 145 insertions(+), 30 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
index 6ab709bd3..536fe950f 100644
|
||||
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2018 Red Hat, Inc.
|
||||
+# Copyright (C) 2023 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -9,12 +9,16 @@
|
||||
import pytest, time
|
||||
from lib389.tasks import *
|
||||
from lib389.utils import *
|
||||
-from lib389.topologies import topology_m2
|
||||
+from lib389.topologies import topology_m2, topology_st
|
||||
from lib389.replica import *
|
||||
from lib389._constants import *
|
||||
+from lib389.properties import TASK_WAIT
|
||||
from lib389.index import *
|
||||
from lib389.mappingTree import *
|
||||
from lib389.backend import *
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from ldap.controls.vlv import VLVRequestControl
|
||||
+from ldap.controls.sss import SSSRequestControl
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
@@ -22,6 +26,88 @@ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
+def open_new_ldapi_conn(dsinstance):
|
||||
+ ldapurl, certdir = get_ldapurl_from_serverid(dsinstance)
|
||||
+ assert 'ldapi://' in ldapurl
|
||||
+ conn = ldap.initialize(ldapurl)
|
||||
+ conn.sasl_interactive_bind_s("", ldap.sasl.external())
|
||||
+ return conn
|
||||
+
|
||||
+
|
||||
+def check_vlv_search(conn):
|
||||
+ before_count=1
|
||||
+ after_count=3
|
||||
+ offset=3501
|
||||
+
|
||||
+ vlv_control = VLVRequestControl(criticality=True,
|
||||
+ before_count=before_count,
|
||||
+ after_count=after_count,
|
||||
+ offset=offset,
|
||||
+ content_count=0,
|
||||
+ greater_than_or_equal=None,
|
||||
+ context_id=None)
|
||||
+
|
||||
+ sss_control = SSSRequestControl(criticality=True, ordering_rules=['cn'])
|
||||
+ result = conn.search_ext_s(
|
||||
+ base='dc=example,dc=com',
|
||||
+ scope=ldap.SCOPE_SUBTREE,
|
||||
+ filterstr='(uid=*)',
|
||||
+ serverctrls=[vlv_control, sss_control]
|
||||
+ )
|
||||
+ imin = offset + 998 - before_count
|
||||
+ imax = offset + 998 + after_count
|
||||
+
|
||||
+ for i, (dn, entry) in enumerate(result, start=imin):
|
||||
+ assert i <= imax
|
||||
+ expected_dn = f'uid=testuser{i},ou=People,dc=example,dc=com'
|
||||
+ log.debug(f'found {dn} expected {expected_dn}')
|
||||
+ assert dn.lower() == expected_dn.lower()
|
||||
+
|
||||
+
|
||||
+def add_users(inst, users_num):
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ log.info(f'Adding {users_num} users')
|
||||
+ for i in range(0, users_num):
|
||||
+ uid = 1000 + i
|
||||
+ user_properties = {
|
||||
+ 'uid': f'testuser{uid}',
|
||||
+ 'cn': f'testuser{uid}',
|
||||
+ 'sn': 'user',
|
||||
+ 'uidNumber': str(uid),
|
||||
+ 'gidNumber': str(uid),
|
||||
+ 'homeDirectory': f'/home/testuser{uid}'
|
||||
+ }
|
||||
+ users.create(properties=user_properties)
|
||||
+
|
||||
+
|
||||
+
|
||||
+def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
|
||||
+ scope=ldap.SCOPE_SUBTREE, prefix="vlv", vlvsort="cn"):
|
||||
+ vlv_searches = VLVSearch(inst)
|
||||
+ vlv_search_properties = {
|
||||
+ "objectclass": ["top", "vlvSearch"],
|
||||
+ "cn": f"{prefix}Srch",
|
||||
+ "vlvbase": basedn,
|
||||
+ "vlvfilter": "(uid=*)",
|
||||
+ "vlvscope": str(scope),
|
||||
+ }
|
||||
+ vlv_searches.create(
|
||||
+ basedn=f"cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
+ properties=vlv_search_properties
|
||||
+ )
|
||||
+
|
||||
+ vlv_index = VLVIndex(inst)
|
||||
+ vlv_index_properties = {
|
||||
+ "objectclass": ["top", "vlvIndex"],
|
||||
+ "cn": f"{prefix}Idx",
|
||||
+ "vlvsort": vlvsort,
|
||||
+ }
|
||||
+ vlv_index.create(
|
||||
+ basedn=f"cn={prefix}Srch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
+ properties=vlv_index_properties
|
||||
+ )
|
||||
+ return vlv_searches, vlv_index
|
||||
+
|
||||
class BackendHandler:
|
||||
def __init__(self, inst, bedict, scope=ldap.SCOPE_ONELEVEL):
|
||||
self.inst = inst
|
||||
@@ -101,34 +187,6 @@ class BackendHandler:
|
||||
'dn' : dn}
|
||||
|
||||
|
||||
-def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
|
||||
- scope=ldap.SCOPE_SUBTREE, prefix="vlv", vlvsort="cn"):
|
||||
- vlv_searches = VLVSearch(inst)
|
||||
- vlv_search_properties = {
|
||||
- "objectclass": ["top", "vlvSearch"],
|
||||
- "cn": f"{prefix}Srch",
|
||||
- "vlvbase": basedn,
|
||||
- "vlvfilter": "(uid=*)",
|
||||
- "vlvscope": str(scope),
|
||||
- }
|
||||
- vlv_searches.create(
|
||||
- basedn=f"cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
- properties=vlv_search_properties
|
||||
- )
|
||||
-
|
||||
- vlv_index = VLVIndex(inst)
|
||||
- vlv_index_properties = {
|
||||
- "objectclass": ["top", "vlvIndex"],
|
||||
- "cn": f"{prefix}Idx",
|
||||
- "vlvsort": vlvsort,
|
||||
- }
|
||||
- vlv_index.create(
|
||||
- basedn=f"cn={prefix}Srch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
- properties=vlv_index_properties
|
||||
- )
|
||||
- return vlv_searches, vlv_index
|
||||
-
|
||||
-
|
||||
@pytest.fixture
|
||||
def vlv_setup_with_uid_mr(topology_st, request):
|
||||
inst = topology_st.standalone
|
||||
@@ -245,6 +303,62 @@ def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
|
||||
entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")
|
||||
|
||||
|
||||
+def test_vlv_recreation_reindex(topology_st):
|
||||
+ """Test VLV recreation and reindexing.
|
||||
+
|
||||
+ :id: 29f4567f-4ac0-410f-bc99-a32e217a939f
|
||||
+ :setup: Standalone instance.
|
||||
+ :steps:
|
||||
+ 1. Create new VLVs and do the reindex.
|
||||
+ 2. Test the new VLVs.
|
||||
+ 3. Remove the existing VLVs.
|
||||
+ 4. Create new VLVs (with the same name).
|
||||
+ 5. Perform online re-indexing of the new VLVs.
|
||||
+ 6. Test the new VLVs.
|
||||
+ :expectedresults:
|
||||
+ 1. Should Success.
|
||||
+ 2. Should Success.
|
||||
+ 3. Should Success.
|
||||
+ 4. Should Success.
|
||||
+ 5. Should Success.
|
||||
+ 6. Should Success.
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+ reindex_task = Tasks(inst)
|
||||
+
|
||||
+ # Create and test VLVs
|
||||
+ vlv_search, vlv_index = create_vlv_search_and_index(inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=DEFAULT_SUFFIX,
|
||||
+ attrname=vlv_index.rdn,
|
||||
+ args={TASK_WAIT: True},
|
||||
+ vlv=True
|
||||
+ ) == 0
|
||||
+
|
||||
+ add_users(inst, 5000)
|
||||
+
|
||||
+ conn = open_new_ldapi_conn(inst.serverid)
|
||||
+ assert len(conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(cn=*)")) > 0
|
||||
+ check_vlv_search(conn)
|
||||
+
|
||||
+ # Remove and recreate VLVs
|
||||
+ vlv_index.delete()
|
||||
+ vlv_search.delete()
|
||||
+
|
||||
+ vlv_search, vlv_index = create_vlv_search_and_index(inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=DEFAULT_SUFFIX,
|
||||
+ attrname=vlv_index.rdn,
|
||||
+ args={TASK_WAIT: True},
|
||||
+ vlv=True
|
||||
+ ) == 0
|
||||
+
|
||||
+ conn = open_new_ldapi_conn(inst.serverid)
|
||||
+ assert len(conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(cn=*)")) > 0
|
||||
+ check_vlv_search(conn)
|
||||
+
|
||||
+
|
||||
def test_vlv_with_mr(vlv_setup_with_uid_mr):
|
||||
"""
|
||||
Testing vlv having specific matching rule
|
||||
@@ -288,6 +402,7 @@ def test_vlv_with_mr(vlv_setup_with_uid_mr):
|
||||
assert inst.status()
|
||||
|
||||
|
||||
+
|
||||
if __name__ == "__main__":
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,75 +0,0 @@
|
||||
From af3fa90f91efda86f4337e8823bca6581ab61792 Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Fri, 7 Feb 2025 09:43:08 +0100
|
||||
Subject: [PATCH] Issue 6494 - (2nd) Various errors when using extended
|
||||
matching rule on vlv sort filter
|
||||
|
||||
---
|
||||
.../tests/suites/indexes/regression_test.py | 40 +++++++++++++++++++
|
||||
1 file changed, 40 insertions(+)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
index 2196fb2ed..b5bcccc8f 100644
|
||||
--- a/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
@@ -11,17 +11,57 @@ import os
|
||||
import pytest
|
||||
import ldap
|
||||
from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX
|
||||
+from lib389.backend import Backend, Backends, DatabaseConfig
|
||||
from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate
|
||||
+from lib389.dbgen import dbgen_users
|
||||
from lib389.index import Indexes
|
||||
from lib389.backend import Backends
|
||||
from lib389.idm.user import UserAccounts
|
||||
from lib389.topologies import topology_st as topo
|
||||
from lib389.utils import ds_is_older
|
||||
from lib389.idm.nscontainer import nsContainer
|
||||
+from lib389.properties import TASK_WAIT
|
||||
+from lib389.tasks import Tasks, Task
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
|
||||
+SUFFIX2 = 'dc=example2,dc=com'
|
||||
+BENAME2 = 'be2'
|
||||
+
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
+
|
||||
+@pytest.fixture(scope="function")
|
||||
+def add_backend_and_ldif_50K_users(request, topo):
|
||||
+ """
|
||||
+ Add an empty backend and associated 50K users ldif file
|
||||
+ """
|
||||
+
|
||||
+ tasks = Tasks(topo.standalone)
|
||||
+ import_ldif = f'{topo.standalone.ldifdir}/be2_50K_users.ldif'
|
||||
+ be2 = Backend(topo.standalone)
|
||||
+ be2.create(properties={
|
||||
+ 'cn': BENAME2,
|
||||
+ 'nsslapd-suffix': SUFFIX2,
|
||||
+ },
|
||||
+ )
|
||||
+
|
||||
+ def fin():
|
||||
+ nonlocal be2
|
||||
+ if not DEBUGGING:
|
||||
+ be2.delete()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+ parent = f'ou=people,{SUFFIX2}'
|
||||
+ dbgen_users(topo.standalone, 50000, import_ldif, SUFFIX2, generic=True, parent=parent)
|
||||
+ assert tasks.importLDIF(
|
||||
+ suffix=SUFFIX2,
|
||||
+ input_file=import_ldif,
|
||||
+ args={TASK_WAIT: True}
|
||||
+ ) == 0
|
||||
+
|
||||
+ return import_ldif
|
||||
+
|
||||
@pytest.fixture(scope="function")
|
||||
def add_a_group_with_users(request, topo):
|
||||
"""
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,45 +0,0 @@
|
||||
From 0ad0eb34972c99f30334d7d420f3056e0e794d74 Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Fri, 7 Feb 2025 14:33:46 +0100
|
||||
Subject: [PATCH] Issue 6494 - (3rd) Various errors when using extended
|
||||
matching rule on vlv sort filter
|
||||
|
||||
(cherry picked from commit f2f917ca55c34c81b578bce1dd5275abff6abb72)
|
||||
---
|
||||
dirsrvtests/tests/suites/vlv/regression_test.py | 8 ++++++--
|
||||
1 file changed, 6 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
index 536fe950f..d069fdbaf 100644
|
||||
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
@@ -16,12 +16,16 @@ from lib389.properties import TASK_WAIT
|
||||
from lib389.index import *
|
||||
from lib389.mappingTree import *
|
||||
from lib389.backend import *
|
||||
-from lib389.idm.user import UserAccounts
|
||||
+from lib389.idm.user import UserAccounts, UserAccount
|
||||
+from lib389.idm.organization import Organization
|
||||
+from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
from ldap.controls.vlv import VLVRequestControl
|
||||
from ldap.controls.sss import SSSRequestControl
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
+DEMO_PW = 'secret12'
|
||||
+
|
||||
logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@@ -169,7 +173,7 @@ class BackendHandler:
|
||||
'loginShell': '/bin/false',
|
||||
'userpassword': DEMO_PW })
|
||||
# Add regular user
|
||||
- add_users(self.inst, 10, suffix=suffix)
|
||||
+ add_users(self.inst, 10)
|
||||
# Removing ou2
|
||||
ou2.delete()
|
||||
# And export
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,72 +0,0 @@
|
||||
From 52041811b200292af6670490c9ebc1f599439a22 Mon Sep 17 00:00:00 2001
|
||||
From: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
Date: Sat, 22 Mar 2025 01:25:25 +0900
|
||||
Subject: [PATCH] Issue 6494 - (4th) Various errors when using extended
|
||||
matching rule on vlv sort filter
|
||||
|
||||
test_vlv_with_mr uses the vlv_setup_with_uid_mr fixture to set up the backend
and test users. The add_users function is called in beh.setup without any
suffix for the created backend, so add_users always creates the test users
in DEFAULT_SUFFIX. Another test, such as test_vlv_recreation_reindex, can
create the same test user in DEFAULT_SUFFIX, which caused the ALREADY_EXISTS
failure in the test_vlv_with_mr test.

In the main branch, add_users has a suffix argument: test users are created
under the specified suffix, and the backend is cleaned up after the test.
This PR follows the same implementation.

Also, suppressing ldap.ALREADY_EXISTS makes the add_users function easier
to reuse.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/6494
|
||||
---
|
||||
dirsrvtests/tests/suites/vlv/regression_test.py | 11 ++++++-----
|
||||
1 file changed, 6 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
index d069fdbaf..e9408117b 100644
|
||||
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
@@ -21,6 +21,7 @@ from lib389.idm.organization import Organization
|
||||
from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
from ldap.controls.vlv import VLVRequestControl
|
||||
from ldap.controls.sss import SSSRequestControl
|
||||
+from contextlib import suppress
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
@@ -68,8 +69,8 @@ def check_vlv_search(conn):
|
||||
assert dn.lower() == expected_dn.lower()
|
||||
|
||||
|
||||
-def add_users(inst, users_num):
|
||||
- users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+def add_users(inst, users_num, suffix=DEFAULT_SUFFIX):
|
||||
+ users = UserAccounts(inst, suffix)
|
||||
log.info(f'Adding {users_num} users')
|
||||
for i in range(0, users_num):
|
||||
uid = 1000 + i
|
||||
@@ -81,8 +82,8 @@ def add_users(inst, users_num):
|
||||
'gidNumber': str(uid),
|
||||
'homeDirectory': f'/home/testuser{uid}'
|
||||
}
|
||||
- users.create(properties=user_properties)
|
||||
-
|
||||
+ with suppress(ldap.ALREADY_EXISTS):
|
||||
+ users.create(properties=user_properties)
|
||||
|
||||
|
||||
def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
|
||||
@@ -173,7 +174,7 @@ class BackendHandler:
|
||||
'loginShell': '/bin/false',
|
||||
'userpassword': DEMO_PW })
|
||||
# Add regular user
|
||||
- add_users(self.inst, 10)
|
||||
+ add_users(self.inst, 10, suffix=suffix)
|
||||
# Removing ou2
|
||||
ou2.delete()
|
||||
# And export
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,357 +0,0 @@
|
||||
From b812afe4da6db134c1221eb48a6155480e4c2cb3 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 14 Jan 2025 13:55:03 -0500
|
||||
Subject: [PATCH] Issue 6497 - lib389 - Configure replication for multiple
|
||||
suffixes (#6498)
|
||||
|
||||
Bug Description: When trying to set up replication across multiple suffixes -
|
||||
particularly if one of those suffixes is a subsuffix - lib389 fails to properly
|
||||
configure the replication agreements, service accounts, and required groups.
|
||||
The references to the replication_managers group and service account
|
||||
naming do not correctly account for non-default additional suffixes.
|
||||
|
||||
Fix Description: Ensure replication DNs and credentials are correctly tied to each suffix.
|
||||
Enable the DSLdapObject.present method to compare values as
normalized DNs if they are DNs.
|
||||
Add a test (test_multi_subsuffix_replication) to verify multi-suffix
|
||||
replication across four suppliers.
|
||||
Fix tests that are related to repl service accounts.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6497
|
||||
|
||||
Reviewed: @progier389 (Thanks!)
|
||||
---
|
||||
.../tests/suites/ds_tools/replcheck_test.py | 4 +-
|
||||
.../suites/replication/acceptance_test.py | 153 ++++++++++++++++++
|
||||
.../cleanallruv_shutdown_crash_test.py | 4 +-
|
||||
.../suites/replication/regression_m2_test.py | 2 +-
|
||||
.../replication/tls_client_auth_repl_test.py | 4 +-
|
||||
src/lib389/lib389/_mapped_object.py | 21 ++-
|
||||
src/lib389/lib389/replica.py | 10 +-
|
||||
7 files changed, 182 insertions(+), 16 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_tools/replcheck_test.py b/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
|
||||
index f61fc432d..dfa1d9423 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
|
||||
@@ -67,10 +67,10 @@ def topo_tls_ldapi(topo):
|
||||
|
||||
# Create the replication dns
|
||||
services = ServiceAccounts(m1, DEFAULT_SUFFIX)
|
||||
- repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
|
||||
+ repl_m1 = services.get(f'{DEFAULT_SUFFIX}:{m1.host}:{m1.sslport}')
|
||||
repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())
|
||||
|
||||
- repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
|
||||
+ repl_m2 = services.get(f'{DEFAULT_SUFFIX}:{m2.host}:{m2.sslport}')
|
||||
repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())
|
||||
|
||||
# Check the replication is "done".
|
||||
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
index d1cfa8bdb..fc8622051 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
@@ -9,6 +9,7 @@
|
||||
import pytest
|
||||
import logging
|
||||
import time
|
||||
+from lib389.backend import Backend
|
||||
from lib389.replica import Replicas
|
||||
from lib389.tasks import *
|
||||
from lib389.utils import *
|
||||
@@ -325,6 +326,158 @@ def test_modify_stripattrs(topo_m4):
|
||||
assert attr_value in entries[0].data['nsds5replicastripattrs']
|
||||
|
||||
|
||||
+def test_multi_subsuffix_replication(topo_m4):
|
||||
+ """Check that replication works with multiple subsuffixes
|
||||
+
|
||||
+ :id: ac1aaeae-173e-48e7-847f-03b9867443c4
|
||||
+ :setup: Four suppliers replication setup
|
||||
+ :steps:
|
||||
+ 1. Create additional suffixes
|
||||
+ 2. Setup replication for all suppliers
|
||||
+ 3. Generate test data for each suffix (add, modify, remove)
|
||||
+ 4. Wait for replication to complete across all suppliers for each suffix
|
||||
+ 5. Check that all expected data is present on all suppliers
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success (the data is replicated everywhere)
|
||||
+ """
|
||||
+
|
||||
+ SUFFIX_2 = "dc=test2"
|
||||
+ SUFFIX_3 = f"dc=test3,{DEFAULT_SUFFIX}"
|
||||
+ all_suffixes = [DEFAULT_SUFFIX, SUFFIX_2, SUFFIX_3]
|
||||
+
|
||||
+ test_users_by_suffix = {suffix: [] for suffix in all_suffixes}
|
||||
+ created_backends = []
|
||||
+
|
||||
+ suppliers = [
|
||||
+ topo_m4.ms["supplier1"],
|
||||
+ topo_m4.ms["supplier2"],
|
||||
+ topo_m4.ms["supplier3"],
|
||||
+ topo_m4.ms["supplier4"]
|
||||
+ ]
|
||||
+
|
||||
+ try:
|
||||
+ # Setup additional backends and replication for the new suffixes
|
||||
+ for suffix in [SUFFIX_2, SUFFIX_3]:
|
||||
+ repl = ReplicationManager(suffix)
|
||||
+ for supplier in suppliers:
|
||||
+ # Create a new backend for this suffix
|
||||
+ props = {
|
||||
+ 'cn': f'userRoot_{suffix.split(",")[0][3:]}',
|
||||
+ 'nsslapd-suffix': suffix
|
||||
+ }
|
||||
+ be = Backend(supplier)
|
||||
+ be.create(properties=props)
|
||||
+ be.create_sample_entries('001004002')
|
||||
+
|
||||
+ # Track the backend so we can remove it later
|
||||
+ created_backends.append((supplier, props['cn']))
|
||||
+
|
||||
+ # Enable replication
|
||||
+ if supplier == suppliers[0]:
|
||||
+ repl.create_first_supplier(supplier)
|
||||
+ else:
|
||||
+ repl.join_supplier(suppliers[0], supplier)
|
||||
+
|
||||
+ # Create a full mesh topology for this suffix
|
||||
+ for i, supplier_i in enumerate(suppliers):
|
||||
+ for j, supplier_j in enumerate(suppliers):
|
||||
+ if i != j:
|
||||
+ repl.ensure_agreement(supplier_i, supplier_j)
|
||||
+
|
||||
+ # Generate test data for each suffix (add, modify, remove)
|
||||
+ for suffix in all_suffixes:
|
||||
+ # Create some user entries in supplier1
|
||||
+ for i in range(20):
|
||||
+ user_dn = f'uid=test_user_{i},{suffix}'
|
||||
+ test_user = UserAccount(suppliers[0], user_dn)
|
||||
+ test_user.create(properties={
|
||||
+ 'uid': f'test_user_{i}',
|
||||
+ 'cn': f'Test User {i}',
|
||||
+ 'sn': f'User{i}',
|
||||
+ 'userPassword': 'password',
|
||||
+ 'uidNumber': str(1000 + i),
|
||||
+ 'gidNumber': '2000',
|
||||
+ 'homeDirectory': f'/home/test_user_{i}'
|
||||
+ })
|
||||
+ test_users_by_suffix[suffix].append(test_user)
|
||||
+
|
||||
+ # Perform modifications on these entries
|
||||
+ for user in test_users_by_suffix[suffix]:
|
||||
+ # Add some attributes
|
||||
+ for j in range(3):
|
||||
+ user.add('description', f'Description {j}')
|
||||
+ # Replace an attribute
|
||||
+ user.replace('cn', f'Modified User {user.get_attr_val_utf8("uid")}')
|
||||
+ # Delete the attributes we added
|
||||
+ for j in range(3):
|
||||
+ try:
|
||||
+ user.remove('description', f'Description {j}')
|
||||
+ except Exception:
|
||||
+ pass
|
||||
+
|
||||
+ # Wait for replication to complete across all suppliers, for each suffix
|
||||
+ for suffix in all_suffixes:
|
||||
+ repl = ReplicationManager(suffix)
|
||||
+ for i, supplier_i in enumerate(suppliers):
|
||||
+ for j, supplier_j in enumerate(suppliers):
|
||||
+ if i != j:
|
||||
+ repl.wait_for_replication(supplier_i, supplier_j)
|
||||
+
|
||||
+ # Verify that each user and modification replicated to all suppliers
|
||||
+ for suffix in all_suffixes:
|
||||
+ for i in range(20):
|
||||
+ user_dn = f'uid=test_user_{i},{suffix}'
|
||||
+ # Retrieve this user from all suppliers
|
||||
+ all_user_objs = topo_m4.all_get_dsldapobject(user_dn, UserAccount)
|
||||
+ # Ensure it exists in all 4 suppliers
|
||||
+ assert len(all_user_objs) == 4, (
|
||||
+ f"User {user_dn} not found on all suppliers. "
|
||||
+ f"Found only on {len(all_user_objs)} suppliers."
|
||||
+ )
|
||||
+ # Check modifications: 'cn' should now be 'Modified User test_user_{i}'
|
||||
+ for user_obj in all_user_objs:
|
||||
+ expected_cn = f"Modified User test_user_{i}"
|
||||
+ actual_cn = user_obj.get_attr_val_utf8("cn")
|
||||
+ assert actual_cn == expected_cn, (
|
||||
+ f"User {user_dn} has unexpected 'cn': {actual_cn} "
|
||||
+ f"(expected '{expected_cn}') on supplier {user_obj._instance.serverid}"
|
||||
+ )
|
||||
+ # And check that 'description' attributes were removed
|
||||
+ desc_vals = user_obj.get_attr_vals_utf8('description')
|
||||
+ for j in range(3):
|
||||
+ assert f"Description {j}" not in desc_vals, (
|
||||
+ f"User {user_dn} on supplier {user_obj._instance.serverid} "
|
||||
+ f"still has 'Description {j}'"
|
||||
+ )
|
||||
+ finally:
|
||||
+ for suffix, test_users in test_users_by_suffix.items():
|
||||
+ for user in test_users:
|
||||
+ try:
|
||||
+ if user.exists():
|
||||
+ user.delete()
|
||||
+ except Exception:
|
||||
+ pass
|
||||
+
|
||||
+ for suffix in [SUFFIX_2, SUFFIX_3]:
|
||||
+ repl = ReplicationManager(suffix)
|
||||
+ for supplier in suppliers:
|
||||
+ try:
|
||||
+ repl.remove_supplier(supplier)
|
||||
+ except Exception:
|
||||
+ pass
|
||||
+
|
||||
+ for (supplier, backend_name) in created_backends:
|
||||
+ be = Backend(supplier, backend_name)
|
||||
+ try:
|
||||
+ be.delete()
|
||||
+ except Exception:
|
||||
+ pass
|
||||
+
|
||||
+
|
||||
def test_new_suffix(topo_m4, new_suffix):
|
||||
"""Check that we can enable replication on a new suffix
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py
|
||||
index b4b74e339..fe9955e7e 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py
|
||||
@@ -66,10 +66,10 @@ def test_clean_shutdown_crash(topology_m2):
|
||||
|
||||
log.info('Creating replication dns')
|
||||
services = ServiceAccounts(m1, DEFAULT_SUFFIX)
|
||||
- repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
|
||||
+ repl_m1 = services.get(f'{DEFAULT_SUFFIX}:{m1.host}:{m1.sslport}')
|
||||
repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())
|
||||
|
||||
- repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
|
||||
+ repl_m2 = services.get(f'{DEFAULT_SUFFIX}:{m2.host}:{m2.sslport}')
|
||||
repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())
|
||||
|
||||
log.info('Changing auth type')
|
||||
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
index 72d4b9f89..9c707615f 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
@@ -64,7 +64,7 @@ class _AgmtHelper:
|
||||
self.binddn = f'cn={cn},cn=config'
|
||||
else:
|
||||
self.usedn = False
|
||||
- self.cn = f'{self.from_inst.host}:{self.from_inst.sslport}'
|
||||
+ self.cn = ldap.dn.escape_dn_chars(f'{DEFAULT_SUFFIX}:{self.from_inst.host}:{self.from_inst.sslport}')
|
||||
self.binddn = f'cn={self.cn}, ou=Services, {DEFAULT_SUFFIX}'
|
||||
self.original_state = []
|
||||
self._pass = False
|
||||
diff --git a/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py b/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py
|
||||
index a00dc5b78..ca17554c7 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py
|
||||
@@ -56,10 +56,10 @@ def tls_client_auth(topo_m2):
|
||||
|
||||
# Create the replication dns
|
||||
services = ServiceAccounts(m1, DEFAULT_SUFFIX)
|
||||
- repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
|
||||
+ repl_m1 = services.get(f'{DEFAULT_SUFFIX}:{m1.host}:{m1.sslport}')
|
||||
repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())
|
||||
|
||||
- repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
|
||||
+ repl_m2 = services.get(f'{DEFAULT_SUFFIX}:{m2.host}:{m2.sslport}')
|
||||
repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())
|
||||
|
||||
# Check the replication is "done".
|
||||
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
|
||||
index b7391d8cc..ae00c95d0 100644
|
||||
--- a/src/lib389/lib389/_mapped_object.py
|
||||
+++ b/src/lib389/lib389/_mapped_object.py
|
||||
@@ -19,7 +19,7 @@ from lib389._constants import DIRSRV_STATE_ONLINE
|
||||
from lib389._mapped_object_lint import DSLint, DSLints
|
||||
from lib389.utils import (
|
||||
ensure_bytes, ensure_str, ensure_int, ensure_list_bytes, ensure_list_str,
|
||||
- ensure_list_int, display_log_value, display_log_data
|
||||
+ ensure_list_int, display_log_value, display_log_data, is_a_dn, normalizeDN
|
||||
)
|
||||
|
||||
# This function filter and term generation provided thanks to
|
||||
@@ -292,15 +292,28 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
_search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[attr, ],
|
||||
serverctrls=self._server_controls, clientctrls=self._client_controls,
|
||||
escapehatch='i am sure')[0]
|
||||
- values = self.get_attr_vals_bytes(attr)
|
||||
+ values = self.get_attr_vals_utf8(attr)
|
||||
self._log.debug("%s contains %s" % (self._dn, values))
|
||||
|
||||
if value is None:
|
||||
# We are just checking if SOMETHING is present ....
|
||||
return len(values) > 0
|
||||
+
|
||||
+ # Otherwise, we are checking a specific value
|
||||
+ if is_a_dn(value):
|
||||
+ normalized_value = normalizeDN(value)
|
||||
else:
|
||||
- # Check if a value really does exist.
|
||||
- return ensure_bytes(value).lower() in [x.lower() for x in values]
|
||||
+ normalized_value = ensure_bytes(value).lower()
|
||||
+
|
||||
+ # Normalize each returned value depending on whether it is a DN
|
||||
+ normalized_values = []
|
||||
+ for v in values:
|
||||
+ if is_a_dn(v):
|
||||
+ normalized_values.append(normalizeDN(v))
|
||||
+ else:
|
||||
+ normalized_values.append(ensure_bytes(v.lower()))
|
||||
+
|
||||
+ return normalized_value in normalized_values
|
||||
|
||||
def add(self, key, value):
|
||||
"""Add an attribute with a value
|
||||
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
|
||||
index 1f321972d..cd46e86d5 100644
|
||||
--- a/src/lib389/lib389/replica.py
|
||||
+++ b/src/lib389/lib389/replica.py
|
||||
@@ -2011,7 +2011,7 @@ class ReplicationManager(object):
|
||||
return repl_group
|
||||
else:
|
||||
try:
|
||||
- repl_group = groups.get('replication_managers')
|
||||
+ repl_group = groups.get(dn=f'cn=replication_managers,{self._suffix}')
|
||||
return repl_group
|
||||
except ldap.NO_SUCH_OBJECT:
|
||||
self._log.warning("{} doesn't have cn=replication_managers,{} entry \
|
||||
@@ -2035,7 +2035,7 @@ class ReplicationManager(object):
|
||||
services = ServiceAccounts(from_instance, self._suffix)
|
||||
# Generate the password and save the credentials
|
||||
# for putting them into agreements in the future
|
||||
- service_name = '{}:{}'.format(to_instance.host, port)
|
||||
+ service_name = f'{self._suffix}:{to_instance.host}:{port}'
|
||||
creds = password_generate()
|
||||
repl_service = services.ensure_state(properties={
|
||||
'cn': service_name,
|
||||
@@ -2299,7 +2299,7 @@ class ReplicationManager(object):
|
||||
Internal Only.
|
||||
"""
|
||||
|
||||
- rdn = '{}:{}'.format(from_instance.host, from_instance.sslport)
|
||||
+ rdn = f'{self._suffix}:{from_instance.host}:{from_instance.sslport}'
|
||||
try:
|
||||
creds = self._repl_creds[rdn]
|
||||
except KeyError:
|
||||
@@ -2499,8 +2499,8 @@ class ReplicationManager(object):
|
||||
# Touch something then wait_for_replication.
|
||||
from_groups = Groups(from_instance, basedn=self._suffix, rdn=None)
|
||||
to_groups = Groups(to_instance, basedn=self._suffix, rdn=None)
|
||||
- from_group = from_groups.get('replication_managers')
|
||||
- to_group = to_groups.get('replication_managers')
|
||||
+ from_group = from_groups.get(dn=f'cn=replication_managers,{self._suffix}')
|
||||
+ to_group = to_groups.get(dn=f'cn=replication_managers,{self._suffix}')
|
||||
|
||||
change = str(uuid.uuid4())
|
||||
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,126 +0,0 @@
|
||||
From ebe986c78c6cd4e1f10172d8a8a11faf814fbc22 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Thu, 6 Mar 2025 16:49:53 -0500
|
||||
Subject: [PATCH] Issue 6655 - fix replication release replica decoding error
|
||||
|
||||
Description:
|
||||
|
||||
When a start replication session extended op is received, acquire and
release exclusive access before returning the result to the client.
Otherwise there is a race condition where an "end" replication extended
op can arrive before the replica is released, and that leads to a
decoding error on the other replica.

Relates: https://github.com/389ds/389-ds-base/issues/6655

Reviewed by: spichugi, tbordaz, and vashirov (Thanks!!!)
|
||||
---
|
||||
.../suites/replication/acceptance_test.py | 12 ++++++++++
|
||||
ldap/servers/plugins/replication/repl_extop.c | 24 ++++++++++++-------
|
||||
2 files changed, 27 insertions(+), 9 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
index fc8622051..0f18edb44 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
@@ -1,5 +1,9 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+<<<<<<< HEAD
|
||||
# Copyright (C) 2021 Red Hat, Inc.
|
||||
+=======
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+>>>>>>> a623c3f90 (Issue 6655 - fix replication release replica decoding error)
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -453,6 +457,13 @@ def test_multi_subsuffix_replication(topo_m4):
|
||||
f"User {user_dn} on supplier {user_obj._instance.serverid} "
|
||||
f"still has 'Description {j}'"
|
||||
)
|
||||
+
|
||||
+ # Check there are no decoding errors
|
||||
+ assert not topo_m4.ms["supplier1"].ds_error_log.match('.*decoding failed.*')
|
||||
+ assert not topo_m4.ms["supplier2"].ds_error_log.match('.*decoding failed.*')
|
||||
+ assert not topo_m4.ms["supplier3"].ds_error_log.match('.*decoding failed.*')
|
||||
+ assert not topo_m4.ms["supplier4"].ds_error_log.match('.*decoding failed.*')
|
||||
+
|
||||
finally:
|
||||
for suffix, test_users in test_users_by_suffix.items():
|
||||
for user in test_users:
|
||||
@@ -507,6 +518,7 @@ def test_new_suffix(topo_m4, new_suffix):
|
||||
repl.remove_supplier(m1)
|
||||
repl.remove_supplier(m2)
|
||||
|
||||
+
|
||||
def test_many_attrs(topo_m4, create_entry):
|
||||
"""Check a replication with many attributes (add and delete)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
|
||||
index 14b756df1..dacc611c0 100644
|
||||
--- a/ldap/servers/plugins/replication/repl_extop.c
|
||||
+++ b/ldap/servers/plugins/replication/repl_extop.c
|
||||
@@ -1134,6 +1134,12 @@ send_response:
|
||||
slapi_pblock_set(pb, SLAPI_EXT_OP_RET_OID, REPL_NSDS50_REPLICATION_RESPONSE_OID);
|
||||
}
|
||||
|
||||
+ /* connext (release our hold on it at least) */
|
||||
+ if (NULL != connext) {
|
||||
+ /* don't free it, just let go of it */
|
||||
+ consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
|
||||
+ }
|
||||
+
|
||||
slapi_pblock_set(pb, SLAPI_EXT_OP_RET_VALUE, resp_bval);
|
||||
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
"multimaster_extop_StartNSDS50ReplicationRequest - "
|
||||
@@ -1251,12 +1257,6 @@ send_response:
|
||||
if (NULL != ruv_bervals) {
|
||||
ber_bvecfree(ruv_bervals);
|
||||
}
|
||||
- /* connext (our hold on it at least) */
|
||||
- if (NULL != connext) {
|
||||
- /* don't free it, just let go of it */
|
||||
- consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
|
||||
- connext = NULL;
|
||||
- }
|
||||
|
||||
return return_value;
|
||||
}
|
||||
@@ -1389,6 +1389,13 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
|
||||
}
|
||||
}
|
||||
send_response:
|
||||
+ /* connext (release our hold on it at least) */
|
||||
+ if (NULL != connext) {
|
||||
+ /* don't free it, just let go of it */
|
||||
+ consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
|
||||
+ connext = NULL;
|
||||
+ }
|
||||
+
|
||||
/* Send the response code */
|
||||
if ((resp_bere = der_alloc()) == NULL) {
|
||||
goto free_and_return;
|
||||
@@ -1419,11 +1426,10 @@ free_and_return:
|
||||
if (NULL != resp_bval) {
|
||||
ber_bvfree(resp_bval);
|
||||
}
|
||||
- /* connext (our hold on it at least) */
|
||||
+ /* connext (release our hold on it if not already released) */
|
||||
if (NULL != connext) {
|
||||
/* don't free it, just let go of it */
|
||||
consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
|
||||
- connext = NULL;
|
||||
}
|
||||
|
||||
return return_value;
|
||||
@@ -1516,7 +1522,7 @@ multimaster_extop_abort_cleanruv(Slapi_PBlock *pb)
|
||||
rid);
|
||||
}
|
||||
/*
|
||||
- * Get the replica
|
||||
+ * Get the replica
|
||||
*/
|
||||
if ((r = replica_get_replica_from_root(repl_root)) == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multimaster_extop_abort_cleanruv - "
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,26 +0,0 @@
|
||||
From 5b12463bfeb518f016acb14bc118b5f8ad3eef5e Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Thu, 15 May 2025 09:22:22 +0200
|
||||
Subject: [PATCH] Issue 6655 - fix merge conflict
|
||||
|
||||
---
|
||||
dirsrvtests/tests/suites/replication/acceptance_test.py | 4 ----
|
||||
1 file changed, 4 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
index 0f18edb44..6b5186127 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
@@ -1,9 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-<<<<<<< HEAD
|
||||
-# Copyright (C) 2021 Red Hat, Inc.
|
||||
-=======
|
||||
# Copyright (C) 2025 Red Hat, Inc.
|
||||
->>>>>>> a623c3f90 (Issue 6655 - fix replication release replica decoding error)
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,291 +0,0 @@
|
||||
From 8d62124fb4d0700378b6f0669cc9d47338a8151c Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Tue, 25 Mar 2025 09:20:50 +0100
|
||||
Subject: [PATCH] Issue 6571 - Nested group does not receive memberOf attribute
|
||||
(#6679)
|
||||
|
||||
Bug description:
|
||||
There is a risk to create a loop in group membership.
|
||||
For example G2 is member of G1 and G1 is member of G2.
|
||||
The memberOf plugin iterates from a node to its ancestors
|
||||
to update the 'memberof' values of the node.
|
||||
The plugin uses a valueset ('already_seen_ndn_vals')
|
||||
to keep track of the nodes it has already visited.
|
||||
It uses this valueset to detect a possible loop and
|
||||
in that case it does not add the ancestor as the
|
||||
memberof value of the node.
|
||||
This is an error in case there are multiple paths
|
||||
up to an ancestor.
|
||||
|
||||
Fix description:
|
||||
The ancestor should be added to the node systematically,
|
||||
and, if the ancestor is in 'already_seen_ndn_vals',
|
||||
only the final recursion is skipped.
|
||||
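The rule the fix adopts (always record the ancestor, and use the already-seen set only to stop the recursion) can be sketched on the multi-path graph used by the new test_multipaths test. This is a toy Python walk with the test's entry names, not the plugin's C implementation.

```python
# entry -> groups it is a direct member of
direct_groups = {
    "user1":   ["group1", "group21"],
    "group21": ["group2"],
    "group2":  ["group1"],        # group1 -> group2 -> group21 -> user1
    "group1":  [],                #   plus the direct edge group1 -> user1
}

def member_of(entry):
    seen, result = set(), []
    def walk(node):
        for parent in direct_groups[node]:
            if parent not in result:
                result.append(parent)      # always record the ancestor (the fix)
            if parent in seen:
                continue                   # already visited: only the recursion is skipped
            seen.add(parent)
            walk(parent)
    walk(entry)
    return result

for entry in ("user1", "group21", "group2", "group1"):
    print(entry, "memberOf:", member_of(entry))
# user1   memberOf: ['group1', 'group21', 'group2']
# group21 memberOf: ['group2', 'group1']
# group2  memberOf: ['group1']
# group1  memberOf: []
```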
|
||||
fixes: #6571
|
||||
|
||||
Reviewed by: Pierre Rogier, Mark Reynolds (Thanks !!!)
|
||||
---
|
||||
.../suites/memberof_plugin/regression_test.py | 109 ++++++++++++++++++
|
||||
.../tests/suites/plugins/memberof_test.py | 5 +
|
||||
ldap/servers/plugins/memberof/memberof.c | 52 ++++-----
|
||||
3 files changed, 137 insertions(+), 29 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
index 4c681a909..dba908975 100644
|
||||
--- a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
@@ -467,6 +467,21 @@ def _find_memberof_ext(server, user_dn=None, group_dn=None, find_result=True):
|
||||
else:
|
||||
assert (not found)
|
||||
|
||||
+def _check_membership(server, entry, expected_members, expected_memberof):
|
||||
+ assert server
|
||||
+ assert entry
|
||||
+
|
||||
+ memberof = entry.get_attr_vals('memberof')
|
||||
+ member = entry.get_attr_vals('member')
|
||||
+ assert len(member) == len(expected_members)
|
||||
+ assert len(memberof) == len(expected_memberof)
|
||||
+ for e in expected_members:
|
||||
+ server.log.info("Checking %s has member %s" % (entry.dn, e.dn))
|
||||
+ assert e.dn.encode() in member
|
||||
+ for e in expected_memberof:
|
||||
+ server.log.info("Checking %s is member of %s" % (entry.dn, e.dn))
|
||||
+ assert e.dn.encode() in memberof
|
||||
+
|
||||
|
||||
@pytest.mark.ds49161
|
||||
def test_memberof_group(topology_st):
|
||||
@@ -535,6 +550,100 @@ def test_memberof_group(topology_st):
|
||||
_find_memberof_ext(inst, dn1, g2n, True)
|
||||
_find_memberof_ext(inst, dn2, g2n, True)
|
||||
|
||||
+def test_multipaths(topology_st, request):
|
||||
+ """Test memberof succeeds to update memberof when
|
||||
+ there are multiple paths from a leaf to an intermediate node
|
||||
+
|
||||
+ :id: 35aa704a-b895-4153-9dcb-1e8a13612ebf
|
||||
+
|
||||
+ :setup: Single instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Create a graph G1->U1, G2->G21->U1
|
||||
+ 2. Add G2 as member of G1: G1->U1, G1->G2->G21->U1
|
||||
+ 3. Check members and memberof in entries G1,G2,G21,User1
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Graph should be created
|
||||
+ 2. succeed
|
||||
+ 3. Membership is okay
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+ memberof = MemberOfPlugin(inst)
|
||||
+ memberof.enable()
|
||||
+ memberof.replace('memberOfEntryScope', SUFFIX)
|
||||
+ if (memberof.get_memberofdeferredupdate() and memberof.get_memberofdeferredupdate().lower() == "on"):
|
||||
+ delay = 3
|
||||
+ else:
|
||||
+ delay = 0
|
||||
+ inst.restart()
|
||||
+
|
||||
+ #
|
||||
+ # Create the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ---------------> User1
|
||||
+ # ^
|
||||
+ # /
|
||||
+ # Grp2 ----> Grp21 ------/
|
||||
+ #
|
||||
+ users = UserAccounts(inst, SUFFIX, rdn=None)
|
||||
+ user1 = users.create(properties={'uid': "user1",
|
||||
+ 'cn': "user1",
|
||||
+ 'sn': 'SN',
|
||||
+ 'description': 'leaf',
|
||||
+ 'uidNumber': '1000',
|
||||
+ 'gidNumber': '2000',
|
||||
+ 'homeDirectory': '/home/user1'
|
||||
+ })
|
||||
+ group = Groups(inst, SUFFIX, rdn=None)
|
||||
+ g1 = group.create(properties={'cn': 'group1',
|
||||
+ 'member': user1.dn,
|
||||
+ 'description': 'group1'})
|
||||
+ g21 = group.create(properties={'cn': 'group21',
|
||||
+ 'member': user1.dn,
|
||||
+ 'description': 'group21'})
|
||||
+ g2 = group.create(properties={'cn': 'group2',
|
||||
+ 'member': [g21.dn],
|
||||
+ 'description': 'group2'})
|
||||
+
|
||||
+ # Enable debug logs if necessary
|
||||
+ #inst.config.replace('nsslapd-errorlog-level', '65536')
|
||||
+ #inst.config.set('nsslapd-accesslog-level','260')
|
||||
+ #inst.config.set('nsslapd-plugin-logging', 'on')
|
||||
+ #inst.config.set('nsslapd-auditlog-logging-enabled','on')
|
||||
+ #inst.config.set('nsslapd-auditfaillog-logging-enabled','on')
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # \ ^
|
||||
+ # \ /
|
||||
+ # --> Grp2 --> Grp21 --
|
||||
+ #
|
||||
+ g1.add_member(g2.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g1, expected_members=[g2, user1], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g1])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
+
|
||||
+ def fin():
|
||||
+ try:
|
||||
+ user1.delete()
|
||||
+ g1.delete()
|
||||
+ g2.delete()
|
||||
+ g21.delete()
|
||||
+ except:
|
||||
+ pass
|
||||
+ request.addfinalizer(fin)
|
||||
|
||||
def _config_memberof_entrycache_on_modrdn_failure(server):
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/plugins/memberof_test.py b/dirsrvtests/tests/suites/plugins/memberof_test.py
|
||||
index 2de1389fd..621c45daf 100644
|
||||
--- a/dirsrvtests/tests/suites/plugins/memberof_test.py
|
||||
+++ b/dirsrvtests/tests/suites/plugins/memberof_test.py
|
||||
@@ -2168,9 +2168,14 @@ def test_complex_group_scenario_6(topology_st):
|
||||
|
||||
# add Grp[1-4] (uniqueMember) to grp5
|
||||
# it creates a membership loop !!!
|
||||
+ topology_st.standalone.config.replace('nsslapd-errorlog-level', '65536')
|
||||
mods = [(ldap.MOD_ADD, 'uniqueMember', memofegrp020_5)]
|
||||
for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
|
||||
topology_st.standalone.modify_s(ensure_str(grp), mods)
|
||||
+ topology_st.standalone.config.replace('nsslapd-errorlog-level', '0')
|
||||
+
|
||||
+ results = topology_st.standalone.ds_error_log.match('.*detecting a loop in group.*')
|
||||
+ assert results
|
||||
|
||||
time.sleep(5)
|
||||
# assert user[1-4] are member of grp20_[1-4]
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index e75b99b14..32bdcf3f1 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -1592,7 +1592,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
|
||||
ht_grp = ancestors_cache_lookup(config, (const void *)ndn);
|
||||
if (ht_grp) {
|
||||
#if MEMBEROF_CACHE_DEBUG
|
||||
- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%x)\n", ndn, ht_grp);
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%lx)\n", ndn, (ulong) ht_grp);
|
||||
#endif
|
||||
add_ancestors_cbdata(ht_grp, callback_data);
|
||||
*cached = 1;
|
||||
@@ -1600,7 +1600,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
|
||||
}
|
||||
}
|
||||
#if MEMBEROF_CACHE_DEBUG
|
||||
- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s not cached\n", ndn);
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s not cached\n", slapi_sdn_get_ndn(sdn));
|
||||
#endif
|
||||
|
||||
/* Escape the dn, and build the search filter. */
|
||||
@@ -3233,7 +3233,8 @@ cache_ancestors(MemberOfConfig *config, Slapi_Value **member_ndn_val, memberof_g
|
||||
return;
|
||||
}
|
||||
#if MEMBEROF_CACHE_DEBUG
|
||||
- if (double_check = ancestors_cache_lookup(config, (const void*) key)) {
|
||||
+ double_check = ancestors_cache_lookup(config, (const void*) key);
|
||||
+ if (double_check) {
|
||||
dump_cache_entry(double_check, "read back");
|
||||
}
|
||||
#endif
|
||||
@@ -3263,13 +3264,13 @@ merge_ancestors(Slapi_Value **member_ndn_val, memberof_get_groups_data *v1, memb
|
||||
sval_dn = slapi_value_new_string(slapi_value_get_string(sval));
|
||||
if (sval_dn) {
|
||||
/* Use the normalized dn from v1 to search it
|
||||
- * in v2
|
||||
- */
|
||||
+ * in v2
|
||||
+ */
|
||||
val_sdn = slapi_sdn_new_dn_byval(slapi_value_get_string(sval_dn));
|
||||
sval_ndn = slapi_value_new_string(slapi_sdn_get_ndn(val_sdn));
|
||||
if (!slapi_valueset_find(
|
||||
((memberof_get_groups_data *)v2)->config->group_slapiattrs[0], v2_group_norm_vals, sval_ndn)) {
|
||||
-/* This ancestor was not already present in v2 => Add it
|
||||
+ /* This ancestor was not already present in v2 => Add it
|
||||
* Using slapi_valueset_add_value it consumes val
|
||||
* so do not free sval
|
||||
*/
|
||||
@@ -3318,7 +3319,7 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn, memberof_get
|
||||
|
||||
merge_ancestors(&member_ndn_val, &member_data, data);
|
||||
if (!cached && member_data.use_cache)
|
||||
- cache_ancestors(config, &member_ndn_val, &member_data);
|
||||
+ cache_ancestors(config, &member_ndn_val, data);
|
||||
|
||||
slapi_value_free(&member_ndn_val);
|
||||
slapi_valueset_free(groupvals);
|
||||
@@ -3379,25 +3380,6 @@ memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
|
||||
goto bail;
|
||||
}
|
||||
|
||||
- /* Have we been here before? Note that we don't loop through all of the group_slapiattrs
|
||||
- * in config. We only need this attribute for it's syntax so the comparison can be
|
||||
- * performed. Since all of the grouping attributes are validated to use the Dinstinguished
|
||||
- * Name syntax, we can safely just use the first group_slapiattr. */
|
||||
- if (slapi_valueset_find(
|
||||
- ((memberof_get_groups_data *)callback_data)->config->group_slapiattrs[0], already_seen_ndn_vals, group_ndn_val)) {
|
||||
- /* we either hit a recursive grouping, or an entry is
|
||||
- * a member of a group through multiple paths. Either
|
||||
- * way, we can just skip processing this entry since we've
|
||||
- * already gone through this part of the grouping hierarchy. */
|
||||
- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
- "memberof_get_groups_callback - Possible group recursion"
|
||||
- " detected in %s\n",
|
||||
- group_ndn);
|
||||
- slapi_value_free(&group_ndn_val);
|
||||
- ((memberof_get_groups_data *)callback_data)->use_cache = PR_FALSE;
|
||||
- goto bail;
|
||||
- }
|
||||
-
|
||||
/* if the group does not belong to an excluded subtree, adds it to the valueset */
|
||||
if (memberof_entry_in_scope(config, group_sdn)) {
|
||||
/* Push group_dn_val into the valueset. This memory is now owned
|
||||
@@ -3407,9 +3389,21 @@ memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
|
||||
group_dn_val = slapi_value_new_string(group_dn);
|
||||
slapi_valueset_add_value_ext(groupvals, group_dn_val, SLAPI_VALUE_FLAG_PASSIN);
|
||||
|
||||
- /* push this ndn to detect group recursion */
|
||||
- already_seen_ndn_val = slapi_value_new_string(group_ndn);
|
||||
- slapi_valueset_add_value_ext(already_seen_ndn_vals, already_seen_ndn_val, SLAPI_VALUE_FLAG_PASSIN);
|
||||
+ if (slapi_valueset_find(
|
||||
+ ((memberof_get_groups_data *)callback_data)->config->group_slapiattrs[0], already_seen_ndn_vals, group_ndn_val)) {
|
||||
+ /* The group group_ndn_val has already been processed
|
||||
+ * skip the final recursion to prevent infinite loop
|
||||
+ */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "memberof_get_groups_callback - detecting a loop in group %s (stop building memberof)\n",
|
||||
+ group_ndn);
|
||||
+ ((memberof_get_groups_data *)callback_data)->use_cache = PR_FALSE;
|
||||
+ goto bail;
|
||||
+ } else {
|
||||
+ /* keep this ndn to detect a possible group recursion */
|
||||
+ already_seen_ndn_val = slapi_value_new_string(group_ndn);
|
||||
+ slapi_valueset_add_value_ext(already_seen_ndn_vals, already_seen_ndn_val, SLAPI_VALUE_FLAG_PASSIN);
|
||||
+ }
|
||||
}
|
||||
if (!config->skip_nested || config->fixup_task) {
|
||||
/* now recurse to find ancestors groups of e */
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,272 +0,0 @@
|
||||
From 17da0257b24749765777a4e64c3626cb39cca639 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 31 Mar 2025 11:05:01 +0200
|
||||
Subject: [PATCH] Issue 6571 - (2nd) Nested group does not receive memberOf
|
||||
attribute (#6697)
|
||||
|
||||
Bug description:
|
||||
An erroneous debug change was made in the previous fix,
|
||||
where cache_ancestors is called with the wrong parameter
|
||||
|
||||
Fix description:
|
||||
Restore the original param 'member_data'
|
||||
Increase the set of tests around multipaths
|
||||
|
||||
fixes: #6571
|
||||
|
||||
review by: Simon Pichugin (Thanks !!)
|
||||
---
|
||||
.../suites/memberof_plugin/regression_test.py | 154 ++++++++++++++++++
|
||||
ldap/servers/plugins/memberof/memberof.c | 50 +++++-
|
||||
2 files changed, 203 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
index dba908975..9ba40a0c3 100644
|
||||
--- a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
@@ -598,6 +598,8 @@ def test_multipaths(topology_st, request):
|
||||
'homeDirectory': '/home/user1'
|
||||
})
|
||||
group = Groups(inst, SUFFIX, rdn=None)
|
||||
+ g0 = group.create(properties={'cn': 'group0',
|
||||
+ 'description': 'group0'})
|
||||
g1 = group.create(properties={'cn': 'group1',
|
||||
'member': user1.dn,
|
||||
'description': 'group1'})
|
||||
@@ -635,6 +637,158 @@ def test_multipaths(topology_st, request):
|
||||
_check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1])
|
||||
_check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
|
||||
+ #inst.config.replace('nsslapd-errorlog-level', '65536')
|
||||
+ #inst.config.set('nsslapd-accesslog-level','260')
|
||||
+ #inst.config.set('nsslapd-plugin-logging', 'on')
|
||||
+ #inst.config.set('nsslapd-auditlog-logging-enabled','on')
|
||||
+ #inst.config.set('nsslapd-auditfaillog-logging-enabled','on')
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # ^
|
||||
+ # /
|
||||
+ # Grp2 --> Grp21 --
|
||||
+ #
|
||||
+ g1.remove_member(g2.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g1, expected_members=[user1], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # \__________ ^
|
||||
+ # | /
|
||||
+ # v /
|
||||
+ # Grp2 --> Grp21 ----
|
||||
+ #
|
||||
+ g1.add_member(g21.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g1, expected_members=[user1, g21], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # ^
|
||||
+ # /
|
||||
+ # Grp2 --> Grp21 --
|
||||
+ #
|
||||
+ g1.remove_member(g21.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g1, expected_members=[user1], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # ^
|
||||
+ # /
|
||||
+ # Grp0 ---> Grp2 ---> Grp21 ---
|
||||
+ #
|
||||
+ g0.add_member(g2.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G0,G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g0, expected_members=[g2], expected_memberof=[])
|
||||
+ _check_membership(inst, g1, expected_members=[user1], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g0])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g2])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1, g0])
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # ^ ^
|
||||
+ # / /
|
||||
+ # Grp0 ---> Grp2 ---> Grp21 ---
|
||||
+ #
|
||||
+ g0.add_member(g1.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G0,G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g0, expected_members=[g1,g2], expected_memberof=[])
|
||||
+ _check_membership(inst, g1, expected_members=[user1], expected_memberof=[g0])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g0])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g2])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1, g0])
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # ^ \_____________ ^
|
||||
+ # / | /
|
||||
+ # / V /
|
||||
+ # Grp0 ---> Grp2 ---> Grp21 ---
|
||||
+ #
|
||||
+ g1.add_member(g21.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G0,G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g0, expected_members=[g1, g2], expected_memberof=[])
|
||||
+ _check_membership(inst, g1, expected_members=[user1, g21], expected_memberof=[g0])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g0])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g1, g2])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1, g0])
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # ^ \_____________ ^
|
||||
+ # / | /
|
||||
+ # / V /
|
||||
+ # Grp0 ---> Grp2 Grp21 ---
|
||||
+ #
|
||||
+ g2.remove_member(g21.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G0,G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g0, expected_members=[g1, g2], expected_memberof=[])
|
||||
+ _check_membership(inst, g1, expected_members=[user1, g21], expected_memberof=[g0])
|
||||
+ _check_membership(inst, g2, expected_members=[], expected_memberof=[g0])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g1])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g1, g0])
|
||||
+
|
||||
def fin():
|
||||
try:
|
||||
user1.delete()
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index 32bdcf3f1..f79b083a9 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -3258,6 +3258,35 @@ merge_ancestors(Slapi_Value **member_ndn_val, memberof_get_groups_data *v1, memb
|
||||
Slapi_ValueSet *v2_group_norm_vals = *((memberof_get_groups_data *)v2)->group_norm_vals;
|
||||
int merged_cnt = 0;
|
||||
|
||||
+#if MEMBEROF_CACHE_DEBUG
|
||||
+ {
|
||||
+ Slapi_Value *val = 0;
|
||||
+ int hint = 0;
|
||||
+ struct berval *bv;
|
||||
+ hint = slapi_valueset_first_value(v2_groupvals, &val);
|
||||
+ while (val) {
|
||||
+ /* this makes a copy of the berval */
|
||||
+ bv = slapi_value_get_berval(val);
|
||||
+ if (bv && bv->bv_len) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "merge_ancestors: V2 contains %s\n",
|
||||
+ bv->bv_val);
|
||||
+ }
|
||||
+ hint = slapi_valueset_next_value(v2_groupvals, hint, &val);
|
||||
+ }
|
||||
+ hint = slapi_valueset_first_value(v1_groupvals, &val);
|
||||
+ while (val) {
|
||||
+ /* this makes a copy of the berval */
|
||||
+ bv = slapi_value_get_berval(val);
|
||||
+ if (bv && bv->bv_len) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "merge_ancestors: add %s (from V1)\n",
|
||||
+ bv->bv_val);
|
||||
+ }
|
||||
+ hint = slapi_valueset_next_value(v1_groupvals, hint, &val);
|
||||
+ }
|
||||
+ }
|
||||
+#endif
|
||||
hint = slapi_valueset_first_value(v1_groupvals, &sval);
|
||||
while (sval) {
|
||||
if (memberof_compare(config, member_ndn_val, &sval)) {
|
||||
@@ -3319,7 +3348,7 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn, memberof_get
|
||||
|
||||
merge_ancestors(&member_ndn_val, &member_data, data);
|
||||
if (!cached && member_data.use_cache)
|
||||
- cache_ancestors(config, &member_ndn_val, data);
|
||||
+ cache_ancestors(config, &member_ndn_val, &member_data);
|
||||
|
||||
slapi_value_free(&member_ndn_val);
|
||||
slapi_valueset_free(groupvals);
|
||||
@@ -4285,6 +4314,25 @@ memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data)
|
||||
|
||||
/* get a list of all of the groups this user belongs to */
|
||||
groups = memberof_get_groups(config, sdn);
|
||||
+#if MEMBEROF_CACHE_DEBUG
|
||||
+ {
|
||||
+ Slapi_Value *val = 0;
|
||||
+ int hint = 0;
|
||||
+ struct berval *bv;
|
||||
+ hint = slapi_valueset_first_value(groups, &val);
|
||||
+ while (val) {
|
||||
+ /* this makes a copy of the berval */
|
||||
+ bv = slapi_value_get_berval(val);
|
||||
+ if (bv && bv->bv_len) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "memberof_fix_memberof_callback: %s belongs to %s\n",
|
||||
+ ndn,
|
||||
+ bv->bv_val);
|
||||
+ }
|
||||
+ hint = slapi_valueset_next_value(groups, hint, &val);
|
||||
+ }
|
||||
+ }
|
||||
+#endif
|
||||
|
||||
if (config->group_filter) {
|
||||
if (slapi_filter_test_simple(e, config->group_filter)) {
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,192 +0,0 @@
|
||||
From ff364a4b1c88e1a8f678e056af88cce50cd8717c Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Fri, 28 Mar 2025 17:32:14 +0100
|
||||
Subject: [PATCH] Issue 6698 - NPE after configuring invalid filtered role
|
||||
(#6699)
|
||||
|
||||
Server crash when doing search after configuring filtered role with invalid filter.
|
||||
Reason: The parts of the filter that should be overwritten are freed before knowing whether the new filter is valid.
|
||||
Solution: Check first that the filter is valid before freeing the filter bits
|
||||
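A conceptual Python sketch of the ordering the fix adopts in slapi_filter_replace_strfilter: keep the old filter parts alive until the replacement is known to parse, and drop them only on success. The class and the toy parser below are made up for illustration and are not the slapd C API.

```python
class FilteredRole:
    """Toy model: keep the old filter alive until the new one is known to parse."""

    def __init__(self, filter_text):
        self.filter = self._parse(filter_text)

    @staticmethod
    def _parse(text):
        # Stand-in for the real filter parser: reject obviously unbalanced filters.
        if text.count("(") != text.count(")"):
            raise ValueError("invalid filter: " + text)
        return {"text": text}

    def replace_filter(self, new_text):
        old = self.filter                      # do NOT discard the old parts yet
        try:
            new = self._parse(new_text)
        except ValueError:
            return False                       # old filter is still intact and usable
        self.filter = new
        del old                                # only now is it safe to drop the old bits
        return True

role = FilteredRole("(&(objectClass=top)(objectClass=nsPerson))")
ok = role.replace_filter("(&((objectClass=top)(objectClass=nsPerson))")  # unbalanced
print(ok, role.filter["text"])   # False, and later searches still see a valid filter
```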
|
||||
Issue: #6698
|
||||
|
||||
Reviewed by: @tbordaz , @mreynolds389 (Thanks!)
|
||||
|
||||
(cherry picked from commit 31e120d2349eda7a41380cf78fc04cf41e394359)
|
||||
---
|
||||
dirsrvtests/tests/suites/roles/basic_test.py | 80 ++++++++++++++++++--
|
||||
ldap/servers/slapd/filter.c | 17 ++++-
|
||||
2 files changed, 88 insertions(+), 9 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
index 875ac47c1..b79816c58 100644
|
||||
--- a/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
@@ -28,6 +28,7 @@ from lib389.dbgen import dbgen_users
|
||||
from lib389.tasks import ImportTask
|
||||
from lib389.utils import get_default_db_lib
|
||||
from lib389.rewriters import *
|
||||
+from lib389._mapped_object import DSLdapObject
|
||||
from lib389.backend import Backends
|
||||
|
||||
logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
@@ -427,7 +428,6 @@ def test_vattr_on_filtered_role_restart(topo, request):
|
||||
log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
|
||||
assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
|
||||
|
||||
-
|
||||
log.info("Check the virtual attribute definition is found (after a required delay)")
|
||||
topo.standalone.restart()
|
||||
time.sleep(5)
|
||||
@@ -541,7 +541,7 @@ def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
indexes = backend.get_indexes()
|
||||
try:
|
||||
index = indexes.create(properties={
|
||||
- 'cn': attrname,
|
||||
+ 'cn': attrname,
|
||||
'nsSystemIndex': 'false',
|
||||
'nsIndexType': ['eq', 'pres']
|
||||
})
|
||||
@@ -593,7 +593,6 @@ def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
dn = "uid=%s0000%d,%s" % (RDN, i, PARENT)
|
||||
topo.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsRoleDN', [role.dn.encode()])])
|
||||
|
||||
-
|
||||
# Now check that search is fast, evaluating only 4 entries
|
||||
search_start = time.time()
|
||||
entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
@@ -676,7 +675,7 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
indexes = backend.get_indexes()
|
||||
try:
|
||||
index = indexes.create(properties={
|
||||
- 'cn': attrname,
|
||||
+ 'cn': attrname,
|
||||
'nsSystemIndex': 'false',
|
||||
'nsIndexType': ['eq', 'pres']
|
||||
})
|
||||
@@ -730,7 +729,7 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
|
||||
# Enable plugin level to check message
|
||||
topo.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.PLUGIN))
|
||||
-
|
||||
+
|
||||
# Now check that search is fast, evaluating only 4 entries
|
||||
search_start = time.time()
|
||||
entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(|(nsrole=%s)(nsrole=cn=not_such_entry_role,%s))" % (role.dn, DEFAULT_SUFFIX))
|
||||
@@ -758,6 +757,77 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+
|
||||
+def test_rewriter_with_invalid_filter(topo, request):
|
||||
+ """Test that server does not crash when having
|
||||
+ invalid filter in filtered role
|
||||
+
|
||||
+ :id: 5013b0b2-0af6-11f0-8684-482ae39447e5
|
||||
+ :setup: standalone server
|
||||
+ :steps:
|
||||
+ 1. Setup filtered role with good filter
|
||||
+ 2. Setup nsrole rewriter
|
||||
+ 3. Restart the server
|
||||
+ 4. Search for entries
|
||||
+ 5. Setup filtered role with bad filter
|
||||
+ 6. Search for entries
|
||||
+ :expectedresults:
|
||||
+ 1. Operation should succeed
|
||||
+ 2. Operation should succeed
|
||||
+ 3. Operation should succeed
|
||||
+ 4. Operation should succeed
|
||||
+ 5. Operation should succeed
|
||||
+ 6. Operation should succeed
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ entries = []
|
||||
+
|
||||
+ def fin():
|
||||
+ inst.start()
|
||||
+ for entry in entries:
|
||||
+ entry.delete()
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Setup filtered role
|
||||
+ roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
|
||||
+ filter_ko = '(&((objectClass=top)(objectClass=nsPerson))'
|
||||
+ filter_ok = '(&(objectClass=top)(objectClass=nsPerson))'
|
||||
+ role_properties = {
|
||||
+ 'cn': 'TestFilteredRole',
|
||||
+ 'nsRoleFilter': filter_ok,
|
||||
+ 'description': 'Test good filter',
|
||||
+ }
|
||||
+ role = roles.create(properties=role_properties)
|
||||
+ entries.append(role)
|
||||
+
|
||||
+ # Setup nsrole rewriter
|
||||
+ rewriters = Rewriters(inst)
|
||||
+ rewriter_properties = {
|
||||
+ "cn": "nsrole",
|
||||
+ "nsslapd-libpath": 'libroles-plugin',
|
||||
+ "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter',
|
||||
+ }
|
||||
+ rewriter = rewriters.ensure_state(properties=rewriter_properties)
|
||||
+ entries.append(rewriter)
|
||||
+
|
||||
+ # Restart the instance
|
||||
+ inst.restart()
|
||||
+
|
||||
+ # Search for entries
|
||||
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
+
|
||||
+ # Set bad filter
|
||||
+ role_properties = {
|
||||
+ 'cn': 'TestFilteredRole',
|
||||
+ 'nsRoleFilter': filter_ko,
|
||||
+ 'description': 'Test bad filter',
|
||||
+ }
|
||||
+ role.ensure_state(properties=role_properties)
|
||||
+
|
||||
+ # Search for entries
|
||||
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
+
|
||||
+
|
||||
if __name__ == "__main__":
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main("-s -v %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/slapd/filter.c b/ldap/servers/slapd/filter.c
|
||||
index ce09891b8..f541b8fc1 100644
|
||||
--- a/ldap/servers/slapd/filter.c
|
||||
+++ b/ldap/servers/slapd/filter.c
|
||||
@@ -1038,9 +1038,11 @@ slapi_filter_get_subfilt(
|
||||
}
|
||||
|
||||
/*
|
||||
- * Before calling this function, you must free all the parts
|
||||
+ * The function does not know how to free all the parts
|
||||
* which will be overwritten (i.e. slapi_free_the_filter_bits),
|
||||
- * this function dosn't know how to do that
|
||||
+ * so the caller must take care of that.
|
||||
+ * But it must do so AFTER calling slapi_filter_replace_ex to
|
||||
+ * avoid getting invalid filter if slapi_filter_replace_ex fails.
|
||||
*/
|
||||
int
|
||||
slapi_filter_replace_ex(Slapi_Filter *f, char *s)
|
||||
@@ -1099,8 +1101,15 @@ slapi_filter_free_bits(Slapi_Filter *f)
|
||||
int
|
||||
slapi_filter_replace_strfilter(Slapi_Filter *f, char *strfilter)
|
||||
{
|
||||
- slapi_filter_free_bits(f);
|
||||
- return (slapi_filter_replace_ex(f, strfilter));
|
||||
+ /* slapi_filter_replace_ex may fail and we cannot
|
||||
+ * free filter bits before calling it.
|
||||
+ */
|
||||
+ Slapi_Filter save_f = *f;
|
||||
+ int ret = slapi_filter_replace_ex(f, strfilter);
|
||||
+ if (ret == 0) {
|
||||
+ slapi_filter_free_bits(&save_f);
|
||||
+ }
|
||||
+ return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,455 +0,0 @@
|
||||
From 446a23d0ed2d3ffa76c5fb5e9576d6876bdbf04f Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Fri, 28 Mar 2025 11:28:54 -0700
|
||||
Subject: [PATCH] Issue 6686 - CLI - Re-enabling user accounts that reached
|
||||
inactivity limit fails with error (#6687)
|
||||
|
||||
Description: When attempting to unlock a user account that has been locked due
|
||||
to exceeding the Account Policy Plugin's inactivity limit, the dsidm account
|
||||
unlock command fails with a Python type error: "float() argument must be a
|
||||
string or a number, not 'NoneType'".
|
||||
|
||||
Enhance the unlock method to properly handle different account locking states,
|
||||
including inactivity limit exceeded states.
|
||||
Add test cases to verify account inactivity locking/unlocking functionality
|
||||
with CoS and role-based indirect locking.
|
||||
|
||||
Fix CoS template class to include the required 'ldapsubentry' objectClass.
|
||||
Improve error messages to provide better guidance on unlocking indirectly
|
||||
locked accounts.
|
||||
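A minimal usage sketch of the resulting lib389 behaviour, assuming an already connected DirSrv handle `inst`; the suffix and user DN below are placeholders, not values from this patch.

```python
from lib389.idm.account import Accounts, AccountState

accts = Accounts(inst, "dc=example,dc=com")
acct = accts.get(dn="uid=test_inactive_user,dc=example,dc=com")

state = acct.status()["state"]
if state == AccountState.INACTIVITY_LIMIT_EXCEEDED:
    acct.unlock()          # resets lastLoginTime instead of touching nsAccountLock
elif state == AccountState.DIRECTLY_LOCKED:
    acct.unlock()          # removes nsAccountLock as before
elif state == AccountState.INDIRECTLY_LOCKED:
    pass                   # unlock() raises ValueError: the locking role must be edited
```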
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6686
|
||||
|
||||
Reviewed by: @mreynolds389 (Thanks!)
|
||||
---
|
||||
.../clu/dsidm_account_inactivity_test.py | 329 ++++++++++++++++++
|
||||
src/lib389/lib389/cli_idm/account.py | 25 +-
|
||||
src/lib389/lib389/idm/account.py | 28 +-
|
||||
3 files changed, 377 insertions(+), 5 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py b/dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py
|
||||
new file mode 100644
|
||||
index 000000000..88a34abf6
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py
|
||||
@@ -0,0 +1,329 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import ldap
|
||||
+import time
|
||||
+import pytest
|
||||
+import logging
|
||||
+import os
|
||||
+from datetime import datetime, timedelta
|
||||
+
|
||||
+from lib389 import DEFAULT_SUFFIX, DN_PLUGIN, DN_CONFIG
|
||||
+from lib389.cli_idm.account import entry_status, unlock
|
||||
+from lib389.topologies import topology_st
|
||||
+from lib389.cli_base import FakeArgs
|
||||
+from lib389.utils import ds_is_older
|
||||
+from lib389.plugins import AccountPolicyPlugin, AccountPolicyConfigs
|
||||
+from lib389.idm.role import FilteredRoles
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.cos import CosTemplate, CosPointerDefinition
|
||||
+from lib389.idm.domain import Domain
|
||||
+from . import check_value_in_log_and_reset
|
||||
+
|
||||
+pytestmark = pytest.mark.tier0
|
||||
+
|
||||
+logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+# Constants
|
||||
+PLUGIN_ACCT_POLICY = "Account Policy Plugin"
|
||||
+ACCP_DN = f"cn={PLUGIN_ACCT_POLICY},{DN_PLUGIN}"
|
||||
+ACCP_CONF = f"{DN_CONFIG},{ACCP_DN}"
|
||||
+POLICY_NAME = "Account Inactivity Policy"
|
||||
+POLICY_DN = f"cn={POLICY_NAME},{DEFAULT_SUFFIX}"
|
||||
+COS_TEMPLATE_NAME = "TemplateCoS"
|
||||
+COS_TEMPLATE_DN = f"cn={COS_TEMPLATE_NAME},{DEFAULT_SUFFIX}"
|
||||
+COS_DEFINITION_NAME = "DefinitionCoS"
|
||||
+COS_DEFINITION_DN = f"cn={COS_DEFINITION_NAME},{DEFAULT_SUFFIX}"
|
||||
+TEST_USER_NAME = "test_inactive_user"
|
||||
+TEST_USER_DN = f"uid={TEST_USER_NAME},{DEFAULT_SUFFIX}"
|
||||
+TEST_USER_PW = "password"
|
||||
+INACTIVITY_LIMIT = 30
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="function")
|
||||
+def account_policy_setup(topology_st, request):
|
||||
+ """Set up account policy plugin, configuration, and CoS objects"""
|
||||
+ log.info("Setting up Account Policy Plugin and CoS")
|
||||
+
|
||||
+ # Enable Account Policy Plugin
|
||||
+ plugin = AccountPolicyPlugin(topology_st.standalone)
|
||||
+ if not plugin.status():
|
||||
+ plugin.enable()
|
||||
+ plugin.set('nsslapd-pluginarg0', ACCP_CONF)
|
||||
+
|
||||
+ # Configure Account Policy
|
||||
+ accp_configs = AccountPolicyConfigs(topology_st.standalone)
|
||||
+ accp_config = accp_configs.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': 'config',
|
||||
+ 'alwaysrecordlogin': 'yes',
|
||||
+ 'stateattrname': 'lastLoginTime',
|
||||
+ 'altstateattrname': '1.1',
|
||||
+ 'specattrname': 'acctPolicySubentry',
|
||||
+ 'limitattrname': 'accountInactivityLimit'
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Add ACI for anonymous access if it doesn't exist
|
||||
+ domain = Domain(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+ anon_aci = '(targetattr="*")(version 3.0; acl "Anonymous read access"; allow (read,search,compare) userdn="ldap:///anyone";)'
|
||||
+ domain.ensure_present('aci', anon_aci)
|
||||
+
|
||||
+ # Restart the server to apply plugin configuration
|
||||
+ topology_st.standalone.restart()
|
||||
+
|
||||
+ # Create or update account policy entry
|
||||
+ accp_configs = AccountPolicyConfigs(topology_st.standalone, basedn=DEFAULT_SUFFIX)
|
||||
+ policy = accp_configs.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': POLICY_NAME,
|
||||
+ 'objectClass': ['top', 'ldapsubentry', 'extensibleObject', 'accountpolicy'],
|
||||
+ 'accountInactivityLimit': str(INACTIVITY_LIMIT)
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Create or update CoS template entry
|
||||
+ cos_template = CosTemplate(topology_st.standalone, dn=COS_TEMPLATE_DN)
|
||||
+ cos_template.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': COS_TEMPLATE_NAME,
|
||||
+ 'objectClass': ['top', 'cosTemplate', 'extensibleObject'],
|
||||
+ 'acctPolicySubentry': policy.dn
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Create or update CoS definition entry
|
||||
+ cos_def = CosPointerDefinition(topology_st.standalone, dn=COS_DEFINITION_DN)
|
||||
+ cos_def.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': COS_DEFINITION_NAME,
|
||||
+ 'objectClass': ['top', 'ldapsubentry', 'cosSuperDefinition', 'cosPointerDefinition'],
|
||||
+ 'cosTemplateDn': COS_TEMPLATE_DN,
|
||||
+ 'cosAttribute': 'acctPolicySubentry default operational-default'
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Restart server to ensure CoS is applied
|
||||
+ topology_st.standalone.restart()
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info('Cleaning up Account Policy settings')
|
||||
+ try:
|
||||
+ # Delete CoS and policy entries
|
||||
+ if cos_def.exists():
|
||||
+ cos_def.delete()
|
||||
+ if cos_template.exists():
|
||||
+ cos_template.delete()
|
||||
+ if policy.exists():
|
||||
+ policy.delete()
|
||||
+
|
||||
+ # Disable the plugin
|
||||
+ if plugin.status():
|
||||
+ plugin.disable()
|
||||
+ topology_st.standalone.restart()
|
||||
+ except Exception as e:
|
||||
+ log.error(f'Failed to clean up: {e}')
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ return topology_st.standalone
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="function")
|
||||
+def create_test_user(topology_st, account_policy_setup, request):
|
||||
+ """Create a test user for the inactivity test"""
|
||||
+ log.info('Creating test user')
|
||||
+
|
||||
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+ user = users.ensure_state(
|
||||
+ properties={
|
||||
+ 'uid': TEST_USER_NAME,
|
||||
+ 'cn': TEST_USER_NAME,
|
||||
+ 'sn': TEST_USER_NAME,
|
||||
+ 'userPassword': TEST_USER_PW,
|
||||
+ 'uidNumber': '1000',
|
||||
+ 'gidNumber': '2000',
|
||||
+ 'homeDirectory': f'/home/{TEST_USER_NAME}'
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info('Deleting test user')
|
||||
+ if user.exists():
|
||||
+ user.delete()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+ return user
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Indirect account locking not implemented")
|
||||
+def test_dsidm_account_inactivity_lock_unlock(topology_st, create_test_user):
|
||||
+ """Test dsidm account unlock functionality with indirectly locked accounts
|
||||
+
|
||||
+ :id: d7b57083-6111-4dbf-af84-6fca7fc7fb31
|
||||
+ :setup: Standalone instance with Account Policy Plugin and CoS configured
|
||||
+ :steps:
|
||||
+ 1. Create a test user
|
||||
+ 2. Bind as the test user to set lastLoginTime
|
||||
+ 3. Check account status - should be active
|
||||
+ 4. Set user's lastLoginTime to a time in the past that exceeds inactivity limit
|
||||
+ 5. Check account status - should be locked due to inactivity
|
||||
+ 6. Attempt to bind as the user - should fail with constraint violation
|
||||
+ 7. Unlock the account using dsidm account unlock
|
||||
+ 8. Verify account status is active again
|
||||
+ 9. Verify the user can bind again
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Account status shows as activated
|
||||
+ 4. Success
|
||||
+ 5. Account status shows as inactivity limit exceeded
|
||||
+ 6. Bind attempt fails with constraint violation
|
||||
+ 7. Account unlocked successfully
|
||||
+ 8. Account status shows as activated
|
||||
+ 9. User can bind successfully
|
||||
+ """
|
||||
+ standalone = topology_st.standalone
|
||||
+ user = create_test_user
|
||||
+
|
||||
+ # Set up FakeArgs for dsidm commands
|
||||
+ args = FakeArgs()
|
||||
+ args.dn = user.dn
|
||||
+ args.json = False
|
||||
+ args.details = False
|
||||
+
|
||||
+ # 1. Check initial account status - should be active
|
||||
+ log.info('Step 1: Checking initial account status')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
|
||||
+
|
||||
+ # 2. Bind as test user to set initial lastLoginTime
|
||||
+ log.info('Step 2: Binding as test user to set lastLoginTime')
|
||||
+ try:
|
||||
+ conn = user.bind(TEST_USER_PW)
|
||||
+ conn.unbind()
|
||||
+ log.info("Successfully bound as test user")
|
||||
+ except ldap.LDAPError as e:
|
||||
+ pytest.fail(f"Failed to bind as test user: {e}")
|
||||
+
|
||||
+ # 3. Set lastLoginTime to a time in the past that exceeds inactivity limit
|
||||
+ log.info('Step 3: Setting lastLoginTime to the past')
|
||||
+ past_time = datetime.utcnow() - timedelta(seconds=INACTIVITY_LIMIT * 2)
|
||||
+ past_time_str = past_time.strftime('%Y%m%d%H%M%SZ')
|
||||
+ user.replace('lastLoginTime', past_time_str)
|
||||
+
|
||||
+ # 4. Check account status - should now be locked due to inactivity
|
||||
+ log.info('Step 4: Checking account status after setting old lastLoginTime')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: inactivity limit exceeded')
|
||||
+
|
||||
+ # 5. Attempt to bind as the user - should fail
|
||||
+ log.info('Step 5: Attempting to bind as user (should fail)')
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION) as excinfo:
|
||||
+ conn = user.bind(TEST_USER_PW)
|
||||
+ assert "Account inactivity limit exceeded" in str(excinfo.value)
|
||||
+
|
||||
+ # 6. Unlock the account using dsidm account unlock
|
||||
+ log.info('Step 6: Unlocking the account with dsidm')
|
||||
+ unlock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st,
|
||||
+ check_value='now unlocked by resetting lastLoginTime')
|
||||
+
|
||||
+ # 7. Verify account status is active again
|
||||
+ log.info('Step 7: Checking account status after unlock')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
|
||||
+
|
||||
+ # 8. Verify the user can bind again
|
||||
+ log.info('Step 8: Verifying user can bind again')
|
||||
+ try:
|
||||
+ conn = user.bind(TEST_USER_PW)
|
||||
+ conn.unbind()
|
||||
+ log.info("Successfully bound as test user after unlock")
|
||||
+ except ldap.LDAPError as e:
|
||||
+ pytest.fail(f"Failed to bind as test user after unlock: {e}")
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Indirect account locking not implemented")
|
||||
+def test_dsidm_indirectly_locked_via_role(topology_st, create_test_user):
|
||||
+ """Test dsidm account unlock functionality with accounts indirectly locked via role
|
||||
+
|
||||
+ :id: 7bfe69bb-cf99-4214-a763-051ab2b9cf89
|
||||
+ :setup: Standalone instance with Role and user configured
|
||||
+ :steps:
|
||||
+ 1. Create a test user
|
||||
+ 2. Create a Filtered Role that includes the test user
|
||||
+ 3. Lock the role
|
||||
+ 4. Check account status - should be indirectly locked through the role
|
||||
+ 5. Attempt to unlock the account - should fail with appropriate message
|
||||
+ 6. Unlock the role
|
||||
+ 7. Verify account status is active again
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Account status shows as indirectly locked
|
||||
+ 5. Unlock attempt fails with appropriate error message
|
||||
+ 6. Success
|
||||
+ 7. Account status shows as activated
|
||||
+ """
|
||||
+ standalone = topology_st.standalone
|
||||
+ user = create_test_user
|
||||
+
|
||||
+ # Use FilteredRoles and ensure_state for role creation
|
||||
+ log.info('Step 1: Creating Filtered Role')
|
||||
+ roles = FilteredRoles(standalone, DEFAULT_SUFFIX)
|
||||
+ role = roles.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': 'TestFilterRole',
|
||||
+ 'nsRoleFilter': f'(uid={TEST_USER_NAME})'
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Set up FakeArgs for dsidm commands
|
||||
+ args = FakeArgs()
|
||||
+ args.dn = user.dn
|
||||
+ args.json = False
|
||||
+ args.details = False
|
||||
+
|
||||
+ # 2. Check account status before locking role
|
||||
+ log.info('Step 2: Checking account status before locking role')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
|
||||
+
|
||||
+ # 3. Lock the role
|
||||
+ log.info('Step 3: Locking the role')
|
||||
+ role.lock()
|
||||
+
|
||||
+ # 4. Check account status - should be indirectly locked
|
||||
+ log.info('Step 4: Checking account status after locking role')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: indirectly locked through a Role')
|
||||
+
|
||||
+ # 5. Attempt to unlock the account - should fail
|
||||
+ log.info('Step 5: Attempting to unlock indirectly locked account')
|
||||
+ unlock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st,
|
||||
+ check_value='Account is locked through role')
|
||||
+
|
||||
+ # 6. Unlock the role
|
||||
+ log.info('Step 6: Unlocking the role')
|
||||
+ role.unlock()
|
||||
+
|
||||
+ # 7. Verify account status is active again
|
||||
+ log.info('Step 7: Checking account status after unlocking role')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main(["-s", CURRENT_FILE])
|
||||
\ No newline at end of file
|
||||
diff --git a/src/lib389/lib389/cli_idm/account.py b/src/lib389/lib389/cli_idm/account.py
|
||||
index 15f766588..a0dfd8f65 100644
|
||||
--- a/src/lib389/lib389/cli_idm/account.py
|
||||
+++ b/src/lib389/lib389/cli_idm/account.py
|
||||
@@ -176,8 +176,29 @@ def unlock(inst, basedn, log, args):
|
||||
dn = _get_dn_arg(args.dn, msg="Enter dn to unlock")
|
||||
accounts = Accounts(inst, basedn)
|
||||
acct = accounts.get(dn=dn)
|
||||
- acct.unlock()
|
||||
- log.info(f'Entry {dn} is unlocked')
|
||||
+
|
||||
+ try:
|
||||
+ # Get the account status before attempting to unlock
|
||||
+ status = acct.status()
|
||||
+ state = status["state"]
|
||||
+
|
||||
+ # Attempt to unlock the account
|
||||
+ acct.unlock()
|
||||
+
|
||||
+ # Success message
|
||||
+ log.info(f'Entry {dn} is unlocked')
|
||||
+ if state == AccountState.DIRECTLY_LOCKED:
|
||||
+ log.info(f'The entry was directly locked')
|
||||
+ elif state == AccountState.INACTIVITY_LIMIT_EXCEEDED:
|
||||
+ log.info(f'The entry was locked due to inactivity and is now unlocked by resetting lastLoginTime')
|
||||
+
|
||||
+ except ValueError as e:
|
||||
+ # Provide a more detailed error message based on failure reason
|
||||
+ if "through role" in str(e):
|
||||
+ log.error(f"Cannot unlock {dn}: {str(e)}")
|
||||
+ log.info("To unlock this account, you must modify the role that's locking it.")
|
||||
+ else:
|
||||
+ log.error(f"Failed to unlock {dn}: {str(e)}")
|
||||
|
||||
|
||||
def reset_password(inst, basedn, log, args):
|
||||
diff --git a/src/lib389/lib389/idm/account.py b/src/lib389/lib389/idm/account.py
|
||||
index 4b823b662..faf6f6f16 100644
|
||||
--- a/src/lib389/lib389/idm/account.py
|
||||
+++ b/src/lib389/lib389/idm/account.py
|
||||
@@ -140,7 +140,8 @@ class Account(DSLdapObject):
|
||||
"nsAccountLock", state_attr])
|
||||
|
||||
last_login_time = self._dict_get_with_ignore_indexerror(account_data, state_attr)
|
||||
- if not last_login_time:
|
||||
+ # if last_login_time not exist then check alt_state_attr only if its not disabled and exist
|
||||
+ if not last_login_time and alt_state_attr in account_data:
|
||||
last_login_time = self._dict_get_with_ignore_indexerror(account_data, alt_state_attr)
|
||||
|
||||
create_time = self._dict_get_with_ignore_indexerror(account_data, "createTimestamp")
|
||||
@@ -203,12 +204,33 @@ class Account(DSLdapObject):
|
||||
self.replace('nsAccountLock', 'true')
|
||||
|
||||
def unlock(self):
|
||||
- """Unset nsAccountLock"""
|
||||
+ """Unset nsAccountLock if it's set and reset lastLoginTime if account is locked due to inactivity"""
|
||||
|
||||
current_status = self.status()
|
||||
+
|
||||
if current_status["state"] == AccountState.ACTIVATED:
|
||||
raise ValueError("Account is already active")
|
||||
- self.remove('nsAccountLock', None)
|
||||
+
|
||||
+ if current_status["state"] == AccountState.DIRECTLY_LOCKED:
|
||||
+ # Account is directly locked with nsAccountLock attribute
|
||||
+ self.remove('nsAccountLock', None)
|
||||
+ elif current_status["state"] == AccountState.INACTIVITY_LIMIT_EXCEEDED:
|
||||
+ # Account is locked due to inactivity - reset lastLoginTime to current time
|
||||
+ # The lastLoginTime attribute stores its value in GMT/UTC time (Zulu time zone)
|
||||
+ current_time = time.strftime('%Y%m%d%H%M%SZ', time.gmtime())
|
||||
+ self.replace('lastLoginTime', current_time)
|
||||
+ elif current_status["state"] == AccountState.INDIRECTLY_LOCKED:
|
||||
+ # Account is locked through a role
|
||||
+ role_dn = current_status.get("role_dn")
|
||||
+ if role_dn:
|
||||
+ raise ValueError(f"Account is locked through role {role_dn}. "
|
||||
+ f"Please modify the role to unlock this account.")
|
||||
+ else:
|
||||
+ raise ValueError("Account is locked through an unknown role. "
|
||||
+ "Please check the roles configuration to unlock this account.")
|
||||
+ else:
|
||||
+ # Should not happen, but just in case
|
||||
+ raise ValueError(f"Unknown lock state: {current_status['state'].value}")
|
||||
|
||||
# If the account can be bound to, this will attempt to do so. We don't check
|
||||
# for exceptions, just pass them back!
|
||||
--
|
||||
2.49.0
|
||||
|
@ -1,70 +0,0 @@
|
||||
From 09a284ee43c2b4346da892f8756f97accd15ca68 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Wed, 4 Dec 2024 21:59:40 -0500
|
||||
Subject: [PATCH] Issue 6302 - Allow to run replication status without a prompt
|
||||
(#6410)
|
||||
|
||||
Description: We should allow running replication status and
|
||||
other similar commands without requesting a password and bind DN.
|
||||
|
||||
This way, the current instance's root DN and root PW will be used on other
|
||||
instances when requesting CSN info. If they are incorrect,
|
||||
then the info won't be printed, but otherwise, the agreement status
|
||||
will be displayed correctly.
|
||||
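A minimal sketch of the call path after this change, assuming a connected lib389 DirSrv handle `inst` for one supplier; the suffix below is a placeholder.

```python
from lib389.replica import Replicas

replica = Replicas(inst).get("dc=example,dc=com")
# With no bind DN/password given and no prompt requested, status() now falls
# back to the local instance's root credentials when querying the other replicas.
for agmt_status in replica.status(binddn=None, bindpw=None, pwprompt=False):
    print(agmt_status)
```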
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6302
|
||||
|
||||
Reviewed by: @progier389 (Thanks!)
|
||||
---
|
||||
src/lib389/lib389/cli_conf/replication.py | 15 +++------------
|
||||
1 file changed, 3 insertions(+), 12 deletions(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
|
||||
index 399d0d2f8..cd4a331a8 100644
|
||||
--- a/src/lib389/lib389/cli_conf/replication.py
|
||||
+++ b/src/lib389/lib389/cli_conf/replication.py
|
||||
@@ -319,12 +319,9 @@ def list_suffixes(inst, basedn, log, args):
|
||||
def get_repl_status(inst, basedn, log, args):
|
||||
replicas = Replicas(inst)
|
||||
replica = replicas.get(args.suffix)
|
||||
- pw_and_dn_prompt = False
|
||||
if args.bind_passwd_file is not None:
|
||||
args.bind_passwd = get_passwd_from_file(args.bind_passwd_file)
|
||||
- if args.bind_passwd_prompt or args.bind_dn is None or args.bind_passwd is None:
|
||||
- pw_and_dn_prompt = True
|
||||
- status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=pw_and_dn_prompt)
|
||||
+ status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=args.bind_passwd_prompt)
|
||||
if args.json:
|
||||
log.info(json.dumps({"type": "list", "items": status}, indent=4))
|
||||
else:
|
||||
@@ -335,12 +332,9 @@ def get_repl_status(inst, basedn, log, args):
|
||||
def get_repl_winsync_status(inst, basedn, log, args):
|
||||
replicas = Replicas(inst)
|
||||
replica = replicas.get(args.suffix)
|
||||
- pw_and_dn_prompt = False
|
||||
if args.bind_passwd_file is not None:
|
||||
args.bind_passwd = get_passwd_from_file(args.bind_passwd_file)
|
||||
- if args.bind_passwd_prompt or args.bind_dn is None or args.bind_passwd is None:
|
||||
- pw_and_dn_prompt = True
|
||||
- status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, winsync=True, pwprompt=pw_and_dn_prompt)
|
||||
+ status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, winsync=True, pwprompt=args.bind_passwd_prompt)
|
||||
if args.json:
|
||||
log.info(json.dumps({"type": "list", "items": status}, indent=4))
|
||||
else:
|
||||
@@ -874,12 +868,9 @@ def poke_agmt(inst, basedn, log, args):
|
||||
|
||||
def get_agmt_status(inst, basedn, log, args):
|
||||
agmt = get_agmt(inst, args)
|
||||
- pw_and_dn_prompt = False
|
||||
if args.bind_passwd_file is not None:
|
||||
args.bind_passwd = get_passwd_from_file(args.bind_passwd_file)
|
||||
- if args.bind_passwd_prompt or args.bind_dn is None or args.bind_passwd is None:
|
||||
- pw_and_dn_prompt = True
|
||||
- status = agmt.status(use_json=args.json, binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=pw_and_dn_prompt)
|
||||
+ status = agmt.status(use_json=args.json, binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=args.bind_passwd_prompt)
|
||||
log.info(status)
|
||||
|
||||
|
||||
--
|
||||
2.49.0
|
||||
|
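With the prompting logic removed above, the status handlers only prompt when --bind-passwd-prompt is explicitly passed; otherwise the local instance's root DN and password are used, as the commit message explains. A minimal sketch of the equivalent library call (an illustration, not code from the patch), assuming a connected DirSrv handle `inst`:

from lib389.replica import Replicas

# Assumption: `inst` is a connected DirSrv handle; the suffix is illustrative.
replicas = Replicas(inst)
replica = replicas.get("dc=example,dc=com")

# No bind DN or password is supplied and no prompt is requested; with this
# change the call simply proceeds with the instance's own credentials.
status = replica.status(binddn=None, bindpw=None, pwprompt=False)
print(status)

If the credentials are rejected by a remote replica, the per-agreement CSN details are simply omitted rather than the whole command failing, which matches the behaviour described in the commit message.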
@ -1,4 +0,0 @@
For detailed information on developing plugins for
389 Directory Server visit.

http://port389/wiki/Plugins
@ -1,16 +0,0 @@
#!/bin/bash

DATE=`date +%Y%m%d`
# use a real tag name here
VERSION=1.3.5.14
PKGNAME=389-ds-base
TAG=${TAG:-$PKGNAME-$VERSION}
URL="https://git.fedorahosted.org/git/?p=389/ds.git;a=snapshot;h=$TAG;sf=tgz"
SRCNAME=$PKGNAME-$VERSION

wget -O $SRCNAME.tar.gz "$URL"

echo convert tgz format to tar.bz2 format

gunzip $PKGNAME-$VERSION.tar.gz
bzip2 $PKGNAME-$VERSION.tar
@ -1,999 +0,0 @@
|
||||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
version = 3
|
||||
|
||||
[[package]]
|
||||
name = "addr2line"
|
||||
version = "0.24.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
|
||||
dependencies = [
|
||||
"gimli",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "adler2"
|
||||
version = "2.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
|
||||
|
||||
[[package]]
|
||||
name = "ahash"
|
||||
version = "0.7.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9"
|
||||
dependencies = [
|
||||
"getrandom 0.2.16",
|
||||
"once_cell",
|
||||
"version_check",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ansi_term"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "atty"
|
||||
version = "0.2.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
|
||||
dependencies = [
|
||||
"hermit-abi",
|
||||
"libc",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
|
||||
|
||||
[[package]]
|
||||
name = "backtrace"
|
||||
version = "0.3.75"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002"
|
||||
dependencies = [
|
||||
"addr2line",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"miniz_oxide",
|
||||
"object",
|
||||
"rustc-demangle",
|
||||
"windows-targets",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "base64"
|
||||
version = "0.13.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "1.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "2.9.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
version = "1.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
|
||||
|
||||
[[package]]
|
||||
name = "cbindgen"
|
||||
version = "0.9.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
|
||||
dependencies = [
|
||||
"clap",
|
||||
"log",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"syn 1.0.109",
|
||||
"tempfile",
|
||||
"toml",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.2.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d0fc897dc1e865cc67c0e05a836d9d3f1df3cbe442aa4a9473b18e12624a4951"
|
||||
dependencies = [
|
||||
"jobserver",
|
||||
"libc",
|
||||
"shlex",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "2.34.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
|
||||
dependencies = [
|
||||
"ansi_term",
|
||||
"atty",
|
||||
"bitflags 1.3.2",
|
||||
"strsim",
|
||||
"textwrap",
|
||||
"unicode-width",
|
||||
"vec_map",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "concread"
|
||||
version = "0.2.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dcc9816f5ac93ebd51c37f7f9a6bf2b40dfcd42978ad2aea5d542016e9244cf6"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"crossbeam",
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
"lru",
|
||||
"parking_lot",
|
||||
"rand",
|
||||
"smallvec",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam"
|
||||
version = "0.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"crossbeam-deque",
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-queue",
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-channel"
|
||||
version = "0.5.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
|
||||
dependencies = [
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-deque"
|
||||
version = "0.8.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
|
||||
dependencies = [
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-epoch"
|
||||
version = "0.9.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
|
||||
dependencies = [
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-queue"
|
||||
version = "0.3.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115"
|
||||
dependencies = [
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-utils"
|
||||
version = "0.8.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
|
||||
|
||||
[[package]]
|
||||
name = "entryuuid"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
"paste",
|
||||
"slapi_r_plugin",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "entryuuid_syntax"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
"paste",
|
||||
"slapi_r_plugin",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "errno"
|
||||
version = "0.3.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fastrand"
|
||||
version = "2.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
|
||||
|
||||
[[package]]
|
||||
name = "fernet"
|
||||
version = "0.1.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
|
||||
dependencies = [
|
||||
"base64",
|
||||
"byteorder",
|
||||
"getrandom 0.2.16",
|
||||
"openssl",
|
||||
"zeroize",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "foreign-types"
|
||||
version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
|
||||
dependencies = [
|
||||
"foreign-types-shared",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "foreign-types-shared"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
|
||||
|
||||
[[package]]
|
||||
name = "getrandom"
|
||||
version = "0.2.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"wasi 0.11.0+wasi-snapshot-preview1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "getrandom"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"r-efi",
|
||||
"wasi 0.14.2+wasi-0.2.4",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gimli"
|
||||
version = "0.31.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
|
||||
|
||||
[[package]]
|
||||
name = "hashbrown"
|
||||
version = "0.12.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hermit-abi"
|
||||
version = "0.1.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
|
||||
dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "instant"
|
||||
version = "0.1.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "itoa"
|
||||
version = "1.0.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
|
||||
|
||||
[[package]]
|
||||
name = "jobserver"
|
||||
version = "0.1.33"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a"
|
||||
dependencies = [
|
||||
"getrandom 0.3.3",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.172"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
|
||||
|
||||
[[package]]
|
||||
name = "librnsslapd"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"cbindgen",
|
||||
"libc",
|
||||
"slapd",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "librslapd"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"cbindgen",
|
||||
"concread",
|
||||
"libc",
|
||||
"slapd",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "linux-raw-sys"
|
||||
version = "0.9.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
|
||||
|
||||
[[package]]
|
||||
name = "lock_api"
|
||||
version = "0.4.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"scopeguard",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.27"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
|
||||
|
||||
[[package]]
|
||||
name = "lru"
|
||||
version = "0.7.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a"
|
||||
dependencies = [
|
||||
"hashbrown",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.7.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
|
||||
|
||||
[[package]]
|
||||
name = "miniz_oxide"
|
||||
version = "0.8.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a"
|
||||
dependencies = [
|
||||
"adler2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "object"
|
||||
version = "0.36.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "once_cell"
|
||||
version = "1.21.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
|
||||
|
||||
[[package]]
|
||||
name = "openssl"
|
||||
version = "0.10.73"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"cfg-if",
|
||||
"foreign-types",
|
||||
"libc",
|
||||
"once_cell",
|
||||
"openssl-macros",
|
||||
"openssl-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "openssl-macros"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "openssl-sys"
|
||||
version = "0.9.109"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
"pkg-config",
|
||||
"vcpkg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot"
|
||||
version = "0.11.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
|
||||
dependencies = [
|
||||
"instant",
|
||||
"lock_api",
|
||||
"parking_lot_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot_core"
|
||||
version = "0.8.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"instant",
|
||||
"libc",
|
||||
"redox_syscall",
|
||||
"smallvec",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "paste"
|
||||
version = "0.1.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
|
||||
dependencies = [
|
||||
"paste-impl",
|
||||
"proc-macro-hack",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "paste-impl"
|
||||
version = "0.1.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
|
||||
dependencies = [
|
||||
"proc-macro-hack",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pin-project-lite"
|
||||
version = "0.2.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"
|
||||
|
||||
[[package]]
|
||||
name = "pkg-config"
|
||||
version = "0.3.32"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"
|
||||
|
||||
[[package]]
|
||||
name = "ppv-lite86"
|
||||
version = "0.2.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
|
||||
dependencies = [
|
||||
"zerocopy",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-hack"
|
||||
version = "0.5.20+deprecated"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.95"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
|
||||
dependencies = [
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pwdchan"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"base64",
|
||||
"cc",
|
||||
"libc",
|
||||
"openssl",
|
||||
"paste",
|
||||
"slapi_r_plugin",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "quote"
|
||||
version = "1.0.40"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "r-efi"
|
||||
version = "5.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
|
||||
|
||||
[[package]]
|
||||
name = "rand"
|
||||
version = "0.8.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"rand_chacha",
|
||||
"rand_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_chacha"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
|
||||
dependencies = [
|
||||
"ppv-lite86",
|
||||
"rand_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_core"
|
||||
version = "0.6.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
|
||||
dependencies = [
|
||||
"getrandom 0.2.16",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "redox_syscall"
|
||||
version = "0.2.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
|
||||
dependencies = [
|
||||
"bitflags 1.3.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rsds"
|
||||
version = "0.1.0"
|
||||
|
||||
[[package]]
|
||||
name = "rustc-demangle"
|
||||
version = "0.1.24"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
|
||||
|
||||
[[package]]
|
||||
name = "rustix"
|
||||
version = "1.0.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"errno",
|
||||
"libc",
|
||||
"linux-raw-sys",
|
||||
"windows-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ryu"
|
||||
version = "1.0.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
|
||||
|
||||
[[package]]
|
||||
name = "scopeguard"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.219"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
|
||||
dependencies = [
|
||||
"serde_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_derive"
|
||||
version = "1.0.219"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.140"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
|
||||
dependencies = [
|
||||
"itoa",
|
||||
"memchr",
|
||||
"ryu",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "shlex"
|
||||
version = "1.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
|
||||
|
||||
[[package]]
|
||||
name = "slapd"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"fernet",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "slapi_r_plugin"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"paste",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "smallvec"
|
||||
version = "1.15.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9"
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "1.0.109"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "2.0.101"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tempfile"
|
||||
version = "3.20.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1"
|
||||
dependencies = [
|
||||
"fastrand",
|
||||
"getrandom 0.3.3",
|
||||
"once_cell",
|
||||
"rustix",
|
||||
"windows-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "textwrap"
|
||||
version = "0.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
|
||||
dependencies = [
|
||||
"unicode-width",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.45.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"pin-project-lite",
|
||||
"tokio-macros",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-macros"
|
||||
version = "2.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml"
|
||||
version = "0.5.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-ident"
|
||||
version = "1.0.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-width"
|
||||
version = "0.1.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af"
|
||||
|
||||
[[package]]
|
||||
name = "uuid"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
|
||||
dependencies = [
|
||||
"getrandom 0.2.16",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "vcpkg"
|
||||
version = "0.2.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
|
||||
|
||||
[[package]]
|
||||
name = "vec_map"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
|
||||
|
||||
[[package]]
|
||||
name = "version_check"
|
||||
version = "0.9.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
version = "0.11.0+wasi-snapshot-preview1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
version = "0.14.2+wasi-0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
|
||||
dependencies = [
|
||||
"wit-bindgen-rt",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.3.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
|
||||
dependencies = [
|
||||
"winapi-i686-pc-windows-gnu",
|
||||
"winapi-x86_64-pc-windows-gnu",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-i686-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.59.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
|
||||
dependencies = [
|
||||
"windows-targets",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
|
||||
dependencies = [
|
||||
"windows_aarch64_gnullvm",
|
||||
"windows_aarch64_msvc",
|
||||
"windows_i686_gnu",
|
||||
"windows_i686_gnullvm",
|
||||
"windows_i686_msvc",
|
||||
"windows_x86_64_gnu",
|
||||
"windows_x86_64_gnullvm",
|
||||
"windows_x86_64_msvc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnullvm"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
|
||||
|
||||
[[package]]
|
||||
name = "wit-bindgen-rt"
|
||||
version = "0.39.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerocopy"
|
||||
version = "0.8.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb"
|
||||
dependencies = [
|
||||
"zerocopy-derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerocopy-derive"
|
||||
version = "0.8.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zeroize"
|
||||
version = "1.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
|
||||
dependencies = [
|
||||
"zeroize_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zeroize_derive"
|
||||
version = "1.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
]
|
File diff suppressed because it is too large
3
sources
Normal file
@ -0,0 +1,3 @@
SHA512 (389-ds-base-3.0.4.tar.bz2) = 45ef03d288fc3c1e7a24474393fe769deb52413f57aa1517b71882fb4be653eeae041911d55e60b82079922e7995c55bb0653d3f1ea0a83622e84d6411c863fe
SHA512 (jemalloc-5.3.0.tar.bz2) = 22907bb052096e2caffb6e4e23548aecc5cc9283dce476896a2b1127eee64170e3562fa2e7db9571298814a7a2c7df6e8d1fbe152bd3f3b0c1abec22a2de34b1
SHA512 (libdb-5.3.28-59.tar.bz2) = 731a434fa2e6487ebb05c458b0437456eb9f7991284beb08cb3e21931e23bdeddddbc95bfabe3a2f9f029fe69cd33a2d4f0f5ce6a9811e9c3b940cb6fde4bf79
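The entries above follow the dist-git convention SHA512 (<tarball>) = <digest>. As a small illustration (an assumption, not a script shipped in this repository), a downloaded tarball can be checked against such a line like this:

import hashlib
import re

def verify_source(sources_path, tarball_path):
    """Return True if tarball_path matches its SHA512 entry in the sources file."""
    digest = hashlib.sha512()
    with open(tarball_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    name = tarball_path.rsplit("/", 1)[-1]
    with open(sources_path) as f:
        for line in f:
            m = re.match(r"SHA512 \((?P<name>[^)]+)\) = (?P<digest>[0-9a-f]+)", line)
            if m and m.group("name") == name:
                return m.group("digest") == digest.hexdigest()
    return False

# Example: verify_source("sources", "389-ds-base-3.0.4.tar.bz2")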