Compare commits
1 commit: c8-stream-... → c10

| Author | SHA1 | Date |
|---|---|---|
|  | 8febfc956c |  |
@@ -1,3 +0,0 @@
-bd9aab32d9cbf9231058d585479813f3420dc872 SOURCES/389-ds-base-1.4.3.39.tar.bz2
-1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2
-8d3275209f2f8e1a69053340930ad1fb037d61fb SOURCES/vendor-1.4.3.39-3.tar.gz
7  .gitignore  vendored
@@ -1,3 +1,4 @@
-SOURCES/389-ds-base-1.4.3.39.tar.bz2
-SOURCES/jemalloc-5.3.0.tar.bz2
-SOURCES/vendor-1.4.3.39-3.tar.gz
+389-ds-base-3.0.6.tar.bz2
+jemalloc-5.3.0.tar.bz2
+libdb-5.3.28-59.tar.bz2
+vendor-3.0.6-3.tar.gz
@@ -0,0 +1,53 @@
From fc7f5aa01e245c7c2e35b01d171dbd5a6dc75db4 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Sat, 25 Jan 2025 13:54:33 +0100
Subject: [PATCH] Issue 6544 - logconv.py: python3-magic conflicts with
python3-file-magic

Bug Description:
python3-magic and python3-file-magic can't be installed simultaneously,
python3-magic is not packaged for EL10.

Fix Description:
Use python3-file-magic instead.

Issue identified and fix suggested by Adam Williamson.

Fixes: https://github.com/389ds/389-ds-base/issues/6544

Reviewed by: @mreynolds389 (Thanks!)
---
ldap/admin/src/logconv.py | 3 +--
rpm/389-ds-base.spec.in | 2 +-
2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/ldap/admin/src/logconv.py b/ldap/admin/src/logconv.py
index 566f9af38..2fb5bb8c1 100755
--- a/ldap/admin/src/logconv.py
+++ b/ldap/admin/src/logconv.py
@@ -1798,8 +1798,7 @@ class logAnalyser:
return None

try:
- mime = magic.Magic(mime=True)
- filetype = mime.from_file(filepath)
+ filetype = magic.detect_from_filename(filepath).mime_type

# List of supported compression types
compressed_mime_types = [
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index 3146b9186..3c6e95938 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -298,7 +298,7 @@ Requires: json-c
# Log compression
Requires: zlib-devel
# logconv.py, MIME type
-Requires: python-magic
+Requires: python3-file-magic
# Picks up our systemd deps.
%{?systemd_requires}

--
2.48.0
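For context on the logconv.py hunk above: python3-magic and python3-file-magic both install a module called `magic`, but with different APIs, which is why swapping the package only needs a one-line code change. A minimal sketch (the sample path is hypothetical) of reading a MIME type with each binding:

    import magic

    filepath = "/var/log/dirsrv/slapd-example/access"  # hypothetical example path

    # python3-magic (PyPI "python-magic"), the API removed by the patch:
    #   mime = magic.Magic(mime=True)
    #   filetype = mime.from_file(filepath)

    # python3-file-magic (libmagic's own bindings), the API the patch switches to:
    filetype = magic.detect_from_filename(filepath).mime_type
    print(filetype)  # e.g. "text/plain" or "application/gzip"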
311  0002-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch  Normal file
@@ -0,0 +1,311 @@
|
||||
From 1aabba9b17f99eb1a460be3305aad4b7099b9fe6 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Wed, 13 Nov 2024 15:31:35 +0100
|
||||
Subject: [PATCH] Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work
|
||||
properly (#6400)
|
||||
|
||||
* Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work properly
|
||||
|
||||
Several issues:
|
||||
|
||||
After restarting the server nsslapd-mdb-max-dbs may not be high enough to add a new backend
|
||||
because the value computation is wrong.
|
||||
dbscan fails to open the database if nsslapd-mdb-max-dbs has been increased.
|
||||
dbscan crashes when closing the database (typically when using -S)
|
||||
When starting the instance the nsslapd-mdb-max-dbs parameter is increased to ensure that a new backend may be added.
|
||||
When dse.ldif path is not specified, the db environment is now open using the INFO.mdb data instead of using the default values.
|
||||
synchronization between thread closure and database context destruction is hardened
|
||||
Issue: #6374
|
||||
|
||||
Reviewed by: @tbordaz , @vashirov (Thanks!)
|
||||
|
||||
(cherry picked from commit 56cd3389da608a3f6eeee58d20dffbcd286a8033)
|
||||
---
|
||||
.../tests/suites/config/config_test.py | 86 +++++++++++++++++++
|
||||
ldap/servers/slapd/back-ldbm/back-ldbm.h | 2 +
|
||||
.../slapd/back-ldbm/db-mdb/mdb_config.c | 17 ++--
|
||||
.../back-ldbm/db-mdb/mdb_import_threads.c | 9 +-
|
||||
.../slapd/back-ldbm/db-mdb/mdb_instance.c | 8 ++
|
||||
ldap/servers/slapd/back-ldbm/dbimpl.c | 2 +-
|
||||
ldap/servers/slapd/back-ldbm/import.c | 14 ++-
|
||||
7 files changed, 128 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
|
||||
index c3e26eed4..08544594f 100644
|
||||
--- a/dirsrvtests/tests/suites/config/config_test.py
|
||||
+++ b/dirsrvtests/tests/suites/config/config_test.py
|
||||
@@ -17,6 +17,7 @@ from lib389.topologies import topology_m2, topology_st as topo
|
||||
from lib389.utils import *
|
||||
from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX, DEFAULT_BENAME
|
||||
from lib389._mapped_object import DSLdapObjects
|
||||
+from lib389.agreement import Agreements
|
||||
from lib389.cli_base import FakeArgs
|
||||
from lib389.cli_conf.backend import db_config_set
|
||||
from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
|
||||
@@ -27,6 +28,8 @@ from lib389.cos import CosPointerDefinitions, CosTemplates
|
||||
from lib389.backend import Backends, DatabaseConfig
|
||||
from lib389.monitor import MonitorLDBM, Monitor
|
||||
from lib389.plugins import ReferentialIntegrityPlugin
|
||||
+from lib389.replica import BootstrapReplicationManager, Replicas
|
||||
+from lib389.passwd import password_generate
|
||||
|
||||
pytestmark = pytest.mark.tier0
|
||||
|
||||
@@ -36,6 +39,8 @@ PSTACK_CMD = '/usr/bin/pstack'
|
||||
logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
+
|
||||
@pytest.fixture(scope="module")
|
||||
def big_file():
|
||||
TEMP_BIG_FILE = ''
|
||||
@@ -813,6 +818,87 @@ def test_numlisteners_limit(topo):
|
||||
assert numlisteners[0] == '4'
|
||||
|
||||
|
||||
+def bootstrap_replication(inst_from, inst_to, creds):
|
||||
+ manager = BootstrapReplicationManager(inst_to)
|
||||
+ rdn_val = 'replication manager'
|
||||
+ if manager.exists():
|
||||
+ manager.delete()
|
||||
+ manager.create(properties={
|
||||
+ 'cn': rdn_val,
|
||||
+ 'uid': rdn_val,
|
||||
+ 'userPassword': creds
|
||||
+ })
|
||||
+ for replica in Replicas(inst_to).list():
|
||||
+ replica.remove_all('nsDS5ReplicaBindDNGroup')
|
||||
+ replica.replace('nsDS5ReplicaBindDN', manager.dn)
|
||||
+ for agmt in Agreements(inst_from).list():
|
||||
+ agmt.replace('nsDS5ReplicaBindDN', manager.dn)
|
||||
+ agmt.replace('nsDS5ReplicaCredentials', creds)
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(get_default_db_lib() != "mdb", reason="This test requires lmdb")
|
||||
+def test_lmdb_autotuned_maxdbs(topology_m2, request):
|
||||
+ """Verify that after restart, nsslapd-mdb-max-dbs is large enough to add a new backend.
|
||||
+
|
||||
+ :id: 0272d432-9080-11ef-8f40-482ae39447e5
|
||||
+ :setup: Two suppliers configuration
|
||||
+ :steps:
|
||||
+ 1. loop 20 times
|
||||
+ 3. In 1 loop: restart instance
|
||||
+ 3. In 1 loop: add a new backend
|
||||
+ 4. In 1 loop: check that instance is still alive
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ """
|
||||
+
|
||||
+ s1 = topology_m2.ms["supplier1"]
|
||||
+ s2 = topology_m2.ms["supplier2"]
|
||||
+
|
||||
+ backends = Backends(s1)
|
||||
+ db_config = DatabaseConfig(s1)
|
||||
+ # Generate the teardown finalizer
|
||||
+ belist = []
|
||||
+ creds=password_generate()
|
||||
+ bootstrap_replication(s2, s1, creds)
|
||||
+ bootstrap_replication(s1, s2, creds)
|
||||
+
|
||||
+ def fin():
|
||||
+ s1.start()
|
||||
+ for be in belist:
|
||||
+ be.delete()
|
||||
+
|
||||
+ if not DEBUGGING:
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # 1. Set autotuning (off-line to be able to decrease the value)
|
||||
+ s1.stop()
|
||||
+ dse_ldif = DSEldif(s1)
|
||||
+ dse_ldif.replace(db_config.dn, 'nsslapd-mdb-max-dbs', '0')
|
||||
+ os.remove(f'{s1.dbdir}/data.mdb')
|
||||
+ s1.start()
|
||||
+
|
||||
+ # 2. Reinitialize the db:
|
||||
+ log.info("Bulk import...")
|
||||
+ agmt = Agreements(s2).list()[0]
|
||||
+ agmt.begin_reinit()
|
||||
+ (done, error) = agmt.wait_reinit()
|
||||
+ log.info(f'Bulk importresult is ({done}, {error})')
|
||||
+ assert done is True
|
||||
+ assert error is False
|
||||
+
|
||||
+ # 3. loop 20 times
|
||||
+ for idx in range(20):
|
||||
+ s1.restart()
|
||||
+ log.info(f'Adding backend test{idx}')
|
||||
+ belist.append(backends.create(properties={'cn': f'test{idx}',
|
||||
+ 'nsslapd-suffix': f'dc=test{idx}'}))
|
||||
+ assert s1.status()
|
||||
+
|
||||
+
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
|
||||
index 8fea63e35..35d0ece04 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
|
||||
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
|
||||
@@ -896,4 +896,6 @@ typedef struct _back_search_result_set
|
||||
((L)->size == (R)->size && !memcmp((L)->data, (R)->data, (L)->size))
|
||||
|
||||
typedef int backend_implement_init_fn(struct ldbminfo *li, config_info *config_array);
|
||||
+
|
||||
+pthread_mutex_t *get_import_ctx_mutex();
|
||||
#endif /* _back_ldbm_h_ */
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
|
||||
index 351f54037..1f7b71442 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
|
||||
@@ -83,7 +83,7 @@ dbmdb_compute_limits(struct ldbminfo *li)
|
||||
uint64_t total_space = 0;
|
||||
uint64_t avail_space = 0;
|
||||
uint64_t cur_dbsize = 0;
|
||||
- int nbchangelogs = 0;
|
||||
+ int nbvlvs = 0;
|
||||
int nbsuffixes = 0;
|
||||
int nbindexes = 0;
|
||||
int nbagmt = 0;
|
||||
@@ -99,8 +99,8 @@ dbmdb_compute_limits(struct ldbminfo *li)
|
||||
* But some tunable may be autotuned.
|
||||
*/
|
||||
if (dbmdb_count_config_entries("(objectClass=nsMappingTree)", &nbsuffixes) ||
|
||||
- dbmdb_count_config_entries("(objectClass=nsIndex)", &nbsuffixes) ||
|
||||
- dbmdb_count_config_entries("(&(objectClass=nsds5Replica)(nsDS5Flags=1))", &nbchangelogs) ||
|
||||
+ dbmdb_count_config_entries("(objectClass=nsIndex)", &nbindexes) ||
|
||||
+ dbmdb_count_config_entries("(objectClass=vlvIndex)", &nbvlvs) ||
|
||||
dbmdb_count_config_entries("(objectClass=nsds5replicationagreement)", &nbagmt)) {
|
||||
/* error message is already logged */
|
||||
return 1;
|
||||
@@ -120,8 +120,15 @@ dbmdb_compute_limits(struct ldbminfo *li)
|
||||
|
||||
info->pagesize = sysconf(_SC_PAGE_SIZE);
|
||||
limits->min_readers = config_get_threadnumber() + nbagmt + DBMDB_READERS_MARGIN;
|
||||
- /* Default indexes are counted in "nbindexes" so we should always have enough resource to add 1 new suffix */
|
||||
- limits->min_dbs = nbsuffixes + nbindexes + nbchangelogs + DBMDB_DBS_MARGIN;
|
||||
+ /*
|
||||
+ * For each suffix there are 4 databases instances:
|
||||
+ * long-entryrdn, replication_changelog, id2entry and ancestorid
|
||||
+ * then the indexes and the vlv and vlv cache
|
||||
+ *
|
||||
+ * Default indexes are counted in "nbindexes" so we should always have enough
|
||||
+ * resource to add 1 new suffix
|
||||
+ */
|
||||
+ limits->min_dbs = 4*nbsuffixes + nbindexes + 2*nbvlvs + DBMDB_DBS_MARGIN;
|
||||
|
||||
total_space = ((uint64_t)(buf.f_blocks)) * ((uint64_t)(buf.f_bsize));
|
||||
avail_space = ((uint64_t)(buf.f_bavail)) * ((uint64_t)(buf.f_bsize));
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
||||
index 8c879da31..707a110c5 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
||||
@@ -4312,9 +4312,12 @@ dbmdb_import_init_writer(ImportJob *job, ImportRole_t role)
|
||||
void
|
||||
dbmdb_free_import_ctx(ImportJob *job)
|
||||
{
|
||||
- if (job->writer_ctx) {
|
||||
- ImportCtx_t *ctx = job->writer_ctx;
|
||||
- job->writer_ctx = NULL;
|
||||
+ ImportCtx_t *ctx = NULL;
|
||||
+ pthread_mutex_lock(get_import_ctx_mutex());
|
||||
+ ctx = job->writer_ctx;
|
||||
+ job->writer_ctx = NULL;
|
||||
+ pthread_mutex_unlock(get_import_ctx_mutex());
|
||||
+ if (ctx) {
|
||||
pthread_mutex_destroy(&ctx->workerq.mutex);
|
||||
pthread_cond_destroy(&ctx->workerq.cv);
|
||||
slapi_ch_free((void**)&ctx->workerq.slots);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
|
||||
index 6386ecf06..05f1e348d 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
|
||||
@@ -287,6 +287,13 @@ int add_dbi(dbi_open_ctx_t *octx, backend *be, const char *fname, int flags)
|
||||
slapi_ch_free((void**)&treekey.dbname);
|
||||
return octx->rc;
|
||||
}
|
||||
+ if (treekey.dbi >= ctx->dsecfg.max_dbs) {
|
||||
+ octx->rc = MDB_DBS_FULL;
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "add_dbi", "Failed to open database instance %s slots: %d/%d. Error is %d: %s.\n",
|
||||
+ treekey.dbname, treekey.dbi, ctx->dsecfg.max_dbs, octx->rc, mdb_strerror(octx->rc));
|
||||
+ slapi_ch_free((void**)&treekey.dbname);
|
||||
+ return octx->rc;
|
||||
+ }
|
||||
if (octx->ai && octx->ai->ai_key_cmp_fn) {
|
||||
octx->rc = dbmdb_update_dbi_cmp_fn(ctx, &treekey, octx->ai->ai_key_cmp_fn, octx->txn);
|
||||
if (octx->rc) {
|
||||
@@ -689,6 +696,7 @@ int dbmdb_make_env(dbmdb_ctx_t *ctx, int readOnly, mdb_mode_t mode)
|
||||
rc = dbmdb_write_infofile(ctx);
|
||||
} else {
|
||||
/* No Config ==> read it from info file */
|
||||
+ ctx->dsecfg = ctx->startcfg;
|
||||
}
|
||||
if (rc) {
|
||||
return rc;
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
|
||||
index 86df986bd..f3bf68a9f 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
|
||||
@@ -505,7 +505,7 @@ int dblayer_show_statistics(const char *dbimpl_name, const char *dbhome, FILE *f
|
||||
li->li_plugin = be->be_database;
|
||||
li->li_plugin->plg_name = (char*) "back-ldbm-dbimpl";
|
||||
li->li_plugin->plg_libpath = (char*) "libback-ldbm";
|
||||
- li->li_directory = (char*)dbhome;
|
||||
+ li->li_directory = get_li_directory(dbhome);
|
||||
|
||||
/* Initialize database plugin */
|
||||
rc = dbimpl_setup(li, dbimpl_name);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
|
||||
index 2bb8cb581..30ec462fa 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/import.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/import.c
|
||||
@@ -27,6 +27,9 @@
|
||||
#define NEED_DN_NORM_SP -25
|
||||
#define NEED_DN_NORM_BT -26
|
||||
|
||||
+/* Protect against import context destruction */
|
||||
+static pthread_mutex_t import_ctx_mutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
+
|
||||
|
||||
/********** routines to manipulate the entry fifo **********/
|
||||
|
||||
@@ -143,6 +146,14 @@ ldbm_back_wire_import(Slapi_PBlock *pb)
|
||||
|
||||
/* Threads management */
|
||||
|
||||
+/* Return the mutex that protects against import context destruction */
|
||||
+pthread_mutex_t *
|
||||
+get_import_ctx_mutex()
|
||||
+{
|
||||
+ return &import_ctx_mutex;
|
||||
+}
|
||||
+
|
||||
+
|
||||
/* tell all the threads to abort */
|
||||
void
|
||||
import_abort_all(ImportJob *job, int wait_for_them)
|
||||
@@ -151,7 +162,7 @@ import_abort_all(ImportJob *job, int wait_for_them)
|
||||
|
||||
/* tell all the worker threads to abort */
|
||||
job->flags |= FLAG_ABORT;
|
||||
-
|
||||
+ pthread_mutex_lock(&import_ctx_mutex);
|
||||
for (worker = job->worker_list; worker; worker = worker->next)
|
||||
worker->command = ABORT;
|
||||
|
||||
@@ -167,6 +178,7 @@ import_abort_all(ImportJob *job, int wait_for_them)
|
||||
}
|
||||
}
|
||||
}
|
||||
+ pthread_mutex_unlock(&import_ctx_mutex);
|
||||
}
|
||||
|
||||
|
||||
--
|
||||
2.48.0
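To make the new sizing rule in mdb_config.c above concrete, here is a small arithmetic sketch; the entry counts and the DBMDB_DBS_MARGIN value are made-up illustration numbers, not values taken from the patch:

    # Counts as dbmdb_compute_limits() would gather them from cn=config (made up)
    nbsuffixes = 2         # nsMappingTree entries
    nbindexes = 30         # nsIndex entries, default indexes included
    nbvlvs = 1             # vlvIndex entries
    DBMDB_DBS_MARGIN = 10  # assumed margin value, the real constant may differ

    # Each suffix needs 4 database instances (long-entryrdn, replication_changelog,
    # id2entry and ancestorid); each VLV needs the index plus its cache.
    min_dbs = 4 * nbsuffixes + nbindexes + 2 * nbvlvs + DBMDB_DBS_MARGIN
    print(min_dbs)  # 50 with these example numbers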
72  0003-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch  Normal file
@@ -0,0 +1,72 @@
|
||||
From 6b80ba631161219093267e8e4c885bfc392d3d61 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Fri, 6 Sep 2024 14:45:06 +0200
|
||||
Subject: [PATCH] Issue 6090 - Fix dbscan options and man pages (#6315)
|
||||
|
||||
* Issue 6090 - Fix dbscan options and man pages
|
||||
|
||||
dbscan -d option is dangerously confusing as it removes a database instance while in db_stat it identify the database
|
||||
(cf issue #5609 ).
|
||||
This fix implements long options in dbscan, rename -d in --remove, and requires a new --do-it option for action that change the database content.
|
||||
The fix should also align both the usage and the dbscan man page with the new set of options
|
||||
|
||||
Issue: #6090
|
||||
|
||||
Reviewed by: @tbordaz, @droideck (Thanks!)
|
||||
|
||||
(cherry picked from commit 25e1d16887ebd299dfe0088080b9ee0deec1e41f)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/dbimpl.c | 5 ++++-
|
||||
src/lib389/lib389/cli_ctl/dblib.py | 13 ++++++++++++-
|
||||
2 files changed, 16 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
|
||||
index f3bf68a9f..83662df8c 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
|
||||
@@ -481,7 +481,10 @@ int dblayer_private_close(Slapi_Backend **be, dbi_env_t **env, dbi_db_t **db)
|
||||
slapi_ch_free_string(&li->li_directory);
|
||||
slapi_ch_free((void**)&li->li_dblayer_private);
|
||||
slapi_ch_free((void**)&li->li_dblayer_config);
|
||||
- ldbm_config_destroy(li);
|
||||
+ if (dblayer_is_lmdb(*be)) {
|
||||
+ /* Generate use after free and double free in bdb case */
|
||||
+ ldbm_config_destroy(li);
|
||||
+ }
|
||||
slapi_ch_free((void**)&(*be)->be_database);
|
||||
slapi_ch_free((void**)&(*be)->be_instance_info);
|
||||
slapi_ch_free((void**)be);
|
||||
diff --git a/src/lib389/lib389/cli_ctl/dblib.py b/src/lib389/lib389/cli_ctl/dblib.py
|
||||
index 053a72d61..318ae5ae9 100644
|
||||
--- a/src/lib389/lib389/cli_ctl/dblib.py
|
||||
+++ b/src/lib389/lib389/cli_ctl/dblib.py
|
||||
@@ -199,6 +199,14 @@ def run_dbscan(args):
|
||||
return output
|
||||
|
||||
|
||||
+def does_dbscan_need_do_it():
|
||||
+ prefix = os.environ.get('PREFIX', "")
|
||||
+ prog = f'{prefix}/bin/dbscan'
|
||||
+ args = [ prog, '-h' ]
|
||||
+ output = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
+ return '--do-it' in output.stdout
|
||||
+
|
||||
+
|
||||
def export_changelog(be, dblib):
|
||||
# Export backend changelog
|
||||
if not be['has_changelog']:
|
||||
@@ -217,7 +225,10 @@ def import_changelog(be, dblib):
|
||||
try:
|
||||
cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname']
|
||||
_log.info(f"Importing changelog {cl5dbname} from {be['cl5name']}")
|
||||
- run_dbscan(['-D', dblib, '-f', cl5dbname, '--import', be['cl5name'], '--do-it'])
|
||||
+ if does_dbscan_need_do_it():
|
||||
+ run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name'], '--do-it'])
|
||||
+ else:
|
||||
+ run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name']])
|
||||
return True
|
||||
except subprocess.CalledProcessError as e:
|
||||
return False
|
||||
--
|
||||
2.48.0
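As a usage illustration of the compatibility probe added to dblib.py above, here is a minimal sketch (the changelog name and export path are hypothetical) of assembling the dbscan import command and appending --do-it only when the installed dbscan advertises that flag:

    import os
    import subprocess

    def does_dbscan_need_do_it():
        # Probe dbscan's help text for the new --do-it safety flag
        prefix = os.environ.get('PREFIX', "")
        out = subprocess.run([f'{prefix}/bin/dbscan', '-h'], encoding='utf-8',
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        return '--do-it' in out.stdout

    # Hypothetical values for illustration
    dblib, cl5dbname, cl5name = 'mdb', 'replication_changelog', '/tmp/changelog.ldif'
    args = ['-D', dblib, '-f', cl5dbname, '-I', cl5name]
    if does_dbscan_need_do_it():
        args.append('--do-it')  # newer dbscan requires it for content-changing actions
    # run_dbscan(args) as in lib389's dblib.py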
146  0004-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch  Normal file
@@ -0,0 +1,146 @@
|
||||
From dc8032856d51c382e266eea72f66284e70a0e40c Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Fri, 31 Jan 2025 08:54:27 -0500
|
||||
Subject: [PATCH] Issue 6489 - After log rotation refresh the FD pointer
|
||||
|
||||
Description:
|
||||
|
||||
When flushing a log buffer we get a FD for log prior to checking if the
|
||||
log should be rotated. If the log is rotated that FD reference is now
|
||||
invalid, and it needs to be refrehed before proceeding
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6489
|
||||
|
||||
Reviewed by: tbordaz(Thanks!)
|
||||
---
|
||||
.../suites/logging/log_flush_rotation_test.py | 81 +++++++++++++++++++
|
||||
ldap/servers/slapd/log.c | 18 +++++
|
||||
2 files changed, 99 insertions(+)
|
||||
create mode 100644 dirsrvtests/tests/suites/logging/log_flush_rotation_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py b/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py
|
||||
new file mode 100644
|
||||
index 000000000..b33a622e1
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py
|
||||
@@ -0,0 +1,81 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import os
|
||||
+import logging
|
||||
+import time
|
||||
+import pytest
|
||||
+from lib389._constants import DEFAULT_SUFFIX, PW_DM
|
||||
+from lib389.tasks import ImportTask
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.topologies import topology_st as topo
|
||||
+
|
||||
+
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+
|
||||
+def test_log_flush_and_rotation_crash(topo):
|
||||
+ """Make sure server does not crash whening flushing a buffer and rotating
|
||||
+ the log at the same time
|
||||
+
|
||||
+ :id: d4b0af2f-48b2-45f5-ae8b-f06f692c3133
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Enable all logs
|
||||
+ 2. Enable log buffering for all logs
|
||||
+ 3. Set rotation time unit to 1 minute
|
||||
+ 4. Make sure server is still running after 1 minute
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topo.standalone
|
||||
+
|
||||
+ # Enable logging and buffering
|
||||
+ inst.config.set("nsslapd-auditlog-logging-enabled", "on")
|
||||
+ inst.config.set("nsslapd-accesslog-logbuffering", "on")
|
||||
+ inst.config.set("nsslapd-auditlog-logbuffering", "on")
|
||||
+ inst.config.set("nsslapd-errorlog-logbuffering", "on")
|
||||
+ inst.config.set("nsslapd-securitylog-logbuffering", "on")
|
||||
+
|
||||
+ # Set rotation policy to trigger rotation asap
|
||||
+ inst.config.set("nsslapd-accesslog-logrotationtimeunit", "minute")
|
||||
+ inst.config.set("nsslapd-auditlog-logrotationtimeunit", "minute")
|
||||
+ inst.config.set("nsslapd-errorlog-logrotationtimeunit", "minute")
|
||||
+ inst.config.set("nsslapd-securitylog-logrotationtimeunit", "minute")
|
||||
+
|
||||
+ #
|
||||
+ # Performs ops to populate all the logs
|
||||
+ #
|
||||
+ # Access & audit log
|
||||
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
|
||||
+ user = users.create_test_user()
|
||||
+ user.set("userPassword", PW_DM)
|
||||
+ # Security log
|
||||
+ user.bind(PW_DM)
|
||||
+ # Error log
|
||||
+ import_task = ImportTask(inst)
|
||||
+ import_task.import_suffix_from_ldif(ldiffile="/not/here",
|
||||
+ suffix=DEFAULT_SUFFIX)
|
||||
+
|
||||
+ # Wait a minute and make sure the server did not crash
|
||||
+ log.info("Sleep until logs are flushed and rotated")
|
||||
+ time.sleep(61)
|
||||
+
|
||||
+ assert inst.status()
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main(["-s", CURRENT_FILE])
|
||||
+
|
||||
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
|
||||
index 76f2b6768..7e2c980a4 100644
|
||||
--- a/ldap/servers/slapd/log.c
|
||||
+++ b/ldap/servers/slapd/log.c
|
||||
@@ -6746,6 +6746,23 @@ log_refresh_state(int32_t log_type)
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
+static LOGFD
|
||||
+log_refresh_fd(int32_t log_type)
|
||||
+{
|
||||
+ switch (log_type) {
|
||||
+ case SLAPD_ACCESS_LOG:
|
||||
+ return loginfo.log_access_fdes;
|
||||
+ case SLAPD_SECURITY_LOG:
|
||||
+ return loginfo.log_security_fdes;
|
||||
+ case SLAPD_AUDIT_LOG:
|
||||
+ return loginfo.log_audit_fdes;
|
||||
+ case SLAPD_AUDITFAIL_LOG:
|
||||
+ return loginfo.log_auditfail_fdes;
|
||||
+ case SLAPD_ERROR_LOG:
|
||||
+ return loginfo.log_error_fdes;
|
||||
+ }
|
||||
+ return NULL;
|
||||
+}
|
||||
|
||||
/* this function assumes the lock is already acquired */
|
||||
/* if sync_now is non-zero, data is flushed to physical storage */
|
||||
@@ -6857,6 +6874,7 @@ log_flush_buffer(LogBufferInfo *lbi, int log_type, int sync_now, int locked)
|
||||
rotationtime_secs);
|
||||
}
|
||||
log_state = log_refresh_state(log_type);
|
||||
+ fd = log_refresh_fd(log_type);
|
||||
}
|
||||
|
||||
if (log_state & LOGGING_NEED_TITLE) {
|
||||
--
|
||||
2.48.0
@@ -1,4 +1,4 @@
|
||||
From 1845aed98becaba6b975342229cb5e0de79d208d Mon Sep 17 00:00:00 2001
|
||||
From 90460bfa66fb77118967927963572f69e097c4eb Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 29 Jan 2025 17:41:55 +0000
|
||||
Subject: [PATCH] Issue 6436 - MOD on a large group slow if substring index is
|
||||
@@ -23,10 +23,10 @@ Reviewed by: @Firstyear, @tbordaz, @droideck (Thanks)
|
||||
3 files changed, 137 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
index 6d3d08bfa..747699486 100644
|
||||
index e1e5398ab..f09bc8bb8 100644
|
||||
--- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
+++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
@@ -212,6 +212,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
|
||||
@@ -167,6 +167,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
|
||||
MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
|
||||
|
||||
standalone = topology_st.standalone
|
||||
@@ -34,7 +34,7 @@ index 6d3d08bfa..747699486 100644
|
||||
|
||||
log.info('Enable RI plugin')
|
||||
plugin = ReferentialIntegrityPlugin(standalone)
|
||||
@@ -233,7 +234,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
|
||||
@@ -188,7 +189,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
|
||||
|
||||
|
||||
def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
@@ -43,7 +43,7 @@ index 6d3d08bfa..747699486 100644
|
||||
|
||||
:id: 236b0ec2-13da-48fb-b65a-db7406d56d5d
|
||||
:setup: Standalone instance
|
||||
@@ -248,8 +249,8 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
@@ -203,8 +204,8 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
:expectedresults:
|
||||
1. Success
|
||||
2. Success
|
||||
@@ -54,7 +54,7 @@ index 6d3d08bfa..747699486 100644
|
||||
5. Success
|
||||
6. Healthcheck reports no issue found
|
||||
7. Healthcheck reports no issue found
|
||||
@@ -259,6 +260,7 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
@@ -214,6 +215,7 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
MO_GROUP_ATTR = 'creatorsname'
|
||||
|
||||
standalone = topology_st.standalone
|
||||
@@ -62,8 +62,8 @@ index 6d3d08bfa..747699486 100644
|
||||
|
||||
log.info('Enable MO plugin')
|
||||
plugin = MemberOfPlugin(standalone)
|
||||
@@ -279,6 +281,87 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
|
||||
@@ -236,6 +238,87 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
standalone.restart()
|
||||
|
||||
|
||||
+def test_healthcheck_MO_plugin_substring_index(topology_st):
|
||||
@@ -147,14 +147,14 @@ index 6d3d08bfa..747699486 100644
|
||||
+ standalone.restart()
|
||||
+
|
||||
+
|
||||
@pytest.mark.ds50873
|
||||
@pytest.mark.bz1685160
|
||||
@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented")
|
||||
def test_healthcheck_virtual_attr_incorrectly_indexed(topology_st):
|
||||
"""Check if HealthCheck returns DSVIRTLE0001 code
|
||||
diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
|
||||
index 4d9cbb666..3d3c79ea3 100644
|
||||
index d0747f0f4..460bf64fc 100644
|
||||
--- a/src/lib389/lib389/lint.py
|
||||
+++ b/src/lib389/lib389/lint.py
|
||||
@@ -231,6 +231,21 @@ database after adding the missing index type. Here is an example using dsconf:
|
||||
@@ -270,6 +270,21 @@ database after adding the missing index type. Here is an example using dsconf:
|
||||
"""
|
||||
}
|
||||
|
||||
@@ -177,7 +177,7 @@ index 4d9cbb666..3d3c79ea3 100644
|
||||
DSDSLE0001 = {
|
||||
'dsle': 'DSDSLE0001',
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 6bf1843ad..185398e5b 100644
|
||||
index 67af93a14..31bbfa502 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -12,7 +12,7 @@ import copy
|
||||
@@ -232,5 +232,5 @@ index 6bf1843ad..185398e5b 100644
|
||||
"""Get memberofattr attribute"""
|
||||
|
||||
--
|
||||
2.48.1
|
||||
2.48.0
@@ -0,0 +1,70 @@
|
||||
From dcb6298db5bfef4b2541f7c52682d153b424bfa7 Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Tue, 4 Feb 2025 15:40:16 +0000
|
||||
Subject: [PATCH] Issue 6566 - RI plugin failure to handle a modrdn for rename
|
||||
of member of multiple groups (#6567)
|
||||
|
||||
Bug description:
|
||||
With AM and RI plugins enabled, the rename of a user that is part of multiple groups
|
||||
fails with a "value exists" error.
|
||||
|
||||
Fix description:
|
||||
For a modrdn the RI plugin creates a new DN, before a modify is attempted check
|
||||
if the new DN already exists in the attr being updated.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6566
|
||||
|
||||
Reviewed by: @progier389 , @tbordaz (Thank you)
|
||||
---
|
||||
ldap/servers/plugins/referint/referint.c | 15 ++++++++++++---
|
||||
1 file changed, 12 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
|
||||
index 468fdc239..218863ea5 100644
|
||||
--- a/ldap/servers/plugins/referint/referint.c
|
||||
+++ b/ldap/servers/plugins/referint/referint.c
|
||||
@@ -924,6 +924,7 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
|
||||
{
|
||||
Slapi_Mods *smods = NULL;
|
||||
char *newDN = NULL;
|
||||
+ struct berval bv = {0};
|
||||
char **dnParts = NULL;
|
||||
char *sval = NULL;
|
||||
char *newvalue = NULL;
|
||||
@@ -1026,22 +1027,30 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
|
||||
}
|
||||
/* else: normalize_rc < 0) Ignore the DN normalization error for now. */
|
||||
|
||||
+ bv.bv_val = newDN;
|
||||
+ bv.bv_len = strlen(newDN);
|
||||
p = PL_strstr(sval, slapi_sdn_get_ndn(origDN));
|
||||
if (p == sval) {
|
||||
/* (case 1) */
|
||||
slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
|
||||
- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
|
||||
-
|
||||
+ /* Add only if the attr value does not exist */
|
||||
+ if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) {
|
||||
+ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
|
||||
+ }
|
||||
} else if (p) {
|
||||
/* (case 2) */
|
||||
slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
|
||||
*p = '\0';
|
||||
newvalue = slapi_ch_smprintf("%s%s", sval, newDN);
|
||||
- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue);
|
||||
+ /* Add only if the attr value does not exist */
|
||||
+ if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) {
|
||||
+ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue);
|
||||
+ }
|
||||
slapi_ch_free_string(&newvalue);
|
||||
}
|
||||
/* else: value does not include the modified DN. Ignore it. */
|
||||
slapi_ch_free_string(&sval);
|
||||
+ bv = (struct berval){0};
|
||||
}
|
||||
rc = _do_modify(mod_pb, entrySDN, slapi_mods_get_ldapmods_byref(smods));
|
||||
if (rc) {
|
||||
--
|
||||
2.48.0
@@ -0,0 +1,43 @@
From be57ea839934c29b3f4db450a65281aa30a72caf Mon Sep 17 00:00:00 2001
From: Masahiro Matsuya <mmatsuya@redhat.com>
Date: Wed, 5 Feb 2025 11:38:28 +0900
Subject: [PATCH] Issue 6258 - Mitigate race condition in paged_results_test.py
(#6433)

The regression test dirsrvtests/tests/suites/paged_results/paged_results_test.py::test_multi_suffix_search has a race condition causing it to fail due to multiple queries potentially writing their logs out of chronological order.

This failure is mitigated by sorting the retrieved access_log_lines by their "op" value. This ensures the log lines are in chronological order, as expected by the assertions at the end of test_multi_suffix_search().

Helps fix: #6258

Reviewed by: @droideck , @progier389 (Thanks!)

Co-authored-by: Anuar Beisembayev <111912342+abeisemb@users.noreply.github.com>
---
dirsrvtests/tests/suites/paged_results/paged_results_test.py | 3 +++
1 file changed, 3 insertions(+)

diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
index eaf0e0da9..fca48db0f 100644
--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py
+++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
@@ -7,6 +7,7 @@
# --- END COPYRIGHT BLOCK ---
#
import socket
+import re
from random import sample, randrange

import pytest
@@ -1126,6 +1127,8 @@ def test_multi_suffix_search(topology_st, create_user, new_suffixes):
topology_st.standalone.restart(timeout=10)

access_log_lines = topology_st.standalone.ds_access_log.match('.*pr_cookie=.*')
+ # Sort access_log_lines by op number to mitigate race condition effects.
+ access_log_lines.sort(key=lambda x: int(re.search(r"op=(\d+) RESULT", x).group(1)))
pr_cookie_list = ([line.rsplit('=', 1)[-1] for line in access_log_lines])
pr_cookie_list = [int(pr_cookie) for pr_cookie in pr_cookie_list]
log.info('Assert that last pr_cookie == -1 and others pr_cookie == 0')
--
2.48.0
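To see what the two lines added to paged_results_test.py do, here is a standalone sketch with made-up access-log RESULT lines arriving out of chronological order:

    import re

    # Made-up lines; the real ones come from ds_access_log.match('.*pr_cookie=.*')
    access_log_lines = [
        'conn=1 op=12 RESULT err=0 nentries=5 pr_cookie=-1',
        'conn=1 op=3 RESULT err=0 nentries=10 pr_cookie=0',
        'conn=1 op=7 RESULT err=0 nentries=10 pr_cookie=0',
    ]
    # Sort by the numeric op value so the lines are in operation (chronological) order
    access_log_lines.sort(key=lambda x: int(re.search(r"op=(\d+) RESULT", x).group(1)))
    pr_cookie_list = [int(line.rsplit('=', 1)[-1]) for line in access_log_lines]
    assert pr_cookie_list == [0, 0, -1]  # only the last operation carries pr_cookie=-1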
566  0008-Issue-6229-After-an-initial-failure-subsequent-onlin.patch  Normal file
@@ -0,0 +1,566 @@
|
||||
From 8e3a484f88fc9f9a3fcdfdd685d4ad2ed3cbe5d9 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Fri, 28 Jun 2024 18:56:49 +0200
|
||||
Subject: [PATCH] Issue 6229 - After an initial failure, subsequent online
|
||||
backups fail (#6230)
|
||||
|
||||
* Issue 6229 - After an initial failure, subsequent online backups will not work
|
||||
|
||||
Several issues related to backup task error handling:
|
||||
Backends stay busy after the failure
|
||||
Exit code is 0 in some cases
|
||||
Crash if failing to open the backup directory
|
||||
And a more general one:
|
||||
lib389 Task DN collision
|
||||
|
||||
Solutions:
|
||||
Always reset the busy flags that have been set
|
||||
Ensure that 0 is not returned in error case
|
||||
Avoid closing NULL directory descriptor
|
||||
Use a timestamp having milliseconds precision to create the task DN
|
||||
|
||||
Issue: #6229
|
||||
|
||||
Reviewed by: @droideck (Thanks!)
|
||||
|
||||
(cherry picked from commit 04a0b6ac776a1d588ec2e10ff651e5015078ad21)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/archive.c | 45 +++++-----
|
||||
.../slapd/back-ldbm/db-mdb/mdb_layer.c | 3 +
|
||||
src/lib389/lib389/__init__.py | 10 +--
|
||||
src/lib389/lib389/tasks.py | 82 +++++++++----------
|
||||
4 files changed, 70 insertions(+), 70 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/archive.c b/ldap/servers/slapd/back-ldbm/archive.c
|
||||
index 0460a42f6..6658cc80a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/archive.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/archive.c
|
||||
@@ -16,6 +16,8 @@
|
||||
#include "back-ldbm.h"
|
||||
#include "dblayer.h"
|
||||
|
||||
+#define NO_OBJECT ((Object*)-1)
|
||||
+
|
||||
int
|
||||
ldbm_temporary_close_all_instances(Slapi_PBlock *pb)
|
||||
{
|
||||
@@ -270,6 +272,7 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
|
||||
int run_from_cmdline = 0;
|
||||
Slapi_Task *task;
|
||||
struct stat sbuf;
|
||||
+ Object *last_busy_inst_obj = NO_OBJECT;
|
||||
|
||||
slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
|
||||
slapi_pblock_get(pb, SLAPI_SEQ_VAL, &rawdirectory);
|
||||
@@ -380,13 +383,12 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
|
||||
|
||||
/* to avoid conflict w/ import, do this check for commandline, as well */
|
||||
{
|
||||
- Object *inst_obj, *inst_obj2;
|
||||
ldbm_instance *inst = NULL;
|
||||
|
||||
/* server is up -- mark all backends busy */
|
||||
- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
|
||||
- inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
|
||||
- inst = (ldbm_instance *)object_get_data(inst_obj);
|
||||
+ for (last_busy_inst_obj = objset_first_obj(li->li_instance_set); last_busy_inst_obj;
|
||||
+ last_busy_inst_obj = objset_next_obj(li->li_instance_set, last_busy_inst_obj)) {
|
||||
+ inst = (ldbm_instance *)object_get_data(last_busy_inst_obj);
|
||||
|
||||
/* check if an import/restore is already ongoing... */
|
||||
if (instance_set_busy(inst) != 0 || dblayer_in_import(inst) != 0) {
|
||||
@@ -400,20 +402,6 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
|
||||
"another task and cannot be disturbed.",
|
||||
inst->inst_name);
|
||||
}
|
||||
-
|
||||
- /* painfully, we have to clear the BUSY flags on the
|
||||
- * backends we'd already marked...
|
||||
- */
|
||||
- for (inst_obj2 = objset_first_obj(li->li_instance_set);
|
||||
- inst_obj2 && (inst_obj2 != inst_obj);
|
||||
- inst_obj2 = objset_next_obj(li->li_instance_set,
|
||||
- inst_obj2)) {
|
||||
- inst = (ldbm_instance *)object_get_data(inst_obj2);
|
||||
- instance_set_not_busy(inst);
|
||||
- }
|
||||
- if (inst_obj2 && inst_obj2 != inst_obj)
|
||||
- object_release(inst_obj2);
|
||||
- object_release(inst_obj);
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
@@ -427,18 +415,26 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
|
||||
goto err;
|
||||
}
|
||||
|
||||
- if (!run_from_cmdline) {
|
||||
+err:
|
||||
+ /* Clear all BUSY flags that have been previously set */
|
||||
+ if (last_busy_inst_obj != NO_OBJECT) {
|
||||
ldbm_instance *inst;
|
||||
Object *inst_obj;
|
||||
|
||||
- /* none of these backends are busy anymore */
|
||||
- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
|
||||
+ for (inst_obj = objset_first_obj(li->li_instance_set);
|
||||
+ inst_obj && (inst_obj != last_busy_inst_obj);
|
||||
inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
|
||||
inst = (ldbm_instance *)object_get_data(inst_obj);
|
||||
instance_set_not_busy(inst);
|
||||
}
|
||||
+ if (last_busy_inst_obj != NULL) {
|
||||
+ /* release last seen object for aborted objset_next_obj iterations */
|
||||
+ if (inst_obj != NULL) {
|
||||
+ object_release(inst_obj);
|
||||
+ }
|
||||
+ object_release(last_busy_inst_obj);
|
||||
+ }
|
||||
}
|
||||
-err:
|
||||
if (return_value) {
|
||||
if (dir_bak) {
|
||||
slapi_log_err(SLAPI_LOG_ERR,
|
||||
@@ -727,7 +723,10 @@ ldbm_archive_config(char *bakdir, Slapi_Task *task)
|
||||
}
|
||||
|
||||
error:
|
||||
- PR_CloseDir(dirhandle);
|
||||
+ if (NULL != dirhandle) {
|
||||
+ PR_CloseDir(dirhandle);
|
||||
+ dirhandle = NULL;
|
||||
+ }
|
||||
dse_backup_unlock();
|
||||
slapi_ch_free_string(&backup_config_dir);
|
||||
slapi_ch_free_string(&dse_file);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
index 4a7beedeb..3ecc47170 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
@@ -983,6 +983,9 @@ dbmdb_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task)
|
||||
if (ldbm_archive_config(dest_dir, task) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "dbmdb_backup",
|
||||
"Backup of config files failed or is incomplete\n");
|
||||
+ if (0 == return_value) {
|
||||
+ return_value = -1;
|
||||
+ }
|
||||
}
|
||||
|
||||
goto bail;
|
||||
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
|
||||
index 368741a66..cb372c138 100644
|
||||
--- a/src/lib389/lib389/__init__.py
|
||||
+++ b/src/lib389/lib389/__init__.py
|
||||
@@ -69,7 +69,7 @@ from lib389.utils import (
|
||||
get_user_is_root)
|
||||
from lib389.paths import Paths
|
||||
from lib389.nss_ssl import NssSsl
|
||||
-from lib389.tasks import BackupTask, RestoreTask
|
||||
+from lib389.tasks import BackupTask, RestoreTask, Task
|
||||
from lib389.dseldif import DSEldif
|
||||
|
||||
# mixin
|
||||
@@ -1424,7 +1424,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
name, self.ds_paths.prefix)
|
||||
|
||||
# create the archive
|
||||
- name = "backup_%s_%s.tar.gz" % (self.serverid, time.strftime("%m%d%Y_%H%M%S"))
|
||||
+ name = "backup_%s_%s.tar.gz" % (self.serverid, Task.get_timestamp())
|
||||
backup_file = os.path.join(backup_dir, name)
|
||||
tar = tarfile.open(backup_file, "w:gz")
|
||||
tar.extraction_filter = (lambda member, path: member)
|
||||
@@ -2810,7 +2810,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
else:
|
||||
# No output file specified. Use the default ldif location/name
|
||||
cmd.append('-a')
|
||||
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
|
||||
+ tnow = Task.get_timestamp()
|
||||
if bename:
|
||||
ldifname = os.path.join(self.ds_paths.ldif_dir, "%s-%s-%s.ldif" % (self.serverid, bename, tnow))
|
||||
else:
|
||||
@@ -2881,7 +2881,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
|
||||
if archive_dir is None:
|
||||
# Use the instance name and date/time as the default backup name
|
||||
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
|
||||
+ tnow = Task.get_timestamp()
|
||||
archive_dir = os.path.join(self.ds_paths.backup_dir, "%s-%s" % (self.serverid, tnow))
|
||||
elif not archive_dir.startswith("/"):
|
||||
# Relative path, append it to the bak directory
|
||||
@@ -3506,7 +3506,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
|
||||
if archive is None:
|
||||
# Use the instance name and date/time as the default backup name
|
||||
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
|
||||
+ tnow = Task.get_timestamp()
|
||||
if self.serverid is not None:
|
||||
backup_dir_name = "%s-%s" % (self.serverid, tnow)
|
||||
else:
|
||||
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
|
||||
index 6c2adb5b2..6bf302862 100644
|
||||
--- a/src/lib389/lib389/tasks.py
|
||||
+++ b/src/lib389/lib389/tasks.py
|
||||
@@ -118,7 +118,7 @@ class Task(DSLdapObject):
|
||||
return super(Task, self).create(rdn, properties, basedn)
|
||||
|
||||
@staticmethod
|
||||
- def _get_task_date():
|
||||
+ def get_timestamp():
|
||||
"""Return a timestamp to use in naming new task entries."""
|
||||
|
||||
return datetime.now().isoformat()
|
||||
@@ -132,7 +132,7 @@ class AutomemberRebuildMembershipTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'automember_rebuild_' + Task._get_task_date()
|
||||
+ self.cn = 'automember_rebuild_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_REBUILD_TASK
|
||||
|
||||
super(AutomemberRebuildMembershipTask, self).__init__(instance, dn)
|
||||
@@ -147,7 +147,7 @@ class AutomemberAbortRebuildTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'automember_abort_' + Task._get_task_date()
|
||||
+ self.cn = 'automember_abort_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_ABORT_REBUILD_TASK
|
||||
|
||||
super(AutomemberAbortRebuildTask, self).__init__(instance, dn)
|
||||
@@ -161,7 +161,7 @@ class FixupLinkedAttributesTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'fixup_linked_attrs_' + Task._get_task_date()
|
||||
+ self.cn = 'fixup_linked_attrs_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_FIXUP_LINKED_ATTIBUTES
|
||||
|
||||
super(FixupLinkedAttributesTask, self).__init__(instance, dn)
|
||||
@@ -175,7 +175,7 @@ class MemberUidFixupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'memberUid_fixup_' + Task._get_task_date()
|
||||
+ self.cn = 'memberUid_fixup_' + Task.get_timestamp()
|
||||
dn = f"cn={self.cn},cn=memberuid task,cn=tasks,cn=config"
|
||||
|
||||
super(MemberUidFixupTask, self).__init__(instance, dn)
|
||||
@@ -190,7 +190,7 @@ class MemberOfFixupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'memberOf_fixup_' + Task._get_task_date()
|
||||
+ self.cn = 'memberOf_fixup_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_MBO_TASK
|
||||
|
||||
super(MemberOfFixupTask, self).__init__(instance, dn)
|
||||
@@ -205,7 +205,7 @@ class USNTombstoneCleanupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'usn_cleanup_' + Task._get_task_date()
|
||||
+ self.cn = 'usn_cleanup_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=USN tombstone cleanup task," + DN_TASKS
|
||||
|
||||
super(USNTombstoneCleanupTask, self).__init__(instance, dn)
|
||||
@@ -225,7 +225,7 @@ class csngenTestTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'csngenTest_' + Task._get_task_date()
|
||||
+ self.cn = 'csngenTest_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=csngen_test," + DN_TASKS
|
||||
super(csngenTestTask, self).__init__(instance, dn)
|
||||
|
||||
@@ -238,7 +238,7 @@ class EntryUUIDFixupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'entryuuid_fixup_' + Task._get_task_date()
|
||||
+ self.cn = 'entryuuid_fixup_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_EUUID_TASK
|
||||
super(EntryUUIDFixupTask, self).__init__(instance, dn)
|
||||
self._must_attributes.extend(['basedn'])
|
||||
@@ -252,7 +252,7 @@ class DBCompactTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'compact_db_' + Task._get_task_date()
|
||||
+ self.cn = 'compact_db_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_COMPACTDB_TASK
|
||||
super(DBCompactTask, self).__init__(instance, dn)
|
||||
|
||||
@@ -265,7 +265,7 @@ class SchemaReloadTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'schema_reload_' + Task._get_task_date()
|
||||
+ self.cn = 'schema_reload_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=schema reload task," + DN_TASKS
|
||||
super(SchemaReloadTask, self).__init__(instance, dn)
|
||||
|
||||
@@ -278,7 +278,7 @@ class SyntaxValidateTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'syntax_validate_' + Task._get_task_date()
|
||||
+ self.cn = 'syntax_validate_' + Task.get_timestamp()
|
||||
dn = f"cn={self.cn},cn=syntax validate,cn=tasks,cn=config"
|
||||
|
||||
super(SyntaxValidateTask, self).__init__(instance, dn)
|
||||
@@ -295,7 +295,7 @@ class AbortCleanAllRUVTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'abortcleanallruv_' + Task._get_task_date()
|
||||
+ self.cn = 'abortcleanallruv_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=abort cleanallruv," + DN_TASKS
|
||||
|
||||
super(AbortCleanAllRUVTask, self).__init__(instance, dn)
|
||||
@@ -312,7 +312,7 @@ class CleanAllRUVTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'cleanallruv_' + Task._get_task_date()
|
||||
+ self.cn = 'cleanallruv_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=cleanallruv," + DN_TASKS
|
||||
self._properties = None
|
||||
|
||||
@@ -359,7 +359,7 @@ class ImportTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'import_' + Task._get_task_date()
|
||||
+ self.cn = 'import_' + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (self.cn, DN_IMPORT_TASK)
|
||||
self._properties = None
|
||||
|
||||
@@ -388,7 +388,7 @@ class ExportTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'export_' + Task._get_task_date()
|
||||
+ self.cn = 'export_' + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (self.cn, DN_EXPORT_TASK)
|
||||
self._properties = None
|
||||
|
||||
@@ -411,7 +411,7 @@ class BackupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'backup_' + Task._get_task_date()
|
||||
+ self.cn = 'backup_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=backup," + DN_TASKS
|
||||
self._properties = None
|
||||
|
||||
@@ -426,7 +426,7 @@ class RestoreTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'restore_' + Task._get_task_date()
|
||||
+ self.cn = 'restore_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=restore," + DN_TASKS
|
||||
self._properties = None
|
||||
|
||||
@@ -513,7 +513,7 @@ class Tasks(object):
|
||||
raise ValueError("Import file (%s) does not exist" % input_file)
|
||||
|
||||
# Prepare the task entry
|
||||
- cn = "import_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = "import_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_IMPORT_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -581,7 +581,7 @@ class Tasks(object):
|
||||
raise ValueError("output_file is mandatory")
|
||||
|
||||
# Prepare the task entry
|
||||
- cn = "export_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = "export_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_EXPORT_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.update({
|
||||
@@ -637,7 +637,7 @@ class Tasks(object):
|
||||
raise ValueError("You must specify a backup directory.")
|
||||
|
||||
# build the task entry
|
||||
- cn = "backup_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = "backup_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_BACKUP_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.update({
|
||||
@@ -694,7 +694,7 @@ class Tasks(object):
|
||||
raise ValueError("Backup file (%s) does not exist" % backup_dir)
|
||||
|
||||
# build the task entry
|
||||
- cn = "restore_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = "restore_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_RESTORE_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.update({
|
||||
@@ -789,7 +789,7 @@ class Tasks(object):
|
||||
attrs.append(attr)
|
||||
else:
|
||||
attrs.append(attrname)
|
||||
- cn = "index_vlv_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
|
||||
+ cn = "index_vlv_%s" % (Task.get_timestamp())
|
||||
dn = "cn=%s,%s" % (cn, DN_INDEX_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.update({
|
||||
@@ -803,7 +803,7 @@ class Tasks(object):
|
||||
#
|
||||
# Reindex all attributes - gather them first...
|
||||
#
|
||||
- cn = "index_all_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
|
||||
+ cn = "index_all_%s" % (Task.get_timestamp())
|
||||
dn = ('cn=%s,cn=ldbm database,cn=plugins,cn=config' % backend)
|
||||
try:
|
||||
indexes = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, '(objectclass=nsIndex)')
|
||||
@@ -815,7 +815,7 @@ class Tasks(object):
|
||||
#
|
||||
# Reindex specific attributes
|
||||
#
|
||||
- cn = "index_attrs_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
|
||||
+ cn = "index_attrs_%s" % (Task.get_timestamp())
|
||||
if isinstance(attrname, (tuple, list)):
|
||||
# Need to guarantee this is a list (and not a tuple)
|
||||
for attr in attrname:
|
||||
@@ -903,8 +903,7 @@ class Tasks(object):
|
||||
|
||||
suffix = ents[0].getValue(attr)
|
||||
|
||||
- cn = "fixupmemberof_" + time.strftime("%m%d%Y_%H%M%S",
|
||||
- time.localtime())
|
||||
+ cn = "fixupmemberof_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_MBO_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -965,8 +964,7 @@ class Tasks(object):
|
||||
if len(ents) != 1:
|
||||
raise ValueError("invalid backend name: %s" % bename)
|
||||
|
||||
- cn = "fixupTombstone_" + time.strftime("%m%d%Y_%H%M%S",
|
||||
- time.localtime())
|
||||
+ cn = "fixupTombstone_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_TOMB_FIXUP_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1019,7 +1017,7 @@ class Tasks(object):
|
||||
@return exit code
|
||||
'''
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=automember rebuild membership,cn=tasks,cn=config' % cn)
|
||||
|
||||
entry = Entry(dn)
|
||||
@@ -1077,7 +1075,7 @@ class Tasks(object):
|
||||
if not ldif_out:
|
||||
raise ValueError("Missing ldif_out")
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=automember export updates,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1129,7 +1127,7 @@ class Tasks(object):
|
||||
if not ldif_out or not ldif_in:
|
||||
raise ValueError("Missing ldif_out and/or ldif_in")
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=automember map updates,cn=tasks,cn=config' % cn)
|
||||
|
||||
entry = Entry(dn)
|
||||
@@ -1175,7 +1173,7 @@ class Tasks(object):
|
||||
@return exit code
|
||||
'''
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=fixup linked attributes,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1219,7 +1217,7 @@ class Tasks(object):
|
||||
@return exit code
|
||||
'''
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=schema reload task,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1264,7 +1262,7 @@ class Tasks(object):
|
||||
@return exit code
|
||||
'''
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=memberuid task,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1311,7 +1309,7 @@ class Tasks(object):
|
||||
@return exit code
|
||||
'''
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=syntax validate,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1358,7 +1356,7 @@ class Tasks(object):
|
||||
@return exit code
|
||||
'''
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=USN tombstone cleanup task,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1413,7 +1411,7 @@ class Tasks(object):
|
||||
if not configfile:
|
||||
raise ValueError("Missing required paramter: configfile")
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=sysconfig reload,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1464,7 +1462,7 @@ class Tasks(object):
|
||||
if not suffix:
|
||||
raise ValueError("Missing required paramter: suffix")
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=cleanallruv,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1516,7 +1514,7 @@ class Tasks(object):
|
||||
if not suffix:
|
||||
raise ValueError("Missing required paramter: suffix")
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=abort cleanallruv,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1571,7 +1569,7 @@ class Tasks(object):
|
||||
if not nsArchiveDir:
|
||||
raise ValueError("Missing required paramter: nsArchiveDir")
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=upgradedb,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1616,6 +1614,6 @@ class LDAPIMappingReloadTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'reload-' + Task._get_task_date()
|
||||
+ self.cn = 'reload-' + Task.get_timestamp()
|
||||
dn = f'cn={self.cn},cn=reload ldapi mappings,cn=tasks,cn=config'
|
||||
super(LDAPIMappingReloadTask, self).__init__(instance, dn)
|
||||
--
2.48.0

165  0009-Issue-6554-During-import-of-entries-without-nsUnique.patch  Normal file
@@ -0,0 +1,165 @@
|
||||
From 2b1b2db90c9d337166fa28e313f60828cd43de09 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Thu, 6 Feb 2025 18:25:36 +0100
|
||||
Subject: [PATCH] Issue 6554 - During import of entries without nsUniqueId, a
|
||||
supplier generates duplicate nsUniqueId (LMDB only) (#6582)
|
||||
|
||||
Bug description:
|
||||
During an import the entry is prepared (schema, operational
|
||||
attributes, password encryption,...) before starting the
|
||||
update of the database and indexes.
|
||||
A step of the preparation is to assign a value to 'nsuniqueid'
|
||||
operational attribute. 'nsuniqueid' must be unique.
|
||||
In LMDB the preparation is done by multiple threads (workers).
|
||||
In such case the 'nsuniqueid' are generated in parallel and
|
||||
as it is time based several values can be duplicated.
|
||||
|
||||
Fix description:
|
||||
To prevent that the routine dbmdb_import_generate_uniqueid
|
||||
should make sure to synchronize the workers.
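
In outline, the approach is the one sketched below (a simplified, illustrative C fragment, not the patch itself; the wrapper name is hypothetical and only slapi_uniqueIDGenerateString() is a real server routine): take a process-wide mutex around the time-based generator call so no two import workers draw an ID at the same instant.

/* Simplified sketch of the serialization idea used by the fix.
 * Several import workers call the time-based ID generator, so two calls
 * made concurrently can observe the same timestamp; holding a static
 * mutex across the call prevents concurrent generation, which is how
 * the patch avoids duplicate nsuniqueid values. */
#include <pthread.h>

extern int slapi_uniqueIDGenerateString(char **uid);  /* prototype as used in the patch */

static pthread_mutex_t uid_lock = PTHREAD_MUTEX_INITIALIZER;

static int generate_uniqueid_serialized(char **out)   /* hypothetical wrapper name */
{
    int rc;
    pthread_mutex_lock(&uid_lock);
    rc = slapi_uniqueIDGenerateString(out);
    pthread_mutex_unlock(&uid_lock);
    return rc;
}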
|
||||
|
||||
fixes: #6554
|
||||
|
||||
Reviewed by: Pierre Rogier
|
||||
---
|
||||
.../tests/suites/import/import_test.py | 79 ++++++++++++++++++-
|
||||
.../back-ldbm/db-mdb/mdb_import_threads.c | 11 +++
|
||||
2 files changed, 89 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py
|
||||
index dbd921924..54d304753 100644
|
||||
--- a/dirsrvtests/tests/suites/import/import_test.py
|
||||
+++ b/dirsrvtests/tests/suites/import/import_test.py
|
||||
@@ -14,11 +14,13 @@ import os
|
||||
import pytest
|
||||
import time
|
||||
import glob
|
||||
+import re
|
||||
import logging
|
||||
import subprocess
|
||||
from datetime import datetime
|
||||
from lib389.topologies import topology_st as topo
|
||||
-from lib389._constants import DEFAULT_SUFFIX, TaskWarning
|
||||
+from lib389.topologies import topology_m2 as topo_m2
|
||||
+from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX, TaskWarning
|
||||
from lib389.dbgen import dbgen_users
|
||||
from lib389.tasks import ImportTask
|
||||
from lib389.index import Indexes
|
||||
@@ -688,6 +690,81 @@ def test_online_import_under_load(topo):
|
||||
assert import_task.get_exit_code() == 0
|
||||
|
||||
|
||||
+def test_duplicate_nsuniqueid(topo_m2, request):
|
||||
+ """Test that after an offline import all
|
||||
+ nsuniqueid are different
|
||||
+
|
||||
+ :id: a2541677-a288-4633-bacf-4050cc56016d
|
||||
+ :setup: MMR with 2 suppliers
|
||||
+ :steps:
|
||||
+ 1. stop the instance to do offline operations
|
||||
+ 2. Generate a 5K users LDIF file
|
||||
+ 3. Check that no uniqueid are present in the generated file
|
||||
+ 4. import the generated LDIF
|
||||
+ 5. export the database
|
||||
+ 6. Check that that exported LDIF contains more than 5K nsuniqueid
|
||||
+ 7. Check that there is no duplicate nsuniqued in exported LDIF
|
||||
+ :expectedresults:
|
||||
+ 1. Should succeeds
|
||||
+ 2. Should succeeds
|
||||
+ 3. Should succeeds
|
||||
+ 4. Should succeeds
|
||||
+ 5. Should succeeds
|
||||
+ 6. Should succeeds
|
||||
+ 7. Should succeeds
|
||||
+ """
|
||||
+ m1 = topo_m2.ms["supplier1"]
|
||||
+
|
||||
+ # Stop the instance
|
||||
+ m1.stop()
|
||||
+
|
||||
+ # Generate a test ldif (5k entries)
|
||||
+ log.info("Generating LDIF...")
|
||||
+ ldif_dir = m1.get_ldif_dir()
|
||||
+ import_ldif = ldif_dir + '/5k_users_import.ldif'
|
||||
+ dbgen_users(m1, 5000, import_ldif, DEFAULT_SUFFIX)
|
||||
+
|
||||
+ # Check that the generated LDIF does not contain nsuniqueid
|
||||
+ all_nsuniqueid = []
|
||||
+ with open(import_ldif, 'r') as file:
|
||||
+ for line in file:
|
||||
+ if line.lower().startswith("nsuniqueid: "):
|
||||
+ all_nsuniqueid.append(line.split(': ')[1])
|
||||
+ log.info("import file contains " + str(len(all_nsuniqueid)) + " nsuniqueid")
|
||||
+ assert len(all_nsuniqueid) == 0
|
||||
+
|
||||
+ # Import the "nsuniquied free" LDIF file
|
||||
+ if not m1.ldif2db('userRoot', None, None, None, import_ldif):
|
||||
+ assert False
|
||||
+
|
||||
+ # Export the DB that now should contain nsuniqueid
|
||||
+ export_ldif = ldif_dir + '/5k_user_export.ldif'
|
||||
+ log.info("export to file " + export_ldif)
|
||||
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
|
||||
+ excludeSuffixes=None, repl_data=False,
|
||||
+ outputfile=export_ldif, encrypt=False)
|
||||
+
|
||||
+ # Check that the export LDIF contain nsuniqueid
|
||||
+ all_nsuniqueid = []
|
||||
+ with open(export_ldif, 'r') as file:
|
||||
+ for line in file:
|
||||
+ if line.lower().startswith("nsuniqueid: "):
|
||||
+ all_nsuniqueid.append(line.split(': ')[1])
|
||||
+ log.info("export file " + export_ldif + " contains " + str(len(all_nsuniqueid)) + " nsuniqueid")
|
||||
+ assert len(all_nsuniqueid) >= 5000
|
||||
+
|
||||
+ # Check that the nsuniqueid are unique
|
||||
+ assert len(set(all_nsuniqueid)) == len(all_nsuniqueid)
|
||||
+
|
||||
+ def fin():
|
||||
+ if os.path.exists(import_ldif):
|
||||
+ os.remove(import_ldif)
|
||||
+ if os.path.exists(export_ldif):
|
||||
+ os.remove(export_ldif)
|
||||
+ m1.start
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
||||
index 707a110c5..0f445bb56 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
||||
@@ -610,10 +610,20 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e)
|
||||
{
|
||||
const char *uniqueid = slapi_entry_get_uniqueid(e);
|
||||
int rc = UID_SUCCESS;
|
||||
+ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
if (!uniqueid && (job->uuid_gen_type != SLAPI_UNIQUEID_GENERATE_NONE)) {
|
||||
char *newuniqueid;
|
||||
|
||||
+ /* With 'mdb' we have several workers generating nsuniqueid
|
||||
+ * we need to serialize them to prevent generating duplicate value
|
||||
+ * From performance pov it only impacts import
|
||||
+ * The default value is SLAPI_UNIQUEID_GENERATE_TIME_BASED so
|
||||
+ * the only syscall is clock_gettime and then string formating
|
||||
+ * that should limit contention
|
||||
+ */
|
||||
+ pthread_mutex_lock(&mutex);
|
||||
+
|
||||
/* generate id based on dn */
|
||||
if (job->uuid_gen_type == SLAPI_UNIQUEID_GENERATE_NAME_BASED) {
|
||||
char *dn = slapi_entry_get_dn(e);
|
||||
@@ -624,6 +634,7 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e)
|
||||
/* time based */
|
||||
rc = slapi_uniqueIDGenerateString(&newuniqueid);
|
||||
}
|
||||
+ pthread_mutex_unlock(&mutex);
|
||||
|
||||
if (rc == UID_SUCCESS) {
|
||||
slapi_entry_set_uniqueid(e, newuniqueid);
|
||||
--
2.48.0

77  0010-Issue-6596-BUG-Compilation-Regresion-6597.patch  Normal file
@@ -0,0 +1,77 @@
|
||||
From e638e801afd51ca44523222a90a9f69f4be82ae3 Mon Sep 17 00:00:00 2001
|
||||
From: Firstyear <william@blackhats.net.au>
|
||||
Date: Fri, 7 Feb 2025 14:47:29 +1000
|
||||
Subject: [PATCH] Issue 6596 - BUG - Compilation Regresion (#6597)
|
||||
|
||||
Bug Description: The addition of the json auditlog feature caused
|
||||
a regresion in compilation due to the use of labels in a declaration.
|
||||
|
||||
Fix Description: Enclose the switch/case in braces to resolve the
|
||||
compilation issue.
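
For context, a minimal standalone C sketch (not part of the upstream commit; the function and values are illustrative) of the language rule the fix relies on: before C23 a case label must be followed by a statement, not a declaration, so opening the case body with braces makes the declaration legal.

/* Illustrative only -- not taken from auditlog.c.  A declaration placed
 * directly after a case label is rejected by pre-C23 compilers because a
 * label must precede a statement; wrapping the case body in braces turns
 * it into a compound statement, which is the shape of the fix below. */
#include <stdio.h>

void demo(int op)
{
    switch (op) {
    case 1: {               /* braces open a compound statement */
        int len = 42;       /* declaration is now valid here */
        printf("len=%d\n", len);
        break;
    }
    default:
        break;
    }
}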
|
||||
|
||||
fixes: https://github.com/389ds/389-ds-base/issues/6596
|
||||
|
||||
Author: William Brown <william@blackhats.net.au>
|
||||
|
||||
Review by: @droideck Thanks!
|
||||
---
|
||||
ldap/servers/slapd/auditlog.c | 15 ++++++++-------
|
||||
1 file changed, 8 insertions(+), 7 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
|
||||
index c288a1a7f..ff9a6fdde 100644
|
||||
--- a/ldap/servers/slapd/auditlog.c
|
||||
+++ b/ldap/servers/slapd/auditlog.c
|
||||
@@ -456,7 +456,7 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
add_entry_attrs_json(entry, log_json);
|
||||
|
||||
switch (optype) {
|
||||
- case SLAPI_OPERATION_MODIFY:
|
||||
+ case SLAPI_OPERATION_MODIFY: {
|
||||
json_object *mod_list = json_object_new_array();
|
||||
mods = change;
|
||||
for (size_t j = 0; (mods != NULL) && (mods[j] != NULL); j++) {
|
||||
@@ -511,8 +511,8 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
/* Add entire mod list to the main object */
|
||||
json_object_object_add(log_json, "modify", mod_list);
|
||||
break;
|
||||
-
|
||||
- case SLAPI_OPERATION_ADD:
|
||||
+ }
|
||||
+ case SLAPI_OPERATION_ADD: {
|
||||
int len;
|
||||
e = change;
|
||||
tmp = slapi_entry2str(e, &len);
|
||||
@@ -526,8 +526,8 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
json_object_object_add(log_json, "add", json_object_new_string(tmp));
|
||||
slapi_ch_free_string(&tmpsave);
|
||||
break;
|
||||
-
|
||||
- case SLAPI_OPERATION_DELETE:
|
||||
+ }
|
||||
+ case SLAPI_OPERATION_DELETE: {
|
||||
tmp = change;
|
||||
del_obj = json_object_new_object();
|
||||
if (tmp && tmp[0]) {
|
||||
@@ -538,8 +538,8 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
json_object_object_add(log_json, "delete", del_obj);
|
||||
}
|
||||
break;
|
||||
-
|
||||
- case SLAPI_OPERATION_MODDN:
|
||||
+ }
|
||||
+ case SLAPI_OPERATION_MODDN: {
|
||||
newrdn = ((char **)change)[0];
|
||||
modrdn_obj = json_object_new_object();
|
||||
json_object_object_add(modrdn_obj, attr_newrdn, json_object_new_string(newrdn));
|
||||
@@ -551,6 +551,7 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
}
|
||||
json_object_object_add(log_json, "modrdn", modrdn_obj);
|
||||
break;
|
||||
+ }
|
||||
}
|
||||
|
||||
msg = (char *)json_object_to_json_string_ext(log_json, log_format);
|
||||
--
2.48.0

2106  0011-Issue-6367-RFE-support-of-Session-Tracking-Control-i.patch  Normal file  (diff suppressed because it is too large)
38  0012-Issue-6561-TLS-1.2-stickiness-in-FIPS-mode.patch  Normal file
@@ -0,0 +1,38 @@
|
||||
From 35f9253d6e988ccbf68e790404f05451a9c0708e Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Thu, 13 Feb 2025 16:37:43 +0100
|
||||
Subject: [PATCH] Issue 6561 - TLS 1.2 stickiness in FIPS mode
|
||||
|
||||
Description:
|
||||
TLS 1.3 works with NSS in FIPS mode for quite some time now,
|
||||
this restriction is no longer needed.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6561
|
||||
|
||||
Reviewed by: @mreynolds389 (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/ssl.c | 8 --------
|
||||
1 file changed, 8 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c
|
||||
index 94259efe7..84a7fb004 100644
|
||||
--- a/ldap/servers/slapd/ssl.c
|
||||
+++ b/ldap/servers/slapd/ssl.c
|
||||
@@ -1929,14 +1929,6 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
|
||||
*/
|
||||
sslStatus = SSL_VersionRangeGet(pr_sock, &slapdNSSVersions);
|
||||
if (sslStatus == SECSuccess) {
|
||||
- if (slapdNSSVersions.max > LDAP_OPT_X_TLS_PROTOCOL_TLS1_2 && fipsMode) {
|
||||
- /*
|
||||
- * FIPS & NSS currently only support a max version of TLS1.2
|
||||
- * (although NSS advertises 1.3 as a max range in FIPS mode),
|
||||
- * hopefully this code block can be removed soon...
|
||||
- */
|
||||
- slapdNSSVersions.max = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2;
|
||||
- }
|
||||
/* Reset request range */
|
||||
sslStatus = SSL_VersionRangeSet(pr_sock, &slapdNSSVersions);
|
||||
if (sslStatus == SECSuccess) {
|
||||
--
2.48.1

6214  0013-Issue-6375-UI-Update-cockpit.js-code-to-the-latest-v.patch  Normal file  (diff suppressed because it is too large)
138  0014-Issue-6623-UI-Generic-updates-6624.patch  Normal file
@@ -0,0 +1,138 @@
|
||||
From d3693c35f38632825f217e8bbb141cf42f6869da Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 19 Feb 2025 12:18:59 +0000
|
||||
Subject: [PATCH] Issue 6623 - UI - Generic updates (#6624)
|
||||
|
||||
Bug description:
|
||||
Missing dot in warning alert when changing suffix backend state.
|
||||
No success alert upon deleting local password policy.
|
||||
Tool tips for part of Syntax Settings for local password policies do not show.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6623
|
||||
|
||||
Reviewed by: @mreynolds389 (Thank you)
|
||||
---
|
||||
.../389-console/src/lib/database/localPwp.jsx | 29 +++++++++++--------
|
||||
.../389-console/src/lib/database/suffix.jsx | 4 +--
|
||||
2 files changed, 19 insertions(+), 14 deletions(-)
|
||||
|
||||
diff --git a/src/cockpit/389-console/src/lib/database/localPwp.jsx b/src/cockpit/389-console/src/lib/database/localPwp.jsx
|
||||
index b933b043c..e9148e2a1 100644
|
||||
--- a/src/cockpit/389-console/src/lib/database/localPwp.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/database/localPwp.jsx
|
||||
@@ -542,10 +542,10 @@ class CreatePolicy extends React.Component {
|
||||
isDisabled={!this.props.passwordchecksyntax}
|
||||
/>
|
||||
</GridItem>
|
||||
- <GridItem className="ds-label" offset={5} span={3} title={_("Reject passwords with fewer than this many alpha characters (passwordMinAlphas).")}>
|
||||
+ <GridItem className="ds-label" offset={5} span={3}>
|
||||
{_("Minimum Alpha's")}
|
||||
</GridItem>
|
||||
- <GridItem span={1}>
|
||||
+ <GridItem span={1} title={_("Reject passwords with fewer than this many alpha characters (passwordMinAlphas).")}>
|
||||
<TextInput
|
||||
type="number"
|
||||
id="create_passwordminalphas"
|
||||
@@ -574,10 +574,10 @@ class CreatePolicy extends React.Component {
|
||||
isDisabled={!this.props.passwordchecksyntax}
|
||||
/>
|
||||
</GridItem>
|
||||
- <GridItem className="ds-label" offset={5} span={3} title={_("Reject passwords with fewer than this many special non-alphanumeric characters (passwordMinSpecials).")}>
|
||||
+ <GridItem className="ds-label" offset={5} span={3}>
|
||||
{_("Minimum Special")}
|
||||
</GridItem>
|
||||
- <GridItem span={1}>
|
||||
+ <GridItem span={1} title={_("Reject passwords with fewer than this many special non-alphanumeric characters (passwordMinSpecials).")}>
|
||||
<TextInput
|
||||
type="number"
|
||||
id="create_passwordminspecials"
|
||||
@@ -606,10 +606,10 @@ class CreatePolicy extends React.Component {
|
||||
isDisabled={!this.props.passwordchecksyntax}
|
||||
/>
|
||||
</GridItem>
|
||||
- <GridItem className="ds-label" offset={5} span={3} title={_("Reject passwords with fewer than this many lowercase characters (passwordMinLowers).")}>
|
||||
+ <GridItem className="ds-label" offset={5} span={3}>
|
||||
{_("Minimum Lowercase")}
|
||||
</GridItem>
|
||||
- <GridItem span={1}>
|
||||
+ <GridItem span={1} title={_("Reject passwords with fewer than this many lowercase characters (passwordMinLowers).")}>
|
||||
<TextInput
|
||||
type="number"
|
||||
id="create_passwordminlowers"
|
||||
@@ -638,10 +638,10 @@ class CreatePolicy extends React.Component {
|
||||
isDisabled={!this.props.passwordchecksyntax}
|
||||
/>
|
||||
</GridItem>
|
||||
- <GridItem className="ds-label" offset={5} span={3} title={_("The minimum number of character categories that a password must contain (categories are upper, lower, digit, special, and 8-bit) (passwordMinCategories).")}>
|
||||
+ <GridItem className="ds-label" offset={5} span={3}>
|
||||
{_("Minimum Categories")}
|
||||
</GridItem>
|
||||
- <GridItem span={1}>
|
||||
+ <GridItem span={1} title={_("The minimum number of character categories that a password must contain (categories are upper, lower, digit, special, and 8-bit) (passwordMinCategories).")}>
|
||||
<TextInput
|
||||
type="number"
|
||||
id="create_passwordmincategories"
|
||||
@@ -670,10 +670,10 @@ class CreatePolicy extends React.Component {
|
||||
isDisabled={!this.props.passwordchecksyntax}
|
||||
/>
|
||||
</GridItem>
|
||||
- <GridItem className="ds-label" offset={5} span={3} title={_("The maximum number of times the same character can sequentially appear in a password (passwordMaxRepeats).")}>
|
||||
+ <GridItem className="ds-label" offset={5} span={3}>
|
||||
{_("Max Repeated Chars")}
|
||||
</GridItem>
|
||||
- <GridItem span={1}>
|
||||
+ <GridItem span={1} title={_("The maximum number of times the same character can sequentially appear in a password (passwordMaxRepeats).")}>
|
||||
<TextInput
|
||||
type="number"
|
||||
id="create_passwordmaxrepeats"
|
||||
@@ -702,10 +702,10 @@ class CreatePolicy extends React.Component {
|
||||
isDisabled={!this.props.passwordchecksyntax}
|
||||
/>
|
||||
</GridItem>
|
||||
- <GridItem className="ds-label" offset={5} span={3} title={_("The maximum number of times the same character can sequentially appear in a password (passwordMaxRepeats).")}>
|
||||
+ <GridItem className="ds-label" offset={5} span={3}>
|
||||
{_("Max Sequence Sets")}
|
||||
</GridItem>
|
||||
- <GridItem span={1}>
|
||||
+ <GridItem span={1} title={_("The maximum number of times the same character can sequentially appear in a password (passwordMaxRepeats).")}>
|
||||
<TextInput
|
||||
type="number"
|
||||
id="create_passwordmaxseqsets"
|
||||
@@ -1855,7 +1855,12 @@ export class LocalPwPolicy extends React.Component {
|
||||
.spawn(cmd, { superuser: true, err: "message" })
|
||||
.done(content => {
|
||||
this.handleLoadPolicies();
|
||||
+ this.props.addNotification(
|
||||
+ "success",
|
||||
+ "Successfully deleted password policy"
|
||||
+ )
|
||||
})
|
||||
+
|
||||
.fail(err => {
|
||||
const errMsg = JSON.parse(err);
|
||||
this.handleLoadPolicies();
|
||||
diff --git a/src/cockpit/389-console/src/lib/database/suffix.jsx b/src/cockpit/389-console/src/lib/database/suffix.jsx
|
||||
index c6f19e640..bd59653dd 100644
|
||||
--- a/src/cockpit/389-console/src/lib/database/suffix.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/database/suffix.jsx
|
||||
@@ -812,7 +812,7 @@ export class Suffix extends React.Component {
|
||||
savingConfig: true
|
||||
});
|
||||
log_cmd("saveSuffixConfig", "Save suffix config", cmd);
|
||||
- const msg = "Successfully updated suffix configuration";
|
||||
+ const msg = "Successfully updated suffix configuration.";
|
||||
cockpit
|
||||
.spawn(cmd, { superuser: true, err: "message" })
|
||||
.done(content => {
|
||||
@@ -821,7 +821,7 @@ export class Suffix extends React.Component {
|
||||
if (requireRestart) {
|
||||
this.props.addNotification(
|
||||
"warning",
|
||||
- msg + _("You must restart the Directory Server for these changes to take effect.")
|
||||
+ msg + _(" You must restart the Directory Server for these changes to take effect.")
|
||||
);
|
||||
}
|
||||
this.setState({
|
||||
--
2.48.1

1018  0015-Issue-6625-UI-fix-various-issues-with-LDAP-browser-e.patch  Normal file  (diff suppressed because it is too large)
2900  0016-Issue-6625-UI-fix-next-round-of-bugs.patch  Normal file  (diff suppressed because it is too large)
2641  0017-Issue-6625-UI-various-fixes-part-3.patch  Normal file  (diff suppressed because it is too large)
975  0018-Issue-6429-UI-clicking-on-a-database-suffix-under-th.patch  Normal file
@@ -0,0 +1,975 @@
|
||||
From 93ebe434023d1ba25d34950a77894bfd8854ea58 Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Thu, 6 Mar 2025 13:26:37 +0000
|
||||
Subject: [PATCH] Issue 6429 - UI - clicking on a database suffix under the
|
||||
Monitor tab crashes UI (#6610)
|
||||
|
||||
Bug description:
|
||||
Clicking on a db suffix under the Monitor tab causes the UI to crash when
|
||||
the instance is configured with the mdb db engine.
|
||||
|
||||
Fix description:
|
||||
Introduced separate database and suffix monitor classes tailored for mdb. Parent
|
||||
class detects the configured db engine and calls the appropriate monitor class.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6429
|
||||
|
||||
Reviewed by: @mreynolds389, @droideck (Thank you)
|
||||
---
|
||||
.../src/lib/database/databaseConfig.jsx | 1 -
|
||||
.../389-console/src/lib/monitor/dbMonitor.jsx | 385 +++++++++++++++++-
|
||||
.../src/lib/monitor/suffixMonitor.jsx | 363 ++++++++++++++++-
|
||||
src/cockpit/389-console/src/monitor.jsx | 97 ++++-
|
||||
4 files changed, 823 insertions(+), 23 deletions(-)
|
||||
|
||||
diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
|
||||
index 112625be9..52a2cf2df 100644
|
||||
--- a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
|
||||
@@ -1253,7 +1253,6 @@ export class GlobalDatabaseConfigMDB extends React.Component {
|
||||
// Check if a setting was changed, if so enable the save button
|
||||
for (const config_attr of check_attrs) {
|
||||
if (this.state[config_attr] !== this.state['_' + config_attr]) {
|
||||
- // jc console.log(config_attr);
|
||||
saveBtnDisabled = false;
|
||||
break;
|
||||
}
|
||||
diff --git a/src/cockpit/389-console/src/lib/monitor/dbMonitor.jsx b/src/cockpit/389-console/src/lib/monitor/dbMonitor.jsx
|
||||
index f3f51733b..08aa1aaea 100644
|
||||
--- a/src/cockpit/389-console/src/lib/monitor/dbMonitor.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/monitor/dbMonitor.jsx
|
||||
@@ -326,7 +326,6 @@ export class DatabaseMonitor extends React.Component {
|
||||
</GridItem>
|
||||
</Grid>
|
||||
</Tab>
|
||||
-
|
||||
<Tab eventKey={1} title={<TabTitleText>{_("Normalized DN Cache")}</TabTitleText>}>
|
||||
<div className="ds-margin-top-lg">
|
||||
<Grid hasGutter>
|
||||
@@ -511,12 +510,394 @@ export class DatabaseMonitor extends React.Component {
|
||||
// Prop types and defaults
|
||||
|
||||
DatabaseMonitor.propTypes = {
|
||||
+ data: PropTypes.object,
|
||||
serverId: PropTypes.string,
|
||||
enableTree: PropTypes.func,
|
||||
};
|
||||
|
||||
DatabaseMonitor.defaultProps = {
|
||||
+ data: {},
|
||||
serverId: "",
|
||||
};
|
||||
|
||||
-export default DatabaseMonitor;
|
||||
+export class DatabaseMonitorMDB extends React.Component {
|
||||
+ constructor (props) {
|
||||
+ super(props);
|
||||
+ this.state = {
|
||||
+ activeTabKey: 0,
|
||||
+ data: {},
|
||||
+ loading: true,
|
||||
+ // refresh chart
|
||||
+ cache_refresh: "",
|
||||
+ count: 10,
|
||||
+ ndnCount: 5,
|
||||
+ dbCacheList: [],
|
||||
+ ndnCacheList: [],
|
||||
+ ndnCacheUtilList: []
|
||||
+ };
|
||||
+
|
||||
+ // Toggle currently active tab
|
||||
+ this.handleNavSelect = (event, tabIndex) => {
|
||||
+ this.setState({
|
||||
+ activeTabKey: tabIndex
|
||||
+ });
|
||||
+ };
|
||||
+
|
||||
+ this.startCacheRefresh = this.startCacheRefresh.bind(this);
|
||||
+ this.refreshCache = this.refreshCache.bind(this);
|
||||
+ }
|
||||
+
|
||||
+ componentDidMount() {
|
||||
+ this.resetChartData();
|
||||
+ this.refreshCache();
|
||||
+ this.startCacheRefresh();
|
||||
+ this.props.enableTree();
|
||||
+ }
|
||||
+
|
||||
+ componentWillUnmount() {
|
||||
+ this.stopCacheRefresh();
|
||||
+ }
|
||||
+
|
||||
+ resetChartData() {
|
||||
+ this.setState({
|
||||
+ data: {
|
||||
+ normalizeddncachehitratio: [0],
|
||||
+ maxnormalizeddncachesize: [0],
|
||||
+ currentnormalizeddncachesize: [0],
|
||||
+ normalizeddncachetries: [0],
|
||||
+ normalizeddncachehits: [0],
|
||||
+ normalizeddncacheevictions: [0],
|
||||
+ currentnormalizeddncachecount: [0],
|
||||
+ normalizeddncachethreadsize: [0],
|
||||
+ normalizeddncachethreadslots: [0],
|
||||
+ },
|
||||
+ ndnCacheList: [
|
||||
+ { name: "", x: "1", y: 0 },
|
||||
+ { name: "", x: "2", y: 0 },
|
||||
+ { name: "", x: "3", y: 0 },
|
||||
+ { name: "", x: "4", y: 0 },
|
||||
+ { name: "", x: "5", y: 0 },
|
||||
+ ],
|
||||
+ ndnCacheUtilList: [
|
||||
+ { name: "", x: "1", y: 0 },
|
||||
+ { name: "", x: "2", y: 0 },
|
||||
+ { name: "", x: "3", y: 0 },
|
||||
+ { name: "", x: "4", y: 0 },
|
||||
+ { name: "", x: "5", y: 0 },
|
||||
+ ],
|
||||
+ });
|
||||
+ }
|
||||
+
|
||||
+ refreshCache() {
|
||||
+ // Search for db cache stat and update state
|
||||
+ const cmd = [
|
||||
+ "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
+ "monitor", "ldbm"
|
||||
+ ];
|
||||
+ cockpit
|
||||
+ .spawn(cmd, { superuser: true, err: "message" })
|
||||
+ .done(content => {
|
||||
+ const config = JSON.parse(content);
|
||||
+ let count = this.state.count + 1;
|
||||
+ const ndnCount = this.state.ndnCount + 1;
|
||||
+ if (count > 100) {
|
||||
+ // Keep progress count in check
|
||||
+ count = 1;
|
||||
+ }
|
||||
+
|
||||
+ // Build up the DB Cache chart data
|
||||
+ const dbratio = config.attrs.dbcachehitratio[0];
|
||||
+ const chart_data = this.state.dbCacheList;
|
||||
+ chart_data.shift();
|
||||
+ chart_data.push({ name: _("Cache Hit Ratio"), x: count.toString(), y: parseInt(dbratio) });
|
||||
+
|
||||
+ // Build up the NDN Cache chart data
|
||||
+ const ndnratio = config.attrs.normalizeddncachehitratio[0];
|
||||
+ const ndn_chart_data = this.state.ndnCacheList;
|
||||
+ ndn_chart_data.shift();
|
||||
+ ndn_chart_data.push({ name: _("Cache Hit Ratio"), x: count.toString(), y: parseInt(ndnratio) });
|
||||
+
|
||||
+ // Build up the DB Cache Util chart data
|
||||
+ const ndn_util_chart_data = this.state.ndnCacheUtilList;
|
||||
+ const currNDNSize = parseInt(config.attrs.currentnormalizeddncachesize[0]);
|
||||
+ const maxNDNSize = parseInt(config.attrs.maxnormalizeddncachesize[0]);
|
||||
+ const ndn_utilization = (currNDNSize / maxNDNSize) * 100;
|
||||
+ ndn_util_chart_data.shift();
|
||||
+ ndn_util_chart_data.push({ name: _("Cache Utilization"), x: ndnCount.toString(), y: parseInt(ndn_utilization) });
|
||||
+
|
||||
+ this.setState({
|
||||
+ data: config.attrs,
|
||||
+ loading: false,
|
||||
+ dbCacheList: chart_data,
|
||||
+ ndnCacheList: ndn_chart_data,
|
||||
+ ndnCacheUtilList: ndn_util_chart_data,
|
||||
+ count,
|
||||
+ ndnCount
|
||||
+ });
|
||||
+ })
|
||||
+ .fail(() => {
|
||||
+ this.resetChartData();
|
||||
+ });
|
||||
+ }
|
||||
+
|
||||
+ startCacheRefresh() {
|
||||
+ this.setState({
|
||||
+ cache_refresh: setInterval(this.refreshCache, 2000),
|
||||
+ });
|
||||
+ }
|
||||
+
|
||||
+ stopCacheRefresh() {
|
||||
+ clearInterval(this.state.cache_refresh);
|
||||
+ }
|
||||
+
|
||||
+ render() {
|
||||
+ let chartColor = ChartThemeColor.green;
|
||||
+ let ndnChartColor = ChartThemeColor.green;
|
||||
+ let ndnUtilColor = ChartThemeColor.green;
|
||||
+ let dbcachehit = 0;
|
||||
+ let ndncachehit = 0;
|
||||
+ let ndncachemax = 0;
|
||||
+ let ndncachecurr = 0;
|
||||
+ let utilratio = 0;
|
||||
+ let content = (
|
||||
+ <div className="ds-margin-top-xlg ds-center">
|
||||
+ <TextContent>
|
||||
+ <Text component={TextVariants.h3}>
|
||||
+ {_("Loading database monitor information ...")}
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <Spinner className="ds-margin-top-lg" size="xl" />
|
||||
+ </div>
|
||||
+ );
|
||||
+
|
||||
+ if (!this.state.loading) {
|
||||
+ dbcachehit = parseInt(this.state.data.dbcachehitratio[0]);
|
||||
+ ndncachehit = parseInt(this.state.data.normalizeddncachehitratio[0]);
|
||||
+ ndncachemax = parseInt(this.state.data.maxnormalizeddncachesize[0]);
|
||||
+ ndncachecurr = parseInt(this.state.data.currentnormalizeddncachesize[0]);
|
||||
+ utilratio = Math.round((ndncachecurr / ndncachemax) * 100);
|
||||
+ if (utilratio === 0) {
|
||||
+ // Just round up to 1
|
||||
+ utilratio = 1;
|
||||
+ }
|
||||
+
|
||||
+ // Database cache
|
||||
+ if (dbcachehit > 89) {
|
||||
+ chartColor = ChartThemeColor.green;
|
||||
+ } else if (dbcachehit > 74) {
|
||||
+ chartColor = ChartThemeColor.orange;
|
||||
+ } else {
|
||||
+ chartColor = ChartThemeColor.purple;
|
||||
+ }
|
||||
+ // NDN cache ratio
|
||||
+ if (ndncachehit > 89) {
|
||||
+ ndnChartColor = ChartThemeColor.green;
|
||||
+ } else if (ndncachehit > 74) {
|
||||
+ ndnChartColor = ChartThemeColor.orange;
|
||||
+ } else {
|
||||
+ ndnChartColor = ChartThemeColor.purple;
|
||||
+ }
|
||||
+ // NDN cache utilization
|
||||
+ if (utilratio > 95) {
|
||||
+ ndnUtilColor = ChartThemeColor.purple;
|
||||
+ } else if (utilratio > 90) {
|
||||
+ ndnUtilColor = ChartThemeColor.orange;
|
||||
+ } else {
|
||||
+ ndnUtilColor = ChartThemeColor.green;
|
||||
+ }
|
||||
+
|
||||
+ content = (
|
||||
+ <Tabs activeKey={this.state.activeTabKey} onSelect={this.handleNavSelect}>
|
||||
+ <Tab eventKey={0} title={<TabTitleText>{_("Normalized DN Cache")}</TabTitleText>}>
|
||||
+ <div className="ds-margin-top-lg">
|
||||
+ <Grid hasGutter>
|
||||
+ <GridItem span={6}>
|
||||
+ <Card isSelectable>
|
||||
+ <CardBody>
|
||||
+ <div className="ds-container">
|
||||
+ <div className="ds-center">
|
||||
+ <TextContent className="ds-margin-top-xlg" title={_("The normalized DN cache hit ratio (normalizeddncachehitratio).")}>
|
||||
+ <Text component={TextVariants.h3}>
|
||||
+ {_("Cache Hit Ratio")}
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <TextContent>
|
||||
+ <Text component={TextVariants.h2}>
|
||||
+ <b>{ndncachehit}%</b>
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ </div>
|
||||
+ <div className="ds-margin-left" style={{ height: '200px', width: '350px' }}>
|
||||
+ <Chart
|
||||
+ ariaDesc="NDN Cache"
|
||||
+ ariaTitle={_("Live Normalized DN Cache Statistics")}
|
||||
+ containerComponent={<ChartVoronoiContainer labels={({ datum }) => `${datum.name}: ${datum.y}`} constrainToVisibleArea />}
|
||||
+ height={200}
|
||||
+ maxDomain={{ y: 100 }}
|
||||
+ minDomain={{ y: 0 }}
|
||||
+ padding={{
|
||||
+ bottom: 40,
|
||||
+ left: 60,
|
||||
+ top: 10,
|
||||
+ right: 15,
|
||||
+ }}
|
||||
+ width={350}
|
||||
+ themeColor={ndnChartColor}
|
||||
+ >
|
||||
+ <ChartAxis />
|
||||
+ <ChartAxis dependentAxis showGrid tickValues={[25, 50, 75, 100]} />
|
||||
+ <ChartGroup>
|
||||
+ <ChartArea
|
||||
+ data={this.state.ndnCacheList}
|
||||
+ />
|
||||
+ </ChartGroup>
|
||||
+ </Chart>
|
||||
+ </div>
|
||||
+ </div>
|
||||
+ </CardBody>
|
||||
+ </Card>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={6}>
|
||||
+ <Card isSelectable>
|
||||
+ <CardBody>
|
||||
+ <div className="ds-container">
|
||||
+ <div className="ds-center">
|
||||
+ <TextContent className="ds-margin-top-lg" title={_("The amount of the cache that is being used: max size (maxnormalizeddncachesize) vs current size (currentnormalizeddncachesize)")}>
|
||||
+ <Text component={TextVariants.h2}>
|
||||
+ {_("Cache Utilization")}
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <TextContent>
|
||||
+ <Text component={TextVariants.h3}>
|
||||
+ <b>{utilratio}%</b>
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <TextContent className="ds-margin-top-xlg">
|
||||
+ <Text component={TextVariants.h5}>
|
||||
+ {_("Cached DN's")}
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <b>{numToCommas(this.state.data.currentnormalizeddncachecount[0])}</b>
|
||||
+ </div>
|
||||
+ <div className="ds-margin-left" style={{ height: '200px', width: '350px' }}>
|
||||
+ <Chart
|
||||
+ ariaDesc="NDN Cache Utilization"
|
||||
+ ariaTitle={_("Live Normalized DN Cache Utilization Statistics")}
|
||||
+ containerComponent={<ChartVoronoiContainer labels={({ datum }) => `${datum.name}: ${datum.y}`} constrainToVisibleArea />}
|
||||
+ height={200}
|
||||
+ maxDomain={{ y: 100 }}
|
||||
+ minDomain={{ y: 0 }}
|
||||
+ padding={{
|
||||
+ bottom: 40,
|
||||
+ left: 60,
|
||||
+ top: 10,
|
||||
+ right: 15,
|
||||
+ }}
|
||||
+ width={350}
|
||||
+ themeColor={ndnUtilColor}
|
||||
+ >
|
||||
+ <ChartAxis />
|
||||
+ <ChartAxis dependentAxis showGrid tickValues={[25, 50, 75, 100]} />
|
||||
+ <ChartGroup>
|
||||
+ <ChartArea
|
||||
+ data={this.state.ndnCacheUtilList}
|
||||
+ />
|
||||
+ </ChartGroup>
|
||||
+ </Chart>
|
||||
+ </div>
|
||||
+ </div>
|
||||
+ </CardBody>
|
||||
+ </Card>
|
||||
+ </GridItem>
|
||||
+ </Grid>
|
||||
+
|
||||
+ <Grid hasGutter className="ds-margin-top-xlg">
|
||||
+ <GridItem span={3}>
|
||||
+ {_("NDN Cache Hit Ratio:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{this.state.data.normalizeddncachehitratio}%</b>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={3}>
|
||||
+ {_("NDN Cache Max Size:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{displayBytes(this.state.data.maxnormalizeddncachesize)}</b>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={3}>
|
||||
+ {_("NDN Cache Tries:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{numToCommas(this.state.data.normalizeddncachetries)}</b>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={3}>
|
||||
+ {_("NDN Current Cache Size:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{displayBytes(this.state.data.currentnormalizeddncachesize)}</b>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={3}>
|
||||
+ {_("NDN Cache Hits:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{numToCommas(this.state.data.normalizeddncachehits)}</b>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={3}>
|
||||
+ {_("NDN Cache DN Count:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{numToCommas(this.state.data.currentnormalizeddncachecount)}</b>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={3}>
|
||||
+ {_("NDN Cache Evictions:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{numToCommas(this.state.data.normalizeddncacheevictions)}</b>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={3}>
|
||||
+ {_("NDN Cache Thread Size:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{numToCommas(this.state.data.normalizeddncachethreadsize)}</b>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={3}>
|
||||
+ {_("NDN Cache Thread Slots:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{numToCommas(this.state.data.normalizeddncachethreadslots)}</b>
|
||||
+ </GridItem>
|
||||
+ </Grid>
|
||||
+ </div>
|
||||
+ </Tab>
|
||||
+ </Tabs>
|
||||
+ );
|
||||
+ }
|
||||
+
|
||||
+ return (
|
||||
+ <div id="db-content">
|
||||
+ <TextContent>
|
||||
+ <Text className="ds-sub-header" component={TextVariants.h2}>
|
||||
+ {_("Database Performance Statistics")}
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <div className="ds-margin-top-lg">
|
||||
+ {content}
|
||||
+ </div>
|
||||
+
|
||||
+ </div>
|
||||
+ );
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+// Prop types and defaults
|
||||
+
|
||||
+DatabaseMonitorMDB.propTypes = {
|
||||
+ data: PropTypes.object,
|
||||
+ serverId: PropTypes.string,
|
||||
+ enableTree: PropTypes.func,
|
||||
+};
|
||||
+
|
||||
+DatabaseMonitorMDB.defaultProps = {
|
||||
+ data: {},
|
||||
+ serverId: "",
|
||||
+};
|
||||
diff --git a/src/cockpit/389-console/src/lib/monitor/suffixMonitor.jsx b/src/cockpit/389-console/src/lib/monitor/suffixMonitor.jsx
|
||||
index 464137731..ec78dbdc2 100644
|
||||
--- a/src/cockpit/389-console/src/lib/monitor/suffixMonitor.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/monitor/suffixMonitor.jsx
|
||||
@@ -626,4 +626,365 @@ SuffixMonitor.defaultProps = {
|
||||
bename: "",
|
||||
};
|
||||
|
||||
-export default SuffixMonitor;
|
||||
+export class SuffixMonitorMDB extends React.Component {
|
||||
+ constructor (props) {
|
||||
+ super(props);
|
||||
+ this.state = {
|
||||
+ activeTabKey: 0,
|
||||
+ data: {},
|
||||
+ loading: true,
|
||||
+ // refresh charts
|
||||
+ cache_refresh: "",
|
||||
+ count: 10,
|
||||
+ utilCount: 5,
|
||||
+ entryCacheList: [],
|
||||
+ entryUtilCacheList: [],
|
||||
+ };
|
||||
+
|
||||
+ // Toggle currently active tab
|
||||
+ this.handleNavSelect = (event, tabIndex) => {
|
||||
+ this.setState({
|
||||
+ activeTabKey: tabIndex
|
||||
+ });
|
||||
+ };
|
||||
+
|
||||
+ this.startCacheRefresh = this.startCacheRefresh.bind(this);
|
||||
+ this.refreshSuffixCache = this.refreshSuffixCache.bind(this);
|
||||
+ }
|
||||
+
|
||||
+ componentDidMount() {
|
||||
+ this.resetChartData();
|
||||
+ this.refreshSuffixCache();
|
||||
+ this.startCacheRefresh();
|
||||
+ this.props.enableTree();
|
||||
+ }
|
||||
+
|
||||
+ componentWillUnmount() {
|
||||
+ this.stopCacheRefresh();
|
||||
+ }
|
||||
+
|
||||
+ resetChartData() {
|
||||
+ this.setState({
|
||||
+ data: {
|
||||
+ // Entry cache
|
||||
+ entrycachehitratio: [0],
|
||||
+ entrycachetries: [0],
|
||||
+ entrycachehits: [0],
|
||||
+ maxentrycachesize: [0],
|
||||
+ currententrycachesize: [0],
|
||||
+ maxentrycachecount: [0],
|
||||
+ currententrycachecount: [0],
|
||||
+ },
|
||||
+ entryCacheList: [
|
||||
+ { name: "", x: "1", y: 0 },
|
||||
+ { name: "", x: "2", y: 0 },
|
||||
+ { name: "", x: "3", y: 0 },
|
||||
+ { name: "", x: "4", y: 0 },
|
||||
+ { name: "", x: "5", y: 0 },
|
||||
+ { name: "", x: "6", y: 0 },
|
||||
+ { name: "", x: "7", y: 0 },
|
||||
+ { name: "", x: "8", y: 0 },
|
||||
+ { name: "", x: "9", y: 0 },
|
||||
+ { name: "", x: "10", y: 0 },
|
||||
+ ],
|
||||
+ });
|
||||
+ }
|
||||
+
|
||||
+ refreshSuffixCache() {
|
||||
+ // Search for db cache stat and update state
|
||||
+ const cmd = [
|
||||
+ "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
+ "monitor", "backend", this.props.suffix
|
||||
+ ];
|
||||
+ log_cmd("refreshSuffixCache", "Get suffix monitor", cmd);
|
||||
+ cockpit
|
||||
+ .spawn(cmd, { superuser: true, err: "message" })
|
||||
+ .done(content => {
|
||||
+ const config = JSON.parse(content);
|
||||
+ let count = this.state.count + 1;
|
||||
+ const utilCount = this.state.utilCount + 1;
|
||||
+ if (count > 100) {
|
||||
+ // Keep progress count in check
|
||||
+ count = 1;
|
||||
+ }
|
||||
+
|
||||
+ // Build up the Entry Cache chart data
|
||||
+ const entryRatio = config.attrs.entrycachehitratio[0];
|
||||
+ const entry_data = this.state.entryCacheList;
|
||||
+ entry_data.shift();
|
||||
+ entry_data.push({ name: _("Cache Hit Ratio"), x: count.toString(), y: parseInt(entryRatio) });
|
||||
+
|
||||
+ // Build up the Entry Util chart data
|
||||
+ const entry_util_data = this.state.entryUtilCacheList;
|
||||
+ let maxsize = config.attrs.maxentrycachesize[0];
|
||||
+ let currsize = config.attrs.currententrycachesize[0];
|
||||
+ let utilratio = Math.round((currsize / maxsize) * 100);
|
||||
+ if (utilratio === 0) {
|
||||
+ utilratio = 1;
|
||||
+ }
|
||||
+ entry_util_data.shift();
|
||||
+ entry_util_data.push({ name: _("Cache Utilization"), x: utilCount.toString(), y: parseInt(utilratio) });
|
||||
+
|
||||
+ this.setState({
|
||||
+ data: config.attrs,
|
||||
+ loading: false,
|
||||
+ entryCacheList: entry_data,
|
||||
+ entryUtilCacheList: entry_util_data,
|
||||
+ count,
|
||||
+ utilCount
|
||||
+ });
|
||||
+ })
|
||||
+ .fail(() => {
|
||||
+ this.resetChartData();
|
||||
+ });
|
||||
+ }
|
||||
+
|
||||
+ startCacheRefresh() {
|
||||
+ this.setState({
|
||||
+ cache_refresh: setInterval(this.refreshSuffixCache, 2000)
|
||||
+ });
|
||||
+ }
|
||||
+
|
||||
+ stopCacheRefresh() {
|
||||
+ clearInterval(this.state.cache_refresh);
|
||||
+ }
|
||||
+
|
||||
+ render() {
|
||||
+ let entryChartColor = ChartThemeColor.green;
|
||||
+ let entryUtilChartColor = ChartThemeColor.green;
|
||||
+ let cachehit = 1;
|
||||
+ let cachemax = 0;
|
||||
+ let cachecurr = 0;
|
||||
+ let cachecount = 0;
|
||||
+ let utilratio = 1;
|
||||
+ let SuffixIcon = TreeIcon;
|
||||
+
|
||||
+ if (this.props.dbtype === "subsuffix") {
|
||||
+ SuffixIcon = LeafIcon;
|
||||
+ }
|
||||
+
|
||||
+ let content = (
|
||||
+ <div className="ds-margin-top-xlg ds-center">
|
||||
+ <TextContent>
|
||||
+ <Text component={TextVariants.h3}>
|
||||
+ {_("Loading Suffix Monitor Information ...")}
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <Spinner className="ds-margin-top-lg" size="xl" />
|
||||
+ </div>
|
||||
+ );
|
||||
+
|
||||
+ if (!this.state.loading) {
|
||||
+ // Entry cache
|
||||
+ cachehit = parseInt(this.state.data.entrycachehitratio[0]);
|
||||
+ cachemax = parseInt(this.state.data.maxentrycachesize[0]);
|
||||
+ cachecurr = parseInt(this.state.data.currententrycachesize[0]);
|
||||
+ cachecount = parseInt(this.state.data.currententrycachecount[0]);
|
||||
+ utilratio = Math.round((cachecurr / cachemax) * 100);
|
||||
+
|
||||
+ // Adjust ratios if needed
|
||||
+ if (utilratio === 0) {
|
||||
+ utilratio = 1;
|
||||
+ }
|
||||
+
|
||||
+ // Entry cache chart color
|
||||
+ if (cachehit > 89) {
|
||||
+ entryChartColor = ChartThemeColor.green;
|
||||
+ } else if (cachehit > 74) {
|
||||
+ entryChartColor = ChartThemeColor.orange;
|
||||
+ } else {
|
||||
+ entryChartColor = ChartThemeColor.purple;
|
||||
+ }
|
||||
+ // Entry cache utilization
|
||||
+ if (utilratio > 95) {
|
||||
+ entryUtilChartColor = ChartThemeColor.purple;
|
||||
+ } else if (utilratio > 90) {
|
||||
+ entryUtilChartColor = ChartThemeColor.orange;
|
||||
+ } else {
|
||||
+ entryUtilChartColor = ChartThemeColor.green;
|
||||
+ }
|
||||
+
|
||||
+ content = (
|
||||
+ <div id="monitor-suffix-page">
|
||||
+ <Tabs activeKey={this.state.activeTabKey} onSelect={this.handleNavSelect}>
|
||||
+ <Tab eventKey={0} title={<TabTitleText>{_("Entry Cache")}</TabTitleText>}>
|
||||
+ <div className="ds-margin-top">
|
||||
+ <Grid hasGutter>
|
||||
+ <GridItem span={6}>
|
||||
+ <Card isSelectable>
|
||||
+ <CardBody>
|
||||
+ <div className="ds-container">
|
||||
+ <div className="ds-center">
|
||||
+ <TextContent title={_("The entry cache hit ratio (entrycachehitratio)")}>
|
||||
+ <Text className="ds-margin-top" component={TextVariants.h3}>
|
||||
+ {_("Cache Hit Ratio")}
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <TextContent>
|
||||
+ <Text className="ds-margin-top" component={TextVariants.h2}>
|
||||
+ <b>{cachehit}%</b>
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ </div>
|
||||
+ <div className="ds-margin-left" style={{ height: '200px', width: '350px' }}>
|
||||
+ <Chart
|
||||
+ ariaDesc="Entry Cache"
|
||||
+ ariaTitle={_("Live Entry Cache Statistics")}
|
||||
+ containerComponent={<ChartVoronoiContainer labels={({ datum }) => `${datum.name}: ${datum.y}`} constrainToVisibleArea />}
|
||||
+ height={200}
|
||||
+ maxDomain={{ y: 100 }}
|
||||
+ minDomain={{ y: 0 }}
|
||||
+ padding={{
|
||||
+ bottom: 40,
|
||||
+ left: 60,
|
||||
+ top: 10,
|
||||
+ right: 15,
|
||||
+ }}
|
||||
+ width={350}
|
||||
+ themeColor={entryChartColor}
|
||||
+ >
|
||||
+ <ChartAxis />
|
||||
+ <ChartAxis dependentAxis showGrid tickValues={[25, 50, 75, 100]} />
|
||||
+ <ChartGroup>
|
||||
+ <ChartArea
|
||||
+ data={this.state.entryCacheList}
|
||||
+ />
|
||||
+ </ChartGroup>
|
||||
+ </Chart>
|
||||
+ </div>
|
||||
+ </div>
|
||||
+ </CardBody>
|
||||
+ </Card>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={6}>
|
||||
+ <Card isSelectable>
|
||||
+ <CardBody>
|
||||
+ <div className="ds-container">
|
||||
+ <div className="ds-center">
|
||||
+ <TextContent title={_("The amount of the cache that is being used: max size (maxentrycachesize) vs current size (currententrycachesize)")}>
|
||||
+ <Text className="ds-margin-top" component={TextVariants.h3}>
|
||||
+ {_("Cache Utilization")}
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <TextContent>
|
||||
+ <Text component={TextVariants.h2}>
|
||||
+ <b>{utilratio}%</b>
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <TextContent>
|
||||
+ <Text className="ds-margin-top-lg" component={TextVariants.h5}>
|
||||
+ {_("Cached Entries")}
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <b>{cachecount}</b>
|
||||
+ </div>
|
||||
+ <div className="ds-margin-left" style={{ height: '200px', width: '350px' }}>
|
||||
+ <Chart
|
||||
+ ariaDesc="Entry Cache Utilization"
|
||||
+ ariaTitle={_("Live Entry Cache Utilization Statistics")}
|
||||
+ containerComponent={<ChartVoronoiContainer labels={({ datum }) => `${datum.name}: ${datum.y}`} constrainToVisibleArea />}
|
||||
+ height={200}
|
||||
+ maxDomain={{ y: 100 }}
|
||||
+ minDomain={{ y: 0 }}
|
||||
+ padding={{
|
||||
+ bottom: 40,
|
||||
+ left: 60,
|
||||
+ top: 10,
|
||||
+ right: 15,
|
||||
+ }}
|
||||
+ width={350}
|
||||
+ themeColor={entryUtilChartColor}
|
||||
+ >
|
||||
+ <ChartAxis />
|
||||
+ <ChartAxis dependentAxis showGrid tickValues={[25, 50, 75, 100]} />
|
||||
+ <ChartGroup>
|
||||
+ <ChartArea
|
||||
+ data={this.state.entryUtilCacheList}
|
||||
+ />
|
||||
+ </ChartGroup>
|
||||
+ </Chart>
|
||||
+ </div>
|
||||
+ </div>
|
||||
+ </CardBody>
|
||||
+ </Card>
|
||||
+ </GridItem>
|
||||
+ </Grid>
|
||||
+ </div>
|
||||
+ <Grid hasGutter className="ds-margin-top-xlg">
|
||||
+ <GridItem span={3}>
|
||||
+ {_("Entry Cache Hit Ratio:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{this.state.data.entrycachehitratio[0]}%</b>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={3}>
|
||||
+ {_("Entry Cache Max Size:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{displayBytes(cachemax)} </b>
|
||||
+ </GridItem>
|
||||
+
|
||||
+ <GridItem span={3}>
|
||||
+ {_("Entry Cache Hits:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{numToCommas(this.state.data.entrycachehits[0])}</b>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={3}>
|
||||
+ {_("Entry Cache Current Size:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{displayBytes(cachecurr)}</b>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={3}>
|
||||
+ {_("Entry Cache Tries:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{numToCommas(this.state.data.entrycachetries[0])}</b>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={3}>
|
||||
+ {_("Entry Cache Max Entries:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{numToCommas(this.state.data.maxentrycachecount[0])}</b>
|
||||
+ </GridItem>
|
||||
+ <GridItem span={3}>
|
||||
+ {_("Entry Cache Count:")}
|
||||
+ </GridItem>
|
||||
+ <GridItem span={2}>
|
||||
+ <b>{numToCommas(this.state.data.currententrycachecount[0])}</b>
|
||||
+ </GridItem>
|
||||
+ </Grid>
|
||||
+ </Tab>
|
||||
+ </Tabs>
|
||||
+ </div>
|
||||
+ );
|
||||
+ }
|
||||
+
|
||||
+ return (
|
||||
+ <div>
|
||||
+ <TextContent>
|
||||
+ <Text component={TextVariants.h2}>
|
||||
+ <SuffixIcon /> {this.props.suffix} (<b>{this.props.bename}</b>)
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <div className="ds-margin-top-lg">
|
||||
+ {content}
|
||||
+ </div>
|
||||
+ </div>
|
||||
+ );
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+SuffixMonitorMDB.propTypes = {
|
||||
+ serverId: PropTypes.string,
|
||||
+ suffix: PropTypes.string,
|
||||
+ bename: PropTypes.string,
|
||||
+ enableTree: PropTypes.func,
|
||||
+};
|
||||
+
|
||||
+SuffixMonitorMDB.defaultProps = {
|
||||
+ serverId: "",
|
||||
+ suffix: "",
|
||||
+ bename: "",
|
||||
+};
|
||||
diff --git a/src/cockpit/389-console/src/monitor.jsx b/src/cockpit/389-console/src/monitor.jsx
|
||||
index da959be07..7e0e0c5d4 100644
|
||||
--- a/src/cockpit/389-console/src/monitor.jsx
|
||||
+++ b/src/cockpit/389-console/src/monitor.jsx
|
||||
@@ -3,8 +3,8 @@ import React from "react";
|
||||
import { log_cmd } from "./lib/tools.jsx";
|
||||
import PropTypes from "prop-types";
|
||||
import ServerMonitor from "./lib/monitor/serverMonitor.jsx";
|
||||
-import DatabaseMonitor from "./lib/monitor/dbMonitor.jsx";
|
||||
-import SuffixMonitor from "./lib/monitor/suffixMonitor.jsx";
|
||||
+import { DatabaseMonitor, DatabaseMonitorMDB } from "./lib/monitor/dbMonitor.jsx";
|
||||
+import { SuffixMonitor, SuffixMonitorMDB } from "./lib/monitor/suffixMonitor.jsx";
|
||||
import ChainingMonitor from "./lib/monitor/chainingMonitor.jsx";
|
||||
import AccessLogMonitor from "./lib/monitor/accesslog.jsx";
|
||||
import AuditLogMonitor from "./lib/monitor/auditlog.jsx";
|
||||
@@ -35,6 +35,8 @@ import {
|
||||
|
||||
const _ = cockpit.gettext;
|
||||
|
||||
+const BE_IMPL_MDB = "mdb";
|
||||
+
|
||||
export class Monitor extends React.Component {
|
||||
constructor(props) {
|
||||
super(props);
|
||||
@@ -82,6 +84,8 @@ export class Monitor extends React.Component {
|
||||
auditlogLocation: "",
|
||||
auditfaillogLocation: "",
|
||||
securitylogLocation: "",
|
||||
+ // DB engine, bdb or mdb (default)
|
||||
+ dbEngine: BE_IMPL_MDB,
|
||||
};
|
||||
|
||||
// Bindings
|
||||
@@ -98,6 +102,7 @@ export class Monitor extends React.Component {
|
||||
this.loadMonitorChaining = this.loadMonitorChaining.bind(this);
|
||||
this.loadDiskSpace = this.loadDiskSpace.bind(this);
|
||||
this.reloadDisks = this.reloadDisks.bind(this);
|
||||
+ this.getDBEngine = this.getDBEngine.bind(this);
|
||||
// Replication
|
||||
this.onHandleLoadMonitorReplication = this.onHandleLoadMonitorReplication.bind(this);
|
||||
this.loadCleanTasks = this.loadCleanTasks.bind(this);
|
||||
@@ -114,6 +119,10 @@ export class Monitor extends React.Component {
|
||||
this.loadMonitor = this.loadMonitor.bind(this);
|
||||
}
|
||||
|
||||
+ componentDidMount() {
|
||||
+ this.getDBEngine();
|
||||
+ }
|
||||
+
|
||||
componentDidUpdate(prevProps) {
|
||||
if (this.props.wasActiveList.includes(6)) {
|
||||
if (this.state.firstLoad) {
|
||||
@@ -580,6 +589,32 @@ export class Monitor extends React.Component {
|
||||
});
|
||||
}
|
||||
|
||||
+ getDBEngine () {
|
||||
+ const cmd = [
|
||||
+ "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
+ "backend", "config", "get"
|
||||
+ ];
|
||||
+ log_cmd("getDBEngine", "Get DB Implementation", cmd);
|
||||
+ cockpit
|
||||
+ .spawn(cmd, { superuser: true, err: "message" })
|
||||
+ .done(content => {
|
||||
+ const config = JSON.parse(content);
|
||||
+ const attrs = config.attrs;
|
||||
+ if ('nsslapd-backend-implement' in attrs) {
|
||||
+ this.setState({
|
||||
+ dbEngine: attrs['nsslapd-backend-implement'][0],
|
||||
+ });
|
||||
+ }
|
||||
+ })
|
||||
+ .fail(err => {
|
||||
+ const errMsg = JSON.parse(err);
|
||||
+ this.props.addNotification(
|
||||
+ "error",
|
||||
+ cockpit.format("Error detecting DB implementation type - $0", errMsg.desc)
|
||||
+ );
|
||||
+ });
|
||||
+ }
|
||||
+
|
||||
reloadSNMP() {
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
@@ -955,13 +990,24 @@ export class Monitor extends React.Component {
|
||||
</div>
|
||||
);
|
||||
} else {
|
||||
- monitor_element = (
|
||||
- <DatabaseMonitor
|
||||
- data={this.state.ldbmData}
|
||||
- enableTree={this.enableTree}
|
||||
- serverId={this.props.serverId}
|
||||
- />
|
||||
- );
|
||||
+ if (this.state.dbEngine === BE_IMPL_MDB) {
|
||||
+ monitor_element = (
|
||||
+ <DatabaseMonitorMDB
|
||||
+ data={this.state.ldbmData}
|
||||
+ enableTree={this.enableTree}
|
||||
+ serverId={this.props.serverId}
|
||||
+ />
|
||||
+ );
|
||||
+ } else {
|
||||
+ monitor_element = (
|
||||
+ <DatabaseMonitor
|
||||
+ data={this.state.ldbmData}
|
||||
+ enableTree={this.enableTree}
|
||||
+ serverId={this.props.serverId}
|
||||
+ />
|
||||
+ );
|
||||
+ }
|
||||
+
|
||||
}
|
||||
} else if (this.state.node_name === "server-monitor") {
|
||||
if (this.state.serverLoading) {
|
||||
@@ -1142,16 +1188,29 @@ export class Monitor extends React.Component {
|
||||
);
|
||||
} else {
|
||||
// Suffix
|
||||
- monitor_element = (
|
||||
- <SuffixMonitor
|
||||
- serverId={this.props.serverId}
|
||||
- suffix={this.state.node_text}
|
||||
- bename={this.state.bename}
|
||||
- enableTree={this.enableTree}
|
||||
- key={this.state.node_text}
|
||||
- addNotification={this.props.addNotification}
|
||||
- />
|
||||
- );
|
||||
+ if (this.state.dbEngine === BE_IMPL_MDB) {
|
||||
+ monitor_element = (
|
||||
+ <SuffixMonitorMDB
|
||||
+ serverId={this.props.serverId}
|
||||
+ suffix={this.state.node_text}
|
||||
+ bename={this.state.bename}
|
||||
+ enableTree={this.enableTree}
|
||||
+ key={this.state.node_text}
|
||||
+ addNotification={this.props.addNotification}
|
||||
+ />
|
||||
+ );
|
||||
+ } else {
|
||||
+ monitor_element = (
|
||||
+ <SuffixMonitor
|
||||
+ serverId={this.props.serverId}
|
||||
+ suffix={this.state.node_text}
|
||||
+ bename={this.state.bename}
|
||||
+ enableTree={this.enableTree}
|
||||
+ key={this.state.node_text}
|
||||
+ addNotification={this.props.addNotification}
|
||||
+ />
|
||||
+ );
|
||||
+ }
|
||||
}
|
||||
}
|
||||
}
|
||||
--
2.48.1

1188  0019-Issue-6656-UI-Enhance-Monitor-Log-Viewer-with-Patter.patch  Normal file  (diff suppressed because it is too large)
159  0020-Issue-6568-Fix-failing-webUI-tests.patch  Normal file
@@ -0,0 +1,159 @@
|
||||
From 4991c494580904fe123cb1c4239764e16ae9d5ca Mon Sep 17 00:00:00 2001
|
||||
From: Lenka Doudova <lryznaro@redhat.com>
|
||||
Date: Thu, 23 Jan 2025 17:52:25 +0100
|
||||
Subject: [PATCH] Issue 6568 - Fix failing webUI tests
|
||||
|
||||
Fixing webUI tests that are failing either due to changes or due to
|
||||
existing bugs
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6568
|
||||
|
||||
Author: Lenka Doudova
|
||||
|
||||
Reviewed by: Barbora Simonova
|
||||
---
|
||||
dirsrvtests/tests/suites/webui/__init__.py | 1 +
|
||||
.../suites/webui/database/database_test.py | 73 ++++++++++++-------
|
||||
.../webui/ldap_browser/ldap_browser_test.py | 1 +
|
||||
.../webui/monitoring/monitoring_test.py | 3 +-
|
||||
4 files changed, 51 insertions(+), 27 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/webui/__init__.py b/dirsrvtests/tests/suites/webui/__init__.py
|
||||
index f92b75605..8333d0d49 100644
|
||||
--- a/dirsrvtests/tests/suites/webui/__init__.py
|
||||
+++ b/dirsrvtests/tests/suites/webui/__init__.py
|
||||
@@ -194,6 +194,7 @@ def create_entry(frame, entry_type, entry_data):
|
||||
frame.get_by_role("button", name="Next", exact=True).click()
|
||||
|
||||
elif entry_type == 'custom Entry':
|
||||
+ frame.get_by_role("textbox", name="Search input").fill('account')
|
||||
frame.get_by_role("checkbox", name="Select row 0").check()
|
||||
frame.get_by_role("button", name="Next", exact=True).click()
|
||||
frame.get_by_role("checkbox", name="Select row 1").check()
|
||||
diff --git a/dirsrvtests/tests/suites/webui/database/database_test.py b/dirsrvtests/tests/suites/webui/database/database_test.py
|
||||
index d582bd185..ef105d262 100644
|
||||
--- a/dirsrvtests/tests/suites/webui/database/database_test.py
|
||||
+++ b/dirsrvtests/tests/suites/webui/database/database_test.py
|
||||
@@ -52,7 +52,7 @@ def test_global_database_configuration_availability(topology_st, page, browser_n
|
||||
:id: d0efda45-4e8e-4703-b9c0-ab53249dafc3
|
||||
:setup: Standalone instance
|
||||
:steps:
|
||||
- 1. Click on Database tab and check if ID List Scan Limit label is visible.
|
||||
+ 1. Click on Database tab, click on Limits tab and check if ID List Scan Limit label is visible.
|
||||
2. Click on Database Cache tab and check if Automatic Cache Tuning checkbox is visible.
|
||||
3. Click on Import Cache tab and check if Automatic Import Cache Tuning checkbox is visible.
|
||||
4. Click on NDN Cache tab and check if Normalized DN Cache Max Size label is visible.
|
||||
@@ -69,31 +69,52 @@ def test_global_database_configuration_availability(topology_st, page, browser_n
|
||||
setup_login(page)
|
||||
time.sleep(1)
|
||||
frame = check_frame_assignment(page, browser_name)
|
||||
-
|
||||
- log.info('Check if element on Limits tab is loaded.')
|
||||
- frame.get_by_role('tab', name='Database', exact=True).click()
|
||||
- frame.get_by_text('ID List Scan Limit', exact=True).wait_for()
|
||||
- assert frame.get_by_text('ID List Scan Limit', exact=True).is_visible()
|
||||
-
|
||||
- log.info('Click on Database Cache tab and check if element is loaded')
|
||||
- frame.get_by_role('tab', name='Database Cache', exact=True).click()
|
||||
- assert frame.locator('#db_cache_auto').is_visible()
|
||||
-
|
||||
- log.info('Click on Import Cache tab and check if element is loaded')
|
||||
- frame.get_by_role('tab', name='Import Cache', exact=True).click()
|
||||
- assert frame.locator('#import_cache_auto').is_visible()
|
||||
-
|
||||
- log.info('Click on NDN Cache tab and check if element is loaded')
|
||||
- frame.get_by_role('tab', name='NDN Cache', exact=True).click()
|
||||
- assert frame.get_by_text('Normalized DN Cache Max Size').is_visible()
|
||||
-
|
||||
- log.info('Click on Database Locks tab and check if element is loaded')
|
||||
- frame.get_by_role('tab', name='Database Locks', exact=True).click()
|
||||
- assert frame.locator('#dblocksMonitoring').is_visible()
|
||||
-
|
||||
- log.info('Click on Advanced Settings tab and check if element is loaded')
|
||||
- frame.get_by_role('tab', name='Advanced Settings', exact=True).click()
|
||||
- assert frame.locator('#txnlogdir').is_visible()
|
||||
+ instance = topology_st.standalone
|
||||
+
|
||||
+ if instance.get_db_lib() == 'mdb':
|
||||
+ log.info('Check if element on Limits tab is loaded.')
|
||||
+ frame.get_by_role('tab', name='Database', exact=True).click()
|
||||
+
|
||||
+ frame.get_by_role('tab', name='Database Size', exact=True).click()
|
||||
+ frame.get_by_text('Database Maximum Size', exact=True).wait_for()
|
||||
+ assert frame.get_by_text('Database Maximum Size', exact=True).is_visible()
|
||||
+
|
||||
+ frame.get_by_role('tab', name='Limits', exact=True).click()
|
||||
+ frame.get_by_text('ID List Scan Limit', exact=True).wait_for()
|
||||
+ assert frame.get_by_text('ID List Scan Limit', exact=True).is_visible()
|
||||
+
|
||||
+ log.info('Click on NDN Cache tab and check if element is loaded')
|
||||
+ frame.get_by_role('tab', name='NDN Cache', exact=True).click()
|
||||
+ assert frame.get_by_text('Normalized DN Cache Max Size').is_visible()
|
||||
+
|
||||
+ log.info('Click on Advanced Settings tab and check if element is loaded')
|
||||
+ frame.get_by_role('tab', name='Advanced Settings', exact=True).click()
|
||||
+ assert frame.locator('#dbhomedir').is_visible()
|
||||
+ elif instance.get_db_lib() == 'bdb':
|
||||
+ log.info('Check if element on Limits tab is loaded.')
|
||||
+ frame.get_by_role('tab', name='Database', exact=True).click()
|
||||
+ frame.get_by_text('ID List Scan Limit', exact=True).wait_for()
|
||||
+ assert frame.get_by_text('ID List Scan Limit', exact=True).is_visible()
|
||||
+
|
||||
+ log.info('Click on Database Cache tab and check if element is loaded')
|
||||
+ frame.get_by_role('tab', name='Database Cache', exact=True).click()
|
||||
+ assert frame.locator('#db_cache_auto').is_visible()
|
||||
+
|
||||
+ log.info('Click on Import Cache tab and check if element is loaded')
|
||||
+ frame.get_by_role('tab', name='Import Cache', exact=True).click()
|
||||
+ assert frame.locator('#import_cache_auto').is_visible()
|
||||
+
|
||||
+ log.info('Click on NDN Cache tab and check if element is loaded')
|
||||
+ frame.get_by_role('tab', name='NDN Cache', exact=True).click()
|
||||
+ assert frame.get_by_text('Normalized DN Cache Max Size').is_visible()
|
||||
+
|
||||
+ log.info('Click on Database Locks tab and check if element is loaded')
|
||||
+ frame.get_by_role('tab', name='Database Locks', exact=True).click()
|
||||
+ assert frame.locator('#dblocksMonitoring').is_visible()
|
||||
+
|
||||
+ log.info('Click on Advanced Settings tab and check if element is loaded')
|
||||
+ frame.get_by_role('tab', name='Advanced Settings', exact=True).click()
|
||||
+ assert frame.locator('#txnlogdir').is_visible()
|
||||
|
||||
|
||||
def test_chaining_configuration_availability(topology_st, page, browser_name):
|
||||
diff --git a/dirsrvtests/tests/suites/webui/ldap_browser/ldap_browser_test.py b/dirsrvtests/tests/suites/webui/ldap_browser/ldap_browser_test.py
|
||||
index 265cc9489..0aab5c7c5 100644
|
||||
--- a/dirsrvtests/tests/suites/webui/ldap_browser/ldap_browser_test.py
|
||||
+++ b/dirsrvtests/tests/suites/webui/ldap_browser/ldap_browser_test.py
|
||||
@@ -181,6 +181,7 @@ def test_create_and_delete_group(topology_st, page, browser_name):
|
||||
assert frame.get_by_role("button").filter(has_text=f"cn={test_data['group_name']}").count() == 0
|
||||
|
||||
|
||||
+@pytest.mark.xfail(reason="DS6558")
|
||||
def test_create_and_delete_organizational_unit(topology_st, page, browser_name):
|
||||
""" Test to create and delete organizational unit
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/webui/monitoring/monitoring_test.py b/dirsrvtests/tests/suites/webui/monitoring/monitoring_test.py
|
||||
index 40de3baeb..6a1556c71 100644
|
||||
--- a/dirsrvtests/tests/suites/webui/monitoring/monitoring_test.py
|
||||
+++ b/dirsrvtests/tests/suites/webui/monitoring/monitoring_test.py
|
||||
@@ -132,6 +132,7 @@ def test_replication_visibility(topology_st, page, browser_name):
|
||||
assert frame.locator('#replication-suffix-dc\\=example\\,dc\\=com').is_visible()
|
||||
|
||||
|
||||
+@pytest.mark.xfail(reason="DS6557")
|
||||
def test_database_visibility(topology_st, page, browser_name):
|
||||
""" Test Database monitoring visibility
|
||||
|
||||
@@ -247,7 +248,7 @@ def test_create_credential_and_alias(topology_st, page, browser_name):
|
||||
|
||||
log.info('Click on Monitoring tab, click on replication button, create new credential and check if it is created')
|
||||
frame.get_by_role('tab', name='Monitoring', exact=True).click()
|
||||
- frame.locator('#replication-monitor').click()
|
||||
+ frame.locator('#sync-report').click()
|
||||
frame.locator('#pf-tab-1-prepare-new-report').click()
|
||||
frame.get_by_role('button', name='Add Credentials').click()
|
||||
frame.locator('#credsHostname').fill('credential.test')
|
||||
--
|
||||
2.48.1
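
As a side note on the backend branching used in database_test.py above, a distilled sketch of the same decision follows; expected_db_tabs is a hypothetical helper (not part of the patch) and the tab names are copied from the assertions above. The comparison uses ==, since get_db_lib() returns a plain string such as 'bdb' or 'mdb' and identity checks with `is` are not reliable for strings.

# Hypothetical helper mirroring the backend-dependent branching in the test above.
def expected_db_tabs(instance):
    # lib389's get_db_lib() reports which database backend the instance uses.
    if instance.get_db_lib() == 'mdb':
        # LMDB instances expose the size/limits tabs but none of the BDB cache/lock tabs.
        return ['Database Size', 'Limits', 'NDN Cache', 'Advanced Settings']
    # BDB instances expose the cache- and lock-related tabs as well.
    return ['Limits', 'Database Cache', 'Import Cache', 'NDN Cache',
            'Database Locks', 'Advanced Settings']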
255
0021-Issue-6665-UI-Need-to-refresh-log-settings-after-sav.patch
Normal file
@ -0,0 +1,255 @@
From a1fd904e68066da103b626bae600632d90c1b83d Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Tue, 11 Mar 2025 10:23:10 -0400
|
||||
Subject: [PATCH] Issue 6665 - UI - Need to refresh log settings after saving
|
||||
|
||||
Description:
|
||||
|
||||
While we do reload the config from the parent component, it is not resetting the
child component as expected.
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6665
|
||||
|
||||
Reviewed by: spichugi(Thanks!)
|
||||
---
|
||||
.../389-console/src/lib/server/accessLog.jsx | 20 ++++++++----------
|
||||
.../389-console/src/lib/server/auditLog.jsx | 21 ++++++++-----------
|
||||
.../src/lib/server/auditfailLog.jsx | 20 ++++++++----------
|
||||
.../389-console/src/lib/server/errorLog.jsx | 20 ++++++++----------
|
||||
.../src/lib/server/securityLog.jsx | 20 ++++++++----------
|
||||
5 files changed, 45 insertions(+), 56 deletions(-)
|
||||
|
||||
diff --git a/src/cockpit/389-console/src/lib/server/accessLog.jsx b/src/cockpit/389-console/src/lib/server/accessLog.jsx
|
||||
index 250fe48ca..d2bf01ff6 100644
|
||||
--- a/src/cockpit/389-console/src/lib/server/accessLog.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/server/accessLog.jsx
|
||||
@@ -303,32 +303,30 @@ export class ServerAccessLog extends React.Component {
|
||||
.spawn(cmd, { superuser: true, err: "message" })
|
||||
.done(content => {
|
||||
this.props.reload();
|
||||
+ this.refreshConfig(1);
|
||||
this.props.addNotification(
|
||||
"success",
|
||||
_("Successfully updated Access Log settings")
|
||||
);
|
||||
- this.setState({
|
||||
- loading: false
|
||||
- });
|
||||
})
|
||||
.fail(err => {
|
||||
const errMsg = JSON.parse(err);
|
||||
this.props.reload();
|
||||
+ this.refreshConfig(1);
|
||||
this.props.addNotification(
|
||||
"error",
|
||||
cockpit.format(_("Error saving Access Log settings - $0"), errMsg.desc)
|
||||
);
|
||||
- this.setState({
|
||||
- loading: false
|
||||
- });
|
||||
});
|
||||
}
|
||||
|
||||
- refreshConfig(refesh) {
|
||||
- this.setState({
|
||||
- loading: true,
|
||||
- loaded: false,
|
||||
- });
|
||||
+ refreshConfig(loading) {
|
||||
+ if (!loading) {
|
||||
+ this.setState({
|
||||
+ loading: true,
|
||||
+ loaded: false,
|
||||
+ });
|
||||
+ }
|
||||
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
diff --git a/src/cockpit/389-console/src/lib/server/auditLog.jsx b/src/cockpit/389-console/src/lib/server/auditLog.jsx
|
||||
index 0a566bccb..606dde642 100644
|
||||
--- a/src/cockpit/389-console/src/lib/server/auditLog.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/server/auditLog.jsx
|
||||
@@ -308,33 +308,30 @@ export class ServerAuditLog extends React.Component {
|
||||
.spawn(cmd, { superuser: true, err: "message" })
|
||||
.done(content => {
|
||||
this.props.reload();
|
||||
+ this.refreshConfig(1);
|
||||
this.props.addNotification(
|
||||
"success",
|
||||
_("Successfully updated Audit Log settings")
|
||||
);
|
||||
- this.setState({
|
||||
- loading: false
|
||||
- });
|
||||
-
|
||||
})
|
||||
.fail(err => {
|
||||
const errMsg = JSON.parse(err);
|
||||
this.props.reload();
|
||||
+ this.refreshConfig(1);
|
||||
this.props.addNotification(
|
||||
"error",
|
||||
cockpit.format(_("Error saving Audit Log settings - $0"), errMsg.desc)
|
||||
);
|
||||
- this.setState({
|
||||
- loading: false,
|
||||
- });
|
||||
});
|
||||
}
|
||||
|
||||
- refreshConfig() {
|
||||
- this.setState({
|
||||
- loading: true,
|
||||
- loaded: false,
|
||||
- });
|
||||
+ refreshConfig(loading) {
|
||||
+ if (!loading) {
|
||||
+ this.setState({
|
||||
+ loading: true,
|
||||
+ loaded: false,
|
||||
+ });
|
||||
+ }
|
||||
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
diff --git a/src/cockpit/389-console/src/lib/server/auditfailLog.jsx b/src/cockpit/389-console/src/lib/server/auditfailLog.jsx
|
||||
index 0e5ae0a88..19785276a 100644
|
||||
--- a/src/cockpit/389-console/src/lib/server/auditfailLog.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/server/auditfailLog.jsx
|
||||
@@ -244,32 +244,30 @@ export class ServerAuditFailLog extends React.Component {
|
||||
.spawn(cmd, { superuser: true, err: "message" })
|
||||
.done(content => {
|
||||
this.props.reload();
|
||||
+ this.refreshConfig(1);
|
||||
this.props.addNotification(
|
||||
"success",
|
||||
_("Successfully updated Audit Fail Log settings")
|
||||
);
|
||||
- this.setState({
|
||||
- loading: false
|
||||
- });
|
||||
})
|
||||
.fail(err => {
|
||||
const errMsg = JSON.parse(err);
|
||||
this.props.reload();
|
||||
+ this.refreshConfig(1);
|
||||
this.props.addNotification(
|
||||
"error",
|
||||
cockpit.format(_("Error saving Audit Fail Log settings - $0"), errMsg.desc)
|
||||
);
|
||||
- this.setState({
|
||||
- loading: false
|
||||
- });
|
||||
});
|
||||
}
|
||||
|
||||
- refreshConfig() {
|
||||
- this.setState({
|
||||
- loading: true,
|
||||
- loaded: false,
|
||||
- });
|
||||
+ refreshConfig(loading) {
|
||||
+ if (!loading) {
|
||||
+ this.setState({
|
||||
+ loading: true,
|
||||
+ loaded: false,
|
||||
+ });
|
||||
+ }
|
||||
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
diff --git a/src/cockpit/389-console/src/lib/server/errorLog.jsx b/src/cockpit/389-console/src/lib/server/errorLog.jsx
|
||||
index 0922a80ff..0ad36e594 100644
|
||||
--- a/src/cockpit/389-console/src/lib/server/errorLog.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/server/errorLog.jsx
|
||||
@@ -326,32 +326,30 @@ export class ServerErrorLog extends React.Component {
|
||||
.spawn(cmd, { superuser: true, err: "message" })
|
||||
.done(content => {
|
||||
this.props.reload();
|
||||
+ this.handleRefreshConfig(1);
|
||||
this.props.addNotification(
|
||||
"success",
|
||||
_("Successfully updated Error Log settings")
|
||||
);
|
||||
- this.setState({
|
||||
- loading: false
|
||||
- });
|
||||
})
|
||||
.fail(err => {
|
||||
const errMsg = JSON.parse(err);
|
||||
this.props.reload();
|
||||
+ this.handleRefreshConfig(1);
|
||||
this.props.addNotification(
|
||||
"error",
|
||||
cockpit.format(_("Error saving Error Log settings - $0"), errMsg.desc)
|
||||
);
|
||||
- this.setState({
|
||||
- loading: false
|
||||
- });
|
||||
});
|
||||
}
|
||||
|
||||
- handleRefreshConfig() {
|
||||
- this.setState({
|
||||
- loading: true,
|
||||
- loaded: false,
|
||||
- });
|
||||
+ handleRefreshConfig(loading) {
|
||||
+ if (!loading) {
|
||||
+ this.setState({
|
||||
+ loading: true,
|
||||
+ loaded: false,
|
||||
+ });
|
||||
+ };
|
||||
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
diff --git a/src/cockpit/389-console/src/lib/server/securityLog.jsx b/src/cockpit/389-console/src/lib/server/securityLog.jsx
|
||||
index 01adcca0c..77000a873 100644
|
||||
--- a/src/cockpit/389-console/src/lib/server/securityLog.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/server/securityLog.jsx
|
||||
@@ -245,32 +245,30 @@ export class ServerSecurityLog extends React.Component {
|
||||
.spawn(cmd, { superuser: true, err: "message" })
|
||||
.done(content => {
|
||||
this.props.reload();
|
||||
+ this.refreshConfig(1);
|
||||
this.props.addNotification(
|
||||
"success",
|
||||
_("Successfully updated Security Log settings")
|
||||
);
|
||||
- this.setState({
|
||||
- loading: false
|
||||
- });
|
||||
})
|
||||
.fail(err => {
|
||||
const errMsg = JSON.parse(err);
|
||||
this.props.reload();
|
||||
+ this.refreshConfig(1);
|
||||
this.props.addNotification(
|
||||
"error",
|
||||
cockpit.format(_("Error saving Security Log settings - $0"), errMsg.desc)
|
||||
);
|
||||
- this.setState({
|
||||
- loading: false
|
||||
- });
|
||||
});
|
||||
}
|
||||
|
||||
- refreshConfig(refesh) {
|
||||
- this.setState({
|
||||
- loading: true,
|
||||
- loaded: false,
|
||||
- });
|
||||
+ refreshConfig(loading) {
|
||||
+ if (!loading) {
|
||||
+ this.setState({
|
||||
+ loading: true,
|
||||
+ loaded: false,
|
||||
+ });
|
||||
+ }
|
||||
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
--
|
||||
2.48.1
443
0022-Issue-6695-UI-fix-more-minor-issues.patch
Normal file
@ -0,0 +1,443 @@
From c913d0889c0538f7c64fc9fc64580c5d1e9eeeb7 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 26 Mar 2025 16:33:22 -0400
|
||||
Subject: [PATCH] Issue 6695 - UI - fix more minor issues
|
||||
|
||||
Description:
|
||||
|
||||
fix the following items:
|
||||
|
||||
MemberOf page - subtree scopes - allows you to create blank values, and does not validate suffixes
|
||||
Local password policy - copy/paste error added a duplicate label for a checkbox (send expiring warning)
|
||||
Create instance modal - did not validate database name
|
||||
Update connection monitor chart to include detailed information for connection status (established, close wait, and time wait)
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/6695
|
||||
|
||||
Reviewed by: spichugi(Thanks!)
|
||||
---
|
||||
src/cockpit/389-console/src/dsModals.jsx | 22 ++--
|
||||
.../389-console/src/lib/database/localPwp.jsx | 3 -
|
||||
.../src/lib/monitor/serverMonitor.jsx | 114 +++++++++++++++---
|
||||
.../389-console/src/lib/plugins/memberOf.jsx | 18 +--
|
||||
src/lib389/lib389/monitor.py | 12 ++
|
||||
5 files changed, 128 insertions(+), 41 deletions(-)
|
||||
|
||||
diff --git a/src/cockpit/389-console/src/dsModals.jsx b/src/cockpit/389-console/src/dsModals.jsx
|
||||
index e3ee18fc0..bf19ce144 100644
|
||||
--- a/src/cockpit/389-console/src/dsModals.jsx
|
||||
+++ b/src/cockpit/389-console/src/dsModals.jsx
|
||||
@@ -4,7 +4,13 @@ import PropTypes from "prop-types";
|
||||
import { DoubleConfirmModal } from "./lib/notifications.jsx";
|
||||
import { BackupTable } from "./lib/database/databaseTables.jsx";
|
||||
import { BackupModal } from "./lib/database/backups.jsx";
|
||||
-import { log_cmd, bad_file_name, valid_dn, callCmdStreamPassword } from "./lib/tools.jsx";
|
||||
+import {
|
||||
+ log_cmd,
|
||||
+ bad_file_name,
|
||||
+ valid_dn,
|
||||
+ valid_db_name,
|
||||
+ callCmdStreamPassword
|
||||
+} from "./lib/tools.jsx";
|
||||
import {
|
||||
Button,
|
||||
Checkbox,
|
||||
@@ -103,10 +109,6 @@ export class CreateInstanceModal extends React.Component {
|
||||
'createDM'
|
||||
];
|
||||
|
||||
- const optionalAttrs = [
|
||||
- 'createDBName'
|
||||
- ];
|
||||
-
|
||||
// Handle server ID
|
||||
if (this.state.createServerId !== "") {
|
||||
if (this.state.createServerId.length > 80) {
|
||||
@@ -142,11 +144,9 @@ export class CreateInstanceModal extends React.Component {
|
||||
}
|
||||
|
||||
if (this.state.createDBCheckbox) {
|
||||
- for (const attr of optionalAttrs) {
|
||||
- if (this.state[attr] === "") {
|
||||
- all_good = false;
|
||||
- errObj[attr] = true;
|
||||
- }
|
||||
+ if (!valid_db_name(this.state.createDBName)) {
|
||||
+ all_good = false;
|
||||
+ errObj["createDBName"] = true;
|
||||
}
|
||||
if (!valid_dn(this.state.createDBSuffix)) {
|
||||
all_good = false;
|
||||
@@ -636,7 +636,7 @@ export class CreateInstanceModal extends React.Component {
|
||||
<FormHelperText >
|
||||
<HelperText>
|
||||
<HelperTextItem variant="error">
|
||||
- {_("Name is required")}
|
||||
+ {createDBName === "" ? _("Name is required") : "Invalid database name"}
|
||||
</HelperTextItem>
|
||||
</HelperText>
|
||||
</FormHelperText>
|
||||
diff --git a/src/cockpit/389-console/src/lib/database/localPwp.jsx b/src/cockpit/389-console/src/lib/database/localPwp.jsx
|
||||
index ff114bb01..8586ba932 100644
|
||||
--- a/src/cockpit/389-console/src/lib/database/localPwp.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/database/localPwp.jsx
|
||||
@@ -398,9 +398,6 @@ class CreatePolicy extends React.Component {
|
||||
</GridItem>
|
||||
</Grid>
|
||||
<Grid className="ds-margin-top" title={_("Always return a password expiring control when requested (passwordSendExpiringTime).")}>
|
||||
- <GridItem className="ds-label" span={4}>
|
||||
- {_("Send Password Expiring Warning")}
|
||||
- </GridItem>
|
||||
<GridItem span={4}>
|
||||
<Checkbox
|
||||
id="create_passwordsendexpiringtime"
|
||||
diff --git a/src/cockpit/389-console/src/lib/monitor/serverMonitor.jsx b/src/cockpit/389-console/src/lib/monitor/serverMonitor.jsx
|
||||
index 513e1d792..a736ff532 100644
|
||||
--- a/src/cockpit/389-console/src/lib/monitor/serverMonitor.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/monitor/serverMonitor.jsx
|
||||
@@ -37,6 +37,7 @@ import {
|
||||
import { SyncAltIcon } from "@patternfly/react-icons";
|
||||
|
||||
const _ = cockpit.gettext;
|
||||
+const refresh_interval = 10000; // 10 seconds
|
||||
|
||||
export class ServerMonitor extends React.Component {
|
||||
constructor (props) {
|
||||
@@ -47,12 +48,20 @@ export class ServerMonitor extends React.Component {
|
||||
const initResChart = [];
|
||||
const initSwapChart = [];
|
||||
const initConnChart = [];
|
||||
- for (let idx = 1; idx <= 6; idx++) {
|
||||
- initCPUChart.push({ name: 'CPU', x: "0:00:00", y: 0 });
|
||||
- initResChart.push({ name: 'Resident', x: "0:00:00", y: 0 });
|
||||
- initVirtChart.push({ name: 'Virtual', x: "0:00:00", y: 0 });
|
||||
- initSwapChart.push({ name: 'Swap', x: "0:00:00", y: 0 });
|
||||
- initConnChart.push({ name: 'Connection', x: "0:00:00", y: 0 });
|
||||
+ const initConnEstablishedChart = [];
|
||||
+ const initConnTimeWaitChart = [];
|
||||
+ const initConnCloseWaitChart = [];
|
||||
+ for (let idx = 0; idx <= 5; idx++) {
|
||||
+ const value = refresh_interval / 1000;
|
||||
+ const x_value = "0:00:" + (idx === 0 ? "00" : value * idx).toString();
|
||||
+ initCPUChart.push({ name: 'CPU', x: x_value, y: 0 });
|
||||
+ initResChart.push({ name: 'Resident', x: x_value, y: 0 });
|
||||
+ initVirtChart.push({ name: 'Virtual', x: x_value, y: 0 });
|
||||
+ initSwapChart.push({ name: 'Swap', x: x_value, y: 0 });
|
||||
+ initConnChart.push({ name: 'Connections', x: x_value, y: 0 });
|
||||
+ initConnTimeWaitChart.push({ name: 'Connections time wait', x: x_value, y: 0 });
|
||||
+ initConnCloseWaitChart.push({ name: 'Connections close wait', x: x_value, y: 0 });
|
||||
+ initConnEstablishedChart.push({ name: 'Connections established', x: x_value, y: 0 });
|
||||
}
|
||||
|
||||
this.state = {
|
||||
@@ -71,11 +80,17 @@ export class ServerMonitor extends React.Component {
|
||||
initResChart,
|
||||
initSwapChart,
|
||||
initConnChart,
|
||||
+ initConnEstablishedChart,
|
||||
+ initConnTimeWaitChart,
|
||||
+ initConnCloseWaitChart,
|
||||
cpuChart: [...initCPUChart],
|
||||
memVirtChart: [...initVirtChart],
|
||||
memResChart: [...initResChart],
|
||||
swapChart: [...initSwapChart],
|
||||
connChart: [...initConnChart],
|
||||
+ connEstablishedChart: [...initConnEstablishedChart],
|
||||
+ connTimeWaitChart: [...initConnTimeWaitChart],
|
||||
+ connCloseWaitChart: [...initConnCloseWaitChart],
|
||||
};
|
||||
|
||||
this.handleNavSelect = (event, tabIndex) => {
|
||||
@@ -110,6 +125,9 @@ export class ServerMonitor extends React.Component {
|
||||
memResChart: [...this.state.initResChart],
|
||||
swapChart: [...this.state.initSwapChart],
|
||||
connChart: [...this.state.initConnChart],
|
||||
+ connCloseWaitChart: [...this.state.initConnCloseWaitChart],
|
||||
+ connTimeWaitChart: [...this.state.initConnTimeWaitChart],
|
||||
+ connEstablishedChart: [...this.state.initConnEstablishedChart],
|
||||
});
|
||||
}
|
||||
|
||||
@@ -123,6 +141,9 @@ export class ServerMonitor extends React.Component {
|
||||
let res_mem = 0;
|
||||
let swap_mem = 0;
|
||||
let current_conns = 0;
|
||||
+ let conn_established = 0;
|
||||
+ let conn_close_wait = 0;
|
||||
+ let conn_time_wait = 0;
|
||||
let total_threads = 0;
|
||||
let conn_highmark = this.state.conn_highmark;
|
||||
let cpu_tick_values = this.state.cpu_tick_values;
|
||||
@@ -147,6 +168,9 @@ export class ServerMonitor extends React.Component {
|
||||
res_mem = attrs['rss'][0];
|
||||
swap_mem = attrs['swap'][0];
|
||||
current_conns = attrs['connection_count'][0];
|
||||
+ conn_established = attrs['connection_established_count'][0];
|
||||
+ conn_close_wait = attrs['connection_close_wait_count'][0];
|
||||
+ conn_time_wait = attrs['connection_time_wait_count'][0];
|
||||
total_threads = attrs['total_threads'][0];
|
||||
mem_total = attrs['total_mem'][0];
|
||||
|
||||
@@ -196,6 +220,18 @@ export class ServerMonitor extends React.Component {
|
||||
connChart.shift();
|
||||
connChart.push({ name: _("Connections"), x: interval, y: parseInt(current_conns) });
|
||||
|
||||
+ const connEstablishedChart = this.state.connEstablishedChart;
|
||||
+ connEstablishedChart.shift();
|
||||
+ connEstablishedChart.push({ name: _("Connections established"), x: interval, y: parseInt(conn_established) });
|
||||
+
|
||||
+ const connTimeWaitChart = this.state.connTimeWaitChart;
|
||||
+ connTimeWaitChart.shift();
|
||||
+ connTimeWaitChart.push({ name: _("Connections time wait"), x: interval, y: parseInt(conn_time_wait) });
|
||||
+
|
||||
+ const connCloseWaitChart = this.state.connCloseWaitChart;
|
||||
+ connCloseWaitChart.shift();
|
||||
+ connCloseWaitChart.push({ name: _("Connections close wait"), x: interval, y: parseInt(conn_close_wait) });
|
||||
+
|
||||
this.setState({
|
||||
cpu_tick_values,
|
||||
conn_tick_values,
|
||||
@@ -204,8 +240,14 @@ export class ServerMonitor extends React.Component {
|
||||
memResChart,
|
||||
swapChart,
|
||||
connChart,
|
||||
+ connTimeWaitChart,
|
||||
+ connCloseWaitChart,
|
||||
+ connEstablishedChart,
|
||||
conn_highmark,
|
||||
current_conns,
|
||||
+ conn_close_wait,
|
||||
+ conn_time_wait,
|
||||
+ conn_established,
|
||||
mem_virt_size: virt_mem,
|
||||
mem_res_size: res_mem,
|
||||
mem_swap_size: swap_mem,
|
||||
@@ -224,7 +266,7 @@ export class ServerMonitor extends React.Component {
|
||||
|
||||
startRefresh() {
|
||||
this.setState({
|
||||
- chart_refresh: setInterval(this.refreshCharts, 10000),
|
||||
+ chart_refresh: setInterval(this.refreshCharts, refresh_interval),
|
||||
});
|
||||
}
|
||||
|
||||
@@ -236,8 +278,14 @@ export class ServerMonitor extends React.Component {
|
||||
const {
|
||||
cpu,
|
||||
connChart,
|
||||
+ connTimeWaitChart,
|
||||
+ connCloseWaitChart,
|
||||
+ connEstablishedChart,
|
||||
cpuChart,
|
||||
current_conns,
|
||||
+ conn_established,
|
||||
+ conn_close_wait,
|
||||
+ conn_time_wait,
|
||||
memResChart,
|
||||
memVirtChart,
|
||||
swapChart,
|
||||
@@ -312,15 +360,33 @@ export class ServerMonitor extends React.Component {
|
||||
<Card className="ds-margin-top-lg">
|
||||
<CardBody>
|
||||
<Grid>
|
||||
- <GridItem span="4" className="ds-center" title={_("Established client connections to the server")}>
|
||||
- <TextContent>
|
||||
- <Text className="ds-margin-top-xlg" component={TextVariants.h3}>
|
||||
- {_("Connections")}
|
||||
+ <GridItem span="4" title={_("Established client connections to the server")}>
|
||||
+ <div className="ds-center" >
|
||||
+ <TextContent>
|
||||
+ <Text component={TextVariants.h2}>
|
||||
+ {_("Connections")}
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <TextContent>
|
||||
+ <Text component={TextVariants.h6}>
|
||||
+ <b>{numToCommas(current_conns)}</b>
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <Divider className="ds-margin-top ds-margin-bottom"/>
|
||||
+ </div>
|
||||
+ <TextContent className="ds-margin-top-lg" title="Connections that are in an ESTABLISHED state">
|
||||
+ <Text component={TextVariants.p}>
|
||||
+ Established: <b>{numToCommas(conn_established)}</b>
|
||||
</Text>
|
||||
</TextContent>
|
||||
- <TextContent>
|
||||
- <Text component={TextVariants.h6}>
|
||||
- <b>{numToCommas(current_conns)}</b>
|
||||
+ <TextContent className="ds-margin-top-lg" title="Connections that are in a CLOSE_WAIT state">
|
||||
+ <Text component={TextVariants.p}>
|
||||
+ Close wait: <b>{numToCommas(conn_close_wait)}</b>
|
||||
+ </Text>
|
||||
+ </TextContent>
|
||||
+ <TextContent className="ds-margin-top-lg" title="Connections that are in a TIME_WAIT state">
|
||||
+ <Text component={TextVariants.p}>
|
||||
+ Time wait: <b>{numToCommas(conn_time_wait)}</b>
|
||||
</Text>
|
||||
</TextContent>
|
||||
</GridItem>
|
||||
@@ -329,13 +395,13 @@ export class ServerMonitor extends React.Component {
|
||||
ariaDesc="connection stats"
|
||||
ariaTitle={_("Live Connection Statistics")}
|
||||
containerComponent={<ChartVoronoiContainer labels={({ datum }) => `${datum.name}: ${datum.y}`} constrainToVisibleArea />}
|
||||
- height={200}
|
||||
+ height={220}
|
||||
minDomain={{ y: 0 }}
|
||||
padding={{
|
||||
bottom: 30,
|
||||
- left: 55,
|
||||
+ left: 60,
|
||||
top: 10,
|
||||
- right: 25,
|
||||
+ right: 30,
|
||||
}}
|
||||
>
|
||||
<ChartAxis />
|
||||
@@ -344,6 +410,18 @@ export class ServerMonitor extends React.Component {
|
||||
<ChartArea
|
||||
data={connChart}
|
||||
/>
|
||||
+ <ChartArea
|
||||
+ data={connEstablishedChart}
|
||||
+ interpolation="monotoneX"
|
||||
+ />
|
||||
+ <ChartArea
|
||||
+ data={connTimeWaitChart}
|
||||
+ interpolation="monotoneX"
|
||||
+ />
|
||||
+ <ChartArea
|
||||
+ data={connCloseWaitChart}
|
||||
+ interpolation="monotoneX"
|
||||
+ />
|
||||
</ChartGroup>
|
||||
</Chart>
|
||||
</GridItem>
|
||||
@@ -372,7 +450,7 @@ export class ServerMonitor extends React.Component {
|
||||
ariaDesc="cpu"
|
||||
ariaTitle={_("Server CPU Usage")}
|
||||
containerComponent={<ChartVoronoiContainer labels={({ datum }) => `${datum.name}: ${datum.y}%`} constrainToVisibleArea />}
|
||||
- height={200}
|
||||
+ height={220}
|
||||
minDomain={{ y: 0 }}
|
||||
padding={{
|
||||
bottom: 30,
|
||||
diff --git a/src/cockpit/389-console/src/lib/plugins/memberOf.jsx b/src/cockpit/389-console/src/lib/plugins/memberOf.jsx
|
||||
index 7c1b02297..704d6d0b1 100644
|
||||
--- a/src/cockpit/389-console/src/lib/plugins/memberOf.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/plugins/memberOf.jsx
|
||||
@@ -225,7 +225,7 @@ class MemberOf extends React.Component {
|
||||
|
||||
// Handle scope subtree
|
||||
this.handleSubtreeScopeSelect = (event, selection) => {
|
||||
- if (selection === "") {
|
||||
+ if (!selection.trim() || !valid_dn(selection)) {
|
||||
this.setState({isSubtreeScopeOpen: false});
|
||||
return;
|
||||
}
|
||||
@@ -257,7 +257,7 @@ class MemberOf extends React.Component {
|
||||
}, () => { this.validateConfig() });
|
||||
};
|
||||
this.handleSubtreeScopeCreateOption = newValue => {
|
||||
- if (newValue && !this.state.memberOfEntryScopeOptions.includes(newValue)) {
|
||||
+ if (newValue.trim() && valid_dn(newValue) && !this.state.memberOfEntryScopeOptions.includes(newValue)) {
|
||||
this.setState({
|
||||
memberOfEntryScopeOptions: [...this.state.memberOfEntryScopeOptions, newValue],
|
||||
isSubtreeScopeOpen: false
|
||||
@@ -267,7 +267,7 @@ class MemberOf extends React.Component {
|
||||
|
||||
// Handle Exclude Scope subtree
|
||||
this.handleExcludeScopeSelect = (event, selection) => {
|
||||
- if (selection === "") {
|
||||
+ if (!selection.trim() || !valid_dn(selection)) {
|
||||
this.setState({isExcludeScopeOpen: false});
|
||||
return;
|
||||
}
|
||||
@@ -299,7 +299,7 @@ class MemberOf extends React.Component {
|
||||
}, () => { this.validateConfig() });
|
||||
};
|
||||
this.handleExcludeCreateOption = newValue => {
|
||||
- if (newValue && !this.state.memberOfEntryScopeOptions.includes(newValue)) {
|
||||
+ if (newValue.trim() && valid_dn(newValue) && !this.state.memberOfEntryScopeOptions.includes(newValue)) {
|
||||
this.setState({
|
||||
memberOfEntryScopeExcludeOptions: [...this.state.memberOfEntryScopeExcludeOptions, newValue],
|
||||
isExcludeScopeOpen: false
|
||||
@@ -310,7 +310,7 @@ class MemberOf extends React.Component {
|
||||
// Modal scope and exclude Scope
|
||||
// Handle scope subtree
|
||||
this.handleConfigScopeSelect = (event, selection) => {
|
||||
- if (selection === "") {
|
||||
+ if (selection.trim() === "" || !valid_dn(selection)) {
|
||||
this.setState({isConfigSubtreeScopeOpen: false});
|
||||
return;
|
||||
}
|
||||
@@ -342,7 +342,7 @@ class MemberOf extends React.Component {
|
||||
}, () => { this.validateModal() });
|
||||
};
|
||||
this.handleConfigCreateOption = newValue => {
|
||||
- if (newValue && !this.state.configEntryScopeOptions.includes(newValue)) {
|
||||
+ if (newValue.trim() && valid_dn(newValue) && !this.state.configEntryScopeOptions.includes(newValue)) {
|
||||
this.setState({
|
||||
configEntryScopeOptions: [...this.state.configEntryScopeOptions, newValue],
|
||||
isConfigSubtreeScopeOpen: false
|
||||
@@ -352,7 +352,7 @@ class MemberOf extends React.Component {
|
||||
|
||||
// Handle Exclude Scope subtree
|
||||
this.handleConfigExcludeScopeSelect = (event, selection) => {
|
||||
- if (selection === "") {
|
||||
+ if (selection.trim() === "" || !valid_dn(selection)) {
|
||||
this.setState({isConfigExcludeScopeOpen: false});
|
||||
return;
|
||||
}
|
||||
@@ -384,7 +384,7 @@ class MemberOf extends React.Component {
|
||||
}, () => { this.validateModal() });
|
||||
};
|
||||
this.handleConfigExcludeCreateOption = newValue => {
|
||||
- if (newValue && !this.state.configEntryScopeExcludeOptions.includes(newValue)) {
|
||||
+ if (newValue.trim() && valid_dn(newValue) && !this.state.configEntryScopeExcludeOptions.includes(newValue)) {
|
||||
this.setState({
|
||||
configEntryScopeExcludeOptions: [...this.state.configEntryScopeExcludeOptions, newValue],
|
||||
isConfigExcludeScopeOpen: false
|
||||
@@ -1563,7 +1563,7 @@ class MemberOf extends React.Component {
|
||||
))}
|
||||
</Select>
|
||||
<FormHelperText >
|
||||
- {_("A subtree is required, and values must be valid DN's")}
|
||||
+ {"values must be valid DN's"}
|
||||
</FormHelperText>
|
||||
</GridItem>
|
||||
<GridItem className="ds-left-margin" span={3}>
|
||||
diff --git a/src/lib389/lib389/monitor.py b/src/lib389/lib389/monitor.py
|
||||
index 196577ed5..ec82b0346 100644
|
||||
--- a/src/lib389/lib389/monitor.py
|
||||
+++ b/src/lib389/lib389/monitor.py
|
||||
@@ -119,14 +119,26 @@ class Monitor(DSLdapObject):
|
||||
sslport = str(self._instance.sslport)
|
||||
|
||||
conn_count = 0
|
||||
+ conn_established_count = 0
|
||||
+ conn_close_wait_count = 0
|
||||
+ conn_time_wait_count = 0
|
||||
conns = psutil.net_connections()
|
||||
for conn in conns:
|
||||
if len(conn[4]) > 0:
|
||||
conn_port = str(conn[4][1])
|
||||
if conn_port in (port, sslport):
|
||||
+ if conn[5] == 'TIME_WAIT':
|
||||
+ conn_time_wait_count += 1
|
||||
+ if conn[5] == 'CLOSE_WAIT':
|
||||
+ conn_close_wait_count += 1
|
||||
+ if conn[5] == 'ESTABLISHED':
|
||||
+ conn_established_count += 1
|
||||
conn_count += 1
|
||||
|
||||
stats['connection_count'] = [str(conn_count)]
|
||||
+ stats['connection_established_count'] = [str(conn_established_count)]
|
||||
+ stats['connection_close_wait_count'] = [str(conn_close_wait_count)]
|
||||
+ stats['connection_time_wait_count'] = [str(conn_time_wait_count)]
|
||||
|
||||
return stats
|
||||
|
||||
--
|
||||
2.48.1
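
The lib389 change above (src/lib389/lib389/monitor.py) breaks the connection count down by TCP state using psutil. A minimal, standalone sketch of the same idea, with illustrative port numbers, is shown below; only psutil.net_connections() and the connection namedtuple's status/laddr fields are relied on.

import psutil

def count_conn_states(ports):
    # Tally TCP connections on the given local ports, grouped by state.
    counts = {'ESTABLISHED': 0, 'CLOSE_WAIT': 0, 'TIME_WAIT': 0, 'total': 0}
    for conn in psutil.net_connections(kind='tcp'):
        # Some entries have no local address; guard before reading the port.
        if conn.laddr and str(conn.laddr.port) in ports:
            counts['total'] += 1
            if conn.status in counts:
                counts[conn.status] += 1
    return counts

# Illustrative: count connections on the default LDAP/LDAPS ports.
print(count_conn_states({'389', '636'}))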
116
0023-Issue-6704-UI-Add-error-log-buffering-config.patch
Normal file
@ -0,0 +1,116 @@
From a66955c329472b1d837a27928584475e7e388984 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Fri, 28 Mar 2025 10:01:22 -0400
|
||||
Subject: [PATCH] Issue 6704 - UI - Add error log buffering config
|
||||
|
||||
Description:
|
||||
|
||||
Add error log buffering setting to the UI
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6704
|
||||
|
||||
Reviewed by: jchapman (Thanks!)
|
||||
---
|
||||
.../389-console/src/lib/server/errorLog.jsx | 23 +++++++++++++++++++
|
||||
1 file changed, 23 insertions(+)
|
||||
|
||||
diff --git a/src/cockpit/389-console/src/lib/server/errorLog.jsx b/src/cockpit/389-console/src/lib/server/errorLog.jsx
|
||||
index 0ad36e594..14e6aa40a 100644
|
||||
--- a/src/cockpit/389-console/src/lib/server/errorLog.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/server/errorLog.jsx
|
||||
@@ -38,6 +38,7 @@ const settings_attrs = [
|
||||
'nsslapd-errorlog',
|
||||
'nsslapd-errorlog-level',
|
||||
'nsslapd-errorlog-logging-enabled',
|
||||
+ 'nsslapd-errorlog-logbuffering',
|
||||
];
|
||||
|
||||
const _ = cockpit.gettext;
|
||||
@@ -363,6 +364,7 @@ export class ServerErrorLog extends React.Component {
|
||||
const attrs = config.attrs;
|
||||
let enabled = false;
|
||||
let compressed = false;
|
||||
+ let buffering = false;
|
||||
const level_val = parseInt(attrs['nsslapd-errorlog-level'][0]);
|
||||
const rows = [...this.state.rows];
|
||||
|
||||
@@ -372,6 +374,9 @@ export class ServerErrorLog extends React.Component {
|
||||
if (attrs['nsslapd-errorlog-compress'][0] === "on") {
|
||||
compressed = true;
|
||||
}
|
||||
+ if (attrs['nsslapd-errorlog-logbuffering'][0] === "on") {
|
||||
+ buffering = true;
|
||||
+ }
|
||||
|
||||
for (const row in rows) {
|
||||
if (rows[row].level & level_val) {
|
||||
@@ -403,6 +408,7 @@ export class ServerErrorLog extends React.Component {
|
||||
'nsslapd-errorlog-maxlogsize': attrs['nsslapd-errorlog-maxlogsize'][0],
|
||||
'nsslapd-errorlog-maxlogsperdir': attrs['nsslapd-errorlog-maxlogsperdir'][0],
|
||||
'nsslapd-errorlog-compress': compressed,
|
||||
+ 'nsslapd-errorlog-logbuffering': buffering,
|
||||
rows,
|
||||
// Record original values
|
||||
_rows: JSON.parse(JSON.stringify(rows)),
|
||||
@@ -421,6 +427,7 @@ export class ServerErrorLog extends React.Component {
|
||||
'_nsslapd-errorlog-maxlogsize': attrs['nsslapd-errorlog-maxlogsize'][0],
|
||||
'_nsslapd-errorlog-maxlogsperdir': attrs['nsslapd-errorlog-maxlogsperdir'][0],
|
||||
'_nsslapd-errorlog-compress': compressed,
|
||||
+ '_nsslapd-errorlog-logbuffering': buffering,
|
||||
})
|
||||
);
|
||||
})
|
||||
@@ -441,6 +448,7 @@ export class ServerErrorLog extends React.Component {
|
||||
const attrs = this.state.attrs;
|
||||
let enabled = false;
|
||||
let compressed = false;
|
||||
+ let buffering = false;
|
||||
const level_val = parseInt(attrs['nsslapd-errorlog-level'][0]);
|
||||
const rows = [...this.state.rows];
|
||||
|
||||
@@ -454,6 +462,9 @@ export class ServerErrorLog extends React.Component {
|
||||
if (attrs['nsslapd-errorlog-compress'][0] === "on") {
|
||||
compressed = true;
|
||||
}
|
||||
+ if (attrs['nsslapd-errorlog-logbuffering'][0] === "on") {
|
||||
+ buffering = true;
|
||||
+ }
|
||||
for (const row in rows) {
|
||||
if (rows[row].level & level_val) {
|
||||
rows[row].selected = true;
|
||||
@@ -483,6 +494,7 @@ export class ServerErrorLog extends React.Component {
|
||||
'nsslapd-errorlog-maxlogsize': attrs['nsslapd-errorlog-maxlogsize'][0],
|
||||
'nsslapd-errorlog-maxlogsperdir': attrs['nsslapd-errorlog-maxlogsperdir'][0],
|
||||
'nsslapd-errorlog-compress': compressed,
|
||||
+ 'nsslapd-errorlog-logbuffering': buffering,
|
||||
rows,
|
||||
// Record original values
|
||||
_rows: JSON.parse(JSON.stringify(rows)),
|
||||
@@ -501,6 +513,7 @@ export class ServerErrorLog extends React.Component {
|
||||
'_nsslapd-errorlog-maxlogsize': attrs['nsslapd-errorlog-maxlogsize'][0],
|
||||
'_nsslapd-errorlog-maxlogsperdir': attrs['nsslapd-errorlog-maxlogsperdir'][0],
|
||||
'_nsslapd-errorlog-compress': compressed,
|
||||
+ '_nsslapd-errorlog-logbuffering': buffering,
|
||||
}, this.props.enableTree);
|
||||
}
|
||||
|
||||
@@ -592,6 +605,16 @@ export class ServerErrorLog extends React.Component {
|
||||
/>
|
||||
</FormGroup>
|
||||
</Form>
|
||||
+ <Checkbox
|
||||
+ className="ds-left-margin-md ds-margin-top-lg"
|
||||
+ id="nsslapd-errorlog-logbuffering"
|
||||
+ isChecked={this.state['nsslapd-errorlog-logbuffering']}
|
||||
+ onChange={(e, checked) => {
|
||||
+ this.handleChange(e, "settings");
|
||||
+ }}
|
||||
+ title={"This applies to the error log. Enable error log buffering when using verbose logging levels, otherwise verbose logging levels will impact server performance (nsslapd-errorlog-logbuffering)."}
|
||||
+ label={_("Error Log Buffering Enabled")}
|
||||
+ />
|
||||
|
||||
<ExpandableSection
|
||||
className="ds-left-margin-md ds-margin-top-lg ds-font-size-md"
|
||||
--
|
||||
2.48.1
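
The checkbox added above maps to the nsslapd-errorlog-logbuffering attribute on cn=config. For reference, a small lib389-based sketch of toggling it directly is shown here, assuming an already-open DirSrv instance named inst; the snippet is illustrative and not part of the patch.

from lib389.config import Config

# Assumes `inst` is an already-connected lib389 DirSrv instance.
cfg = Config(inst)
# Enable error log buffering (recommended when verbose error log levels are in use).
cfg.replace('nsslapd-errorlog-logbuffering', 'on')
print(cfg.get_attr_val_utf8('nsslapd-errorlog-logbuffering'))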
474
0024-Issue-6700-CLI-UI-include-superior-objectclasses-all.patch
Normal file
@ -0,0 +1,474 @@
From 16c11881c48b8e285eb3593971bcb4cdf998887d Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Sun, 30 Mar 2025 15:49:45 -0400
|
||||
Subject: [PATCH] Issue 6700 - CLI/UI - include superior objectclasses' allowed
|
||||
and requires attrs
|
||||
|
||||
Description:
|
||||
|
||||
When you get/list an objectclass it only lists its own allowed and required
attributes, but it should also include all its superior objectclasses' allowed
and required attributes.
|
||||
|
||||
Added an option to the CLI to also include all the parent/superior
|
||||
required and allowed attributes
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6700
|
||||
|
||||
Reviewed by: spichugi & tbordaz(Thanks!)
|
||||
---
|
||||
.../suites/clu/dsconf_schema_superior_test.py | 122 ++++++++++++++++++
|
||||
.../tests/suites/schema/schema_test.py | 9 +-
|
||||
.../src/lib/ldap_editor/lib/utils.jsx | 3 +-
|
||||
src/cockpit/389-console/src/schema.jsx | 6 +-
|
||||
src/lib389/lib389/cli_conf/schema.py | 12 +-
|
||||
src/lib389/lib389/schema.py | 105 +++++++++++----
|
||||
6 files changed, 219 insertions(+), 38 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/clu/dsconf_schema_superior_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dsconf_schema_superior_test.py b/dirsrvtests/tests/suites/clu/dsconf_schema_superior_test.py
|
||||
new file mode 100644
|
||||
index 000000000..185e16af2
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsconf_schema_superior_test.py
|
||||
@@ -0,0 +1,122 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import logging
|
||||
+import json
|
||||
+import os
|
||||
+import subprocess
|
||||
+import pytest
|
||||
+from lib389.topologies import topology_st as topo
|
||||
+
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+
|
||||
+def execute_dsconf_command(dsconf_cmd, subcommands):
|
||||
+ """Execute dsconf command and return output and return code"""
|
||||
+
|
||||
+ cmdline = dsconf_cmd + subcommands
|
||||
+ proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
|
||||
+ out, _ = proc.communicate()
|
||||
+ return out.decode('utf-8'), proc.returncode
|
||||
+
|
||||
+
|
||||
+def get_dsconf_base_cmd(topo):
|
||||
+ """Return base dsconf command list"""
|
||||
+ return ['/usr/sbin/dsconf', topo.standalone.serverid,
|
||||
+ '-j', 'schema']
|
||||
+
|
||||
+
|
||||
+def test_schema_oc_superior(topo):
|
||||
+ """Specify a test case purpose or name here
|
||||
+
|
||||
+ :id: d12aab4a-1436-43eb-802a-0661281a13d0
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. List all the schema
|
||||
+ 2. List all the schema and include superior OC's attrs
|
||||
+ 3. Get objectclass list
|
||||
+ 4. Get objectclass list and include superior OC's attrs
|
||||
+ 5. Get objectclass
|
||||
+ 6. Get objectclass and include superior OC's attrs
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ """
|
||||
+
|
||||
+ dsconf_cmd = get_dsconf_base_cmd(topo)
|
||||
+
|
||||
+ # Test default schema list
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd, ['list'])
|
||||
+ assert rc == 0
|
||||
+ json_result = json.loads(output)
|
||||
+ for schema_item in json_result:
|
||||
+ if 'name' in schema_item and schema_item['name'] == 'inetOrgPerson':
|
||||
+ assert len(schema_item['must']) == 0
|
||||
+ break
|
||||
+
|
||||
+ # Test including the OC superior attributes
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd, ['list',
|
||||
+ '--include-oc-sup'])
|
||||
+ assert rc == 0
|
||||
+ json_result = json.loads(output)
|
||||
+ for schema_item in json_result:
|
||||
+ if 'name' in schema_item and schema_item['name'] == 'inetOrgPerson':
|
||||
+ assert len(schema_item['must']) > 0 and \
|
||||
+ 'cn' in schema_item['must'] and 'sn' in schema_item['must']
|
||||
+ break
|
||||
+
|
||||
+ # Test default objectclass list
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd, ['objectclasses', 'list'])
|
||||
+ assert rc == 0
|
||||
+ json_result = json.loads(output)
|
||||
+ for schema_item in json_result:
|
||||
+ if 'name' in schema_item and schema_item['name'] == 'inetOrgPerson':
|
||||
+ assert len(schema_item['must']) == 0
|
||||
+ break
|
||||
+
|
||||
+ # Test objectclass list and include superior attributes
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd, ['objectclasses', 'list',
|
||||
+ '--include-sup'])
|
||||
+ assert rc == 0
|
||||
+ json_result = json.loads(output)
|
||||
+ for schema_item in json_result:
|
||||
+ if 'name' in schema_item and schema_item['name'] == 'inetOrgPerson':
|
||||
+ assert len(schema_item['must']) > 0 and \
|
||||
+ 'cn' in schema_item['must'] and 'sn' in schema_item['must']
|
||||
+ break
|
||||
+
|
||||
+ # Test default objectclass query
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd, ['objectclasses', 'query',
|
||||
+ 'inetOrgPerson'])
|
||||
+ assert rc == 0
|
||||
+ result = json.loads(output)
|
||||
+ schema_item = result['oc']
|
||||
+ assert 'names' in schema_item
|
||||
+ assert schema_item['names'][0] == 'inetOrgPerson'
|
||||
+ assert len(schema_item['must']) == 0
|
||||
+
|
||||
+ # Test objectclass query and include superior attributes
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd, ['objectclasses', 'query',
|
||||
+ 'inetOrgPerson',
|
||||
+ '--include-sup'])
|
||||
+ assert rc == 0
|
||||
+ result = json.loads(output)
|
||||
+ schema_item = result['oc']
|
||||
+ assert 'names' in schema_item
|
||||
+ assert schema_item['names'][0] == 'inetOrgPerson'
|
||||
+ assert len(schema_item['must']) > 0 and 'cn' in schema_item['must'] \
|
||||
+ and 'sn' in schema_item['must']
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main(["-s", CURRENT_FILE])
|
||||
diff --git a/dirsrvtests/tests/suites/schema/schema_test.py b/dirsrvtests/tests/suites/schema/schema_test.py
|
||||
index afc9cc678..8ca15af70 100644
|
||||
--- a/dirsrvtests/tests/suites/schema/schema_test.py
|
||||
+++ b/dirsrvtests/tests/suites/schema/schema_test.py
|
||||
@@ -232,7 +232,7 @@ def test_gecos_mixed_definition_topo(topo_m2, request):
|
||||
repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
m1 = topo_m2.ms["supplier1"]
|
||||
m2 = topo_m2.ms["supplier2"]
|
||||
-
|
||||
+
|
||||
|
||||
# create a test user
|
||||
testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
|
||||
@@ -343,7 +343,7 @@ def test_gecos_directoryString_wins_M1(topo_m2, request):
|
||||
repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
m1 = topo_m2.ms["supplier1"]
|
||||
m2 = topo_m2.ms["supplier2"]
|
||||
-
|
||||
+
|
||||
|
||||
# create a test user
|
||||
testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
|
||||
@@ -471,7 +471,7 @@ def test_gecos_directoryString_wins_M2(topo_m2, request):
|
||||
repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
m1 = topo_m2.ms["supplier1"]
|
||||
m2 = topo_m2.ms["supplier2"]
|
||||
-
|
||||
+
|
||||
|
||||
# create a test user
|
||||
testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
|
||||
@@ -623,11 +623,10 @@ def test_definition_with_sharp(topology_st, request):
|
||||
# start the instances
|
||||
inst.start()
|
||||
|
||||
- i# Check that server is really running.
|
||||
+ # Check that server is really running.
|
||||
assert inst.status()
|
||||
|
||||
|
||||
-
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/src/cockpit/389-console/src/lib/ldap_editor/lib/utils.jsx b/src/cockpit/389-console/src/lib/ldap_editor/lib/utils.jsx
|
||||
index fc9c898fa..cd94063ec 100644
|
||||
--- a/src/cockpit/389-console/src/lib/ldap_editor/lib/utils.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/ldap_editor/lib/utils.jsx
|
||||
@@ -873,7 +873,8 @@ export function getAllObjectClasses (serverId, allOcCallback) {
|
||||
'ldapi://%2fvar%2frun%2fslapd-' + serverId + '.socket',
|
||||
'schema',
|
||||
'objectclasses',
|
||||
- 'list'
|
||||
+ 'list',
|
||||
+ '--include-sup'
|
||||
];
|
||||
const result = [];
|
||||
log_cmd("getAllObjectClasses", "", cmd);
|
||||
diff --git a/src/cockpit/389-console/src/schema.jsx b/src/cockpit/389-console/src/schema.jsx
|
||||
index e39f9fef2..19854e785 100644
|
||||
--- a/src/cockpit/389-console/src/schema.jsx
|
||||
+++ b/src/cockpit/389-console/src/schema.jsx
|
||||
@@ -440,7 +440,8 @@ export class Schema extends React.Component {
|
||||
"-j",
|
||||
"ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
"schema",
|
||||
- "list"
|
||||
+ "list",
|
||||
+ "--include-oc-sup"
|
||||
];
|
||||
log_cmd("loadSchemaData", "Get schema objects in one batch", cmd);
|
||||
cockpit
|
||||
@@ -568,7 +569,8 @@ export class Schema extends React.Component {
|
||||
"schema",
|
||||
"objectclasses",
|
||||
"query",
|
||||
- name
|
||||
+ name,
|
||||
+ "--include-sup"
|
||||
];
|
||||
|
||||
log_cmd("openObjectclassModal", "Fetch ObjectClass data from schema", cmd);
|
||||
diff --git a/src/lib389/lib389/cli_conf/schema.py b/src/lib389/lib389/cli_conf/schema.py
|
||||
index 7a06c91bc..7782aa5e5 100644
|
||||
--- a/src/lib389/lib389/cli_conf/schema.py
|
||||
+++ b/src/lib389/lib389/cli_conf/schema.py
|
||||
@@ -31,7 +31,8 @@ def list_all(inst, basedn, log, args):
|
||||
if args is not None and args.json:
|
||||
json = True
|
||||
|
||||
- objectclass_elems = schema.get_objectclasses(json=json)
|
||||
+ objectclass_elems = schema.get_objectclasses(include_sup=args.include_oc_sup,
|
||||
+ json=json)
|
||||
attributetype_elems = schema.get_attributetypes(json=json)
|
||||
matchingrule_elems = schema.get_matchingrules(json=json)
|
||||
|
||||
@@ -67,7 +68,7 @@ def list_objectclasses(inst, basedn, log, args):
|
||||
log = log.getChild('list_objectclasses')
|
||||
schema = Schema(inst)
|
||||
if args is not None and args.json:
|
||||
- print(dump_json(schema.get_objectclasses(json=True), indent=4))
|
||||
+ print(dump_json(schema.get_objectclasses(include_sup=args.include_sup, json=True), indent=4))
|
||||
else:
|
||||
for oc in schema.get_objectclasses():
|
||||
print(oc)
|
||||
@@ -108,7 +109,7 @@ def query_objectclass(inst, basedn, log, args):
|
||||
schema = Schema(inst)
|
||||
# Need the query type
|
||||
oc = _get_arg(args.name, msg="Enter objectclass to query")
|
||||
- result = schema.query_objectclass(oc, json=args.json)
|
||||
+ result = schema.query_objectclass(oc, include_sup=args.include_sup, json=args.json)
|
||||
if args.json:
|
||||
print(dump_json(result, indent=4))
|
||||
else:
|
||||
@@ -339,6 +340,9 @@ def create_parser(subparsers):
|
||||
schema_subcommands = schema_parser.add_subparsers(help='schema')
|
||||
schema_list_parser = schema_subcommands.add_parser('list', help='List all schema objects on this system', formatter_class=CustomHelpFormatter)
|
||||
schema_list_parser.set_defaults(func=list_all)
|
||||
+ schema_list_parser.add_argument('--include-oc-sup', action='store_true',
|
||||
+ default=False,
|
||||
+ help="Include the superior objectclasses' \"may\" and \"must\" attributes")
|
||||
|
||||
attributetypes_parser = schema_subcommands.add_parser('attributetypes', help='Work with attribute types on this system', formatter_class=CustomHelpFormatter)
|
||||
attributetypes_subcommands = attributetypes_parser.add_subparsers(help='schema')
|
||||
@@ -365,9 +369,11 @@ def create_parser(subparsers):
|
||||
objectclasses_subcommands = objectclasses_parser.add_subparsers(help='schema')
|
||||
oc_list_parser = objectclasses_subcommands.add_parser('list', help='List available objectClasses on this system', formatter_class=CustomHelpFormatter)
|
||||
oc_list_parser.set_defaults(func=list_objectclasses)
|
||||
+ oc_list_parser.add_argument('--include-sup', action='store_true', default=False, help="Include the superior objectclasses' \"may\" and \"must\" attributes")
|
||||
oc_query_parser = objectclasses_subcommands.add_parser('query', help='Query an objectClass', formatter_class=CustomHelpFormatter)
|
||||
oc_query_parser.set_defaults(func=query_objectclass)
|
||||
oc_query_parser.add_argument('name', nargs='?', help='ObjectClass to query')
|
||||
+ oc_query_parser.add_argument('--include-sup', action='store_true', default=False, help="Include the superior objectclasses' \"may\" and \"must\" attributes")
|
||||
oc_add_parser = objectclasses_subcommands.add_parser('add', help='Add an objectClass to this system', formatter_class=CustomHelpFormatter)
|
||||
oc_add_parser.set_defaults(func=add_objectclass)
|
||||
_add_parser_args(oc_add_parser, 'objectclasses')
|
||||
diff --git a/src/lib389/lib389/schema.py b/src/lib389/lib389/schema.py
|
||||
index a47e13db8..2e8aa3ed8 100755
|
||||
--- a/src/lib389/lib389/schema.py
|
||||
+++ b/src/lib389/lib389/schema.py
|
||||
@@ -116,15 +116,66 @@ class Schema(DSLdapObject):
|
||||
result = ATTR_SYNTAXES
|
||||
return result
|
||||
|
||||
- def _get_schema_objects(self, object_model, json=False):
|
||||
- """Get all the schema objects for a specific model: Attribute, Objectclass,
|
||||
- or Matchingreule.
|
||||
+ def gather_oc_sup_attrs(self, oc, sup_oc, ocs, processed_ocs=None):
|
||||
+ """
|
||||
+ Recursively build up all the objectclass superiors' may/must
|
||||
+ attributes
|
||||
+
|
||||
+ @param oc - original objectclass we are building up
|
||||
+ @param sup_oc - superior objectclass that we are gathering must/may
|
||||
+ attributes from, and for following its superior
|
||||
+ objectclass
|
||||
+ @param ocs - all objectclasses
|
||||
+ @param processed_ocs - list of all the superior objectclasees we have
|
||||
+ already processed. Used for checking if we
|
||||
+ somehow get into an infinite loop
|
||||
+ """
|
||||
+ if processed_ocs is None:
|
||||
+ # First pass, init our values
|
||||
+ sup_oc = oc
|
||||
+ processed_ocs = [sup_oc['names'][0]]
|
||||
+ elif sup_oc['names'][0] in processed_ocs:
|
||||
+ # We're looping, need to abort. This should never happen because
|
||||
+ # of how the schema is structured, but perhaps a bug was
|
||||
+ # introduced in the server schema handling?
|
||||
+ return
|
||||
+
|
||||
+ # update processed list to prevent loops
|
||||
+ processed_ocs.append(sup_oc['names'][0])
|
||||
+
|
||||
+ for soc in sup_oc['sup']:
|
||||
+ if soc.lower() == "top":
|
||||
+ continue
|
||||
+ # Get sup_oc
|
||||
+ for obj in ocs:
|
||||
+ oc_dict = vars(ObjectClass(obj))
|
||||
+ name = oc_dict['names'][0]
|
||||
+ if name.lower() == soc.lower():
|
||||
+ # Found the superior, get its attributes
|
||||
+ for attr in oc_dict['may']:
|
||||
+ if attr not in oc['may']:
|
||||
+ oc['may'] = oc['may'] + (attr,)
|
||||
+ for attr in oc_dict['must']:
|
||||
+ if attr not in oc['must']:
|
||||
+ oc['must'] = oc['must'] + (attr,)
|
||||
+
|
||||
+ # Sort the tuples
|
||||
+ oc['may'] = tuple(sorted(oc['may']))
|
||||
+ oc['must'] = tuple(sorted(oc['must']))
|
||||
+
|
||||
+ # Now recurse and check this objectclass
|
||||
+ self.gather_oc_sup_attrs(oc, oc_dict, ocs, processed_ocs)
|
||||
+
|
||||
+ def _get_schema_objects(self, object_model, include_sup=False, json=False):
|
||||
+ """Get all the schema objects for a specific model:
|
||||
+
|
||||
+ Attribute, ObjectClass, or MatchingRule.
|
||||
"""
|
||||
attr_name = self._get_attr_name_by_model(object_model)
|
||||
results = self.get_attr_vals_utf8(attr_name)
|
||||
+ object_insts = []
|
||||
|
||||
if json:
|
||||
- object_insts = []
|
||||
for obj in results:
|
||||
obj_i = vars(object_model(obj))
|
||||
if len(obj_i["names"]) == 1:
|
||||
@@ -136,20 +187,9 @@ class Schema(DSLdapObject):
|
||||
else:
|
||||
obj_i['name'] = ""
|
||||
|
||||
- # Temporary workaround for X-ORIGIN in ObjectClass objects.
|
||||
- # It should be removed after https://github.com/python-ldap/python-ldap/pull/247 is merged
|
||||
- if " X-ORIGIN " in obj and obj_i['names'] == vars(object_model(obj))['names']:
|
||||
- remainder = obj.split(" X-ORIGIN ")[1]
|
||||
- if remainder[:1] == "(":
|
||||
- # Have multiple values
|
||||
- end = remainder.rfind(')')
|
||||
- vals = remainder[1:end]
|
||||
- vals = re.findall(X_ORIGIN_REGEX, vals)
|
||||
- # For now use the first value, but this should be a set (another bug in python-ldap)
|
||||
- obj_i['x_origin'] = vals[0]
|
||||
- else:
|
||||
- # Single X-ORIGIN value
|
||||
- obj_i['x_origin'] = obj.split(" X-ORIGIN ")[1].split("'")[1]
|
||||
+ if object_model is ObjectClass and include_sup:
|
||||
+ self.gather_oc_sup_attrs(obj_i, None, results)
|
||||
+
|
||||
object_insts.append(obj_i)
|
||||
|
||||
object_insts = sorted(object_insts, key=itemgetter('name'))
|
||||
@@ -161,11 +201,20 @@ class Schema(DSLdapObject):
|
||||
|
||||
return {'type': 'list', 'items': object_insts}
|
||||
else:
|
||||
- object_insts = [object_model(obj_i) for obj_i in results]
|
||||
+ for obj_i in results:
|
||||
+ obj_i = object_model(obj_i)
|
||||
+ if object_model is ObjectClass and include_sup:
|
||||
+ obj_ii = vars(obj_i)
|
||||
+ self.gather_oc_sup_attrs(obj_ii, None, results)
|
||||
+ obj_i.may = obj_ii['may']
|
||||
+ obj_i.must = obj_ii['must']
|
||||
+ object_insts.append(obj_i)
|
||||
return sorted(object_insts, key=lambda x: x.names, reverse=False)
|
||||
|
||||
- def _get_schema_object(self, name, object_model, json=False):
|
||||
- objects = self._get_schema_objects(object_model, json=json)
|
||||
+ def _get_schema_object(self, name, object_model, include_sup=False, json=False):
|
||||
+ objects = self._get_schema_objects(object_model,
|
||||
+ include_sup=include_sup,
|
||||
+ json=json)
|
||||
if json:
|
||||
schema_object = [obj_i for obj_i in objects["items"] if name.lower() in
|
||||
list(map(str.lower, obj_i["names"]))]
|
||||
@@ -227,7 +276,6 @@ class Schema(DSLdapObject):
|
||||
def _remove_schema_object(self, name, object_model):
|
||||
attr_name = self._get_attr_name_by_model(object_model)
|
||||
schema_object = self._get_schema_object(name, object_model)
|
||||
-
|
||||
return self.remove(attr_name, str(schema_object))
|
||||
|
||||
def _edit_schema_object(self, name, parameters, object_model):
|
||||
@@ -371,7 +419,6 @@ class Schema(DSLdapObject):
|
||||
:param name: the name of the objectClass you want to remove.
|
||||
:type name: str
|
||||
"""
|
||||
-
|
||||
return self._remove_schema_object(name, ObjectClass)
|
||||
|
||||
def edit_attributetype(self, name, parameters):
|
||||
@@ -396,7 +443,7 @@ class Schema(DSLdapObject):
|
||||
|
||||
return self._edit_schema_object(name, parameters, ObjectClass)
|
||||
|
||||
- def get_objectclasses(self, json=False):
|
||||
+ def get_objectclasses(self, include_sup=False, json=False):
|
||||
"""Returns a list of ldap.schema.models.ObjectClass objects for all
|
||||
objectClasses supported by this instance.
|
||||
|
||||
@@ -404,7 +451,8 @@ class Schema(DSLdapObject):
|
||||
:type json: bool
|
||||
"""
|
||||
|
||||
- return self._get_schema_objects(ObjectClass, json=json)
|
||||
+ return self._get_schema_objects(ObjectClass, include_sup=include_sup,
|
||||
+ json=json)
|
||||
|
||||
def get_attributetypes(self, json=False):
|
||||
"""Returns a list of ldap.schema.models.AttributeType objects for all
|
||||
@@ -447,7 +495,8 @@ class Schema(DSLdapObject):
|
||||
else:
|
||||
return matching_rule
|
||||
|
||||
- def query_objectclass(self, objectclassname, json=False):
|
||||
+ def query_objectclass(self, objectclassname, include_sup=False,
|
||||
+ json=False):
|
||||
"""Returns a single ObjectClass instance that matches objectclassname.
|
||||
Returns None if the objectClass doesn't exist.
|
||||
|
||||
@@ -462,7 +511,9 @@ class Schema(DSLdapObject):
|
||||
<ldap.schema.models.ObjectClass instance>
|
||||
"""
|
||||
|
||||
- objectclass = self._get_schema_object(objectclassname, ObjectClass, json=json)
|
||||
+ objectclass = self._get_schema_object(objectclassname, ObjectClass,
|
||||
+ include_sup=include_sup,
|
||||
+ json=json)
|
||||
|
||||
if json:
|
||||
result = {'type': 'schema', 'oc': objectclass}
|
||||
--
|
||||
2.48.1
|
||||
|
||||
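The hunks above add an include_sup option to lib389's Schema API. A minimal usage sketch, not part of the patch, assuming a connected lib389 DirSrv instance named inst and the lib389.schema module path; everything except Schema, query_objectclass and include_sup is illustrative:

# Sketch only: query an objectclass with its inherited may/must attributes
# folded in, as computed by gather_oc_sup_attrs() in the patch above.
from lib389.schema import Schema

def show_objectclass_with_sup(inst, name="inetOrgPerson"):
    schema = Schema(inst)
    # json=True wraps the result as {'type': 'schema', 'oc': ...}
    result = schema.query_objectclass(name, include_sup=True, json=True)
    print(result['oc'])
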
701
0025-Issue-6464-UI-Fixed-spelling-in-cockpit-messages.patch
Normal file
@ -0,0 +1,701 @@
From ed7eeafa60db42aa5950aa28f6abdda57e504017 Mon Sep 17 00:00:00 2001
From: Mike Weinberg <5876158+mikeweinberg@users.noreply.github.com>
Date: Tue, 24 Dec 2024 21:41:59 -0500
Subject: [PATCH] Issue 6464 - UI - Fixed spelling in cockpit messages

Description:
I used typos-cli to assist in correcting more spelling errors in cockpit 389-console.
I focused only on messages displayed to users in the cockpit project.
I also attempted to fix the Japanese translation file so it wouldn't break.

Relates: https://github.com/389ds/389-ds-base/issues/6464

Reviewed by: @droideck
---
src/cockpit/389-console/po/ja.po | 52 +++++++++----------
src/cockpit/389-console/src/database.jsx | 2 +-
src/cockpit/389-console/src/dsModals.jsx | 4 +-
.../src/lib/database/databaseConfig.jsx | 2 +-
.../src/lib/database/databaseModal.jsx | 2 +-
.../src/lib/database/globalPwp.jsx | 2 +-
.../389-console/src/lib/database/indexes.jsx | 4 +-
.../389-console/src/lib/database/localPwp.jsx | 4 +-
.../ldap_editor/wizards/operations/aciNew.jsx | 2 +-
.../wizards/operations/addCosDefinition.jsx | 4 +-
.../src/lib/monitor/monitorModals.jsx | 2 +-
.../src/lib/monitor/replMonitor.jsx | 2 +-
.../389-console/src/lib/plugins/memberOf.jsx | 2 +-
.../src/lib/plugins/pamPassThru.jsx | 10 ++--
.../lib/plugins/passthroughAuthentication.jsx | 6 +--
.../389-console/src/lib/plugins/usn.jsx | 2 +-
.../src/lib/replication/replConfig.jsx | 2 +-
.../src/lib/replication/replModals.jsx | 4 +-
.../src/lib/replication/replTasks.jsx | 6 +--
.../lib/security/certificateManagement.jsx | 8 +--
.../src/lib/security/securityModals.jsx | 4 +-
src/cockpit/389-console/src/schema.jsx | 6 +--
22 files changed, 66 insertions(+), 66 deletions(-)

diff --git a/src/cockpit/389-console/po/ja.po b/src/cockpit/389-console/po/ja.po
|
||||
index 8d9aaa6aa..b159b2f6b 100644
|
||||
--- a/src/cockpit/389-console/po/ja.po
|
||||
+++ b/src/cockpit/389-console/po/ja.po
|
||||
@@ -285,7 +285,7 @@ msgstr "接続名"
|
||||
|
||||
#: src/lib/monitor/monitorModals.jsx:634
|
||||
msgid ""
|
||||
-"Bind password for the specified instance. You can also speciy a password "
|
||||
+"Bind password for the specified instance. You can also specify a password "
|
||||
"file but the filename needs to be inside of brackets [/PATH/FILE]"
|
||||
msgstr ""
|
||||
"指定されたインスタンスのバインドパスワードを入力してください。または、パス"
|
||||
@@ -1813,9 +1813,9 @@ msgstr "レプリカDSRC情報を読み込んでいます ..."
|
||||
|
||||
#: src/lib/monitor/replMonitor.jsx:1501
|
||||
msgid ""
|
||||
-"Only one monitor configuraton can be saved in the server's '~/.dsrc' file. "
|
||||
+"Only one monitor configuration can be saved in the server's '~/.dsrc' file. "
|
||||
"There is already an existing monitor configuration, and if you proceed it "
|
||||
-"will be completely overwritten with the new configuraton."
|
||||
+"will be completely overwritten with the new configuration."
|
||||
msgstr ""
|
||||
"サーバの '~/.dsrc' ファイルに保存できる監視設定は1つだけです。すでに既存"
|
||||
"の監視設定がありますが、このまま進むと新しい設定で完全に上書きされます。"
|
||||
@@ -3361,7 +3361,7 @@ msgid "Suffix is required."
|
||||
msgstr "サフィックスが必要です。"
|
||||
|
||||
#: src/lib/plugins/usn.jsx:221
|
||||
-msgid "Cleanup USN Tombstones task was successfull"
|
||||
+msgid "Cleanup USN Tombstones task was successful"
|
||||
msgstr "USN Tombstonesのクリーンアップタスクが正常に終了しました"
|
||||
|
||||
#: src/lib/plugins/usn.jsx:232
|
||||
@@ -3584,7 +3584,7 @@ msgid "Fixup DN is required."
|
||||
msgstr "修正DNは必須です。"
|
||||
|
||||
#: src/lib/plugins/memberOf.jsx:624
|
||||
-msgid "Fixup task for $0 was successfull"
|
||||
+msgid "Fixup task for $0 was successful"
|
||||
msgstr "$0 の修正タスクが成功しました"
|
||||
|
||||
#: src/lib/plugins/memberOf.jsx:780
|
||||
@@ -4320,7 +4320,7 @@ msgid "Error during the pamConfig entry $0 operation - $1"
|
||||
msgstr "pamConfigエントリの$0に失敗しました - $1"
|
||||
|
||||
#: src/lib/plugins/pamPassThru.jsx:651
|
||||
-msgid "$0 PAM Passthough Auth Config Entry"
|
||||
+msgid "$0 PAM Passthrough Auth Config Entry"
|
||||
msgstr "PAMパススルー認証の設定を$0"
|
||||
|
||||
#: src/lib/plugins/pamPassThru.jsx:702
|
||||
@@ -4708,7 +4708,7 @@ msgstr "サフィックスの作成"
|
||||
#: src/lib/database/databaseModal.jsx:286
|
||||
msgid ""
|
||||
"Database suffix, like 'dc=example,dc=com'. The suffix must be a valid LDAP "
|
||||
-"Distiguished Name (DN)"
|
||||
+"Distinguished Name (DN)"
|
||||
msgstr ""
|
||||
"データベースのサフィックスは 'dc=example,dc=com' のようなものです。サフィック"
|
||||
"スは有効なLDAPの識別名(DN)でなければなりません。"
|
||||
@@ -5466,7 +5466,7 @@ msgstr "パスワードの有効期限を設定する"
|
||||
#: src/lib/database/localPwp.jsx:349 src/lib/database/localPwp.jsx:2732
|
||||
#: src/lib/database/globalPwp.jsx:1176
|
||||
msgid ""
|
||||
-"The maxiumum age of a password in seconds before it expires (passwordMaxAge)."
|
||||
+"The maximum age of a password in seconds before it expires (passwordMaxAge)."
|
||||
msgstr "パスワードの有効期限の最大値を秒単位で指定します。(passwordMaxAge)"
|
||||
|
||||
#: src/lib/database/localPwp.jsx:353 src/lib/database/localPwp.jsx:2734
|
||||
@@ -6821,7 +6821,7 @@ msgid "Index Types"
|
||||
msgstr "インデックスタイプ"
|
||||
|
||||
#: src/lib/database/indexes.jsx:875 src/lib/database/indexes.jsx:1015
|
||||
-msgid "Equailty Indexing"
|
||||
+msgid "Equality Indexing"
|
||||
msgstr "等価性インデックス化"
|
||||
|
||||
#: src/lib/database/indexes.jsx:887 src/lib/database/indexes.jsx:1025
|
||||
@@ -7689,7 +7689,7 @@ msgstr "バインドDNグループ"
|
||||
|
||||
#: src/lib/replication/replConfig.jsx:630
|
||||
msgid ""
|
||||
-"The interval to check for any changes in the group memebrship specified in "
|
||||
+"The interval to check for any changes in the group membership specified in "
|
||||
"the Bind DN Group and automatically rebuilds the list for the replication "
|
||||
"managers accordingly. (nsds5replicabinddngroupcheckinterval)."
|
||||
msgstr ""
|
||||
@@ -7853,7 +7853,7 @@ msgid "Local RUV"
|
||||
msgstr "ローカルRUV"
|
||||
|
||||
#: src/lib/replication/replTasks.jsx:365
|
||||
-msgid "RRefresh the RUV for this suffixs"
|
||||
+msgid "Refresh the RUV for this suffix"
|
||||
msgstr "このサフィックスのRUVを更新します"
|
||||
|
||||
#: src/lib/replication/replTasks.jsx:375 src/lib/replication/replTables.jsx:290
|
||||
@@ -7861,7 +7861,7 @@ msgid "Remote RUV's"
|
||||
msgstr "リモートRUV"
|
||||
|
||||
#: src/lib/replication/replTasks.jsx:379
|
||||
-msgid "Refresh the remote RUVs for this suffixs"
|
||||
+msgid "Refresh the remote RUVs for this suffix"
|
||||
msgstr "このサフィックスのリモートRUVを更新します"
|
||||
|
||||
#: src/lib/replication/replTasks.jsx:394
|
||||
@@ -8330,7 +8330,7 @@ msgstr "スケジュール"
|
||||
#: src/lib/replication/replModals.jsx:1402
|
||||
msgid ""
|
||||
"By default replication updates are sent to the replica as soon as possible, "
|
||||
-"but if there is a need for replication updates to only be sent on certains "
|
||||
+"but if there is a need for replication updates to only be sent on certain "
|
||||
"days and within certain windows of time then you can setup a custom "
|
||||
"replication schedule."
|
||||
msgstr ""
|
||||
@@ -9823,7 +9823,7 @@ msgid "User DN Aliases (userdn)"
|
||||
msgstr "ユーザDNエイリアス"
|
||||
|
||||
#: src/lib/ldap_editor/wizards/operations/aciNew.jsx:1470
|
||||
-msgid "Special bind rules for user DN catagories"
|
||||
+msgid "Special bind rules for user DN categories"
|
||||
msgstr "ユーザDNカテゴリに特別なバインドルールを設定"
|
||||
|
||||
#: src/lib/ldap_editor/wizards/operations/aciNew.jsx:1473
|
||||
@@ -10114,15 +10114,15 @@ msgid "Choose The New CoS Template Parent DN"
|
||||
msgstr "新しいCoSテンプレートの親DNを選択してください"
|
||||
|
||||
#: src/lib/ldap_editor/wizards/operations/addCosDefinition.jsx:992
|
||||
-msgid "Leaving CoS Definiton Creation Wizard"
|
||||
+msgid "Leaving CoS Definition Creation Wizard"
|
||||
msgstr "CoS定義作成ウィザードを終了します"
|
||||
|
||||
#: src/lib/ldap_editor/wizards/operations/addCosDefinition.jsx:1004
|
||||
msgid ""
|
||||
-"You are about to leave CoS Definiton creation wizard. After you click "
|
||||
+"You are about to leave CoS Definition creation wizard. After you click "
|
||||
"'Confirm', you'll appear in CoS Template creation wizard and you won't able "
|
||||
"to return from there until the process is finished. Then you'll be able to "
|
||||
-"use the created entry in the CoS definiton creation. It'll be preselected "
|
||||
+"use the created entry in the CoS definition creation. It'll be preselected "
|
||||
"for you automatically."
|
||||
msgstr ""
|
||||
"CoS定義作成ウィザードを終了し、CoSテンプレート作成ウィザードに移動します。移"
|
||||
@@ -11644,11 +11644,11 @@ msgid "Upload PEM File"
|
||||
msgstr "PEMファイルをアップロード"
|
||||
|
||||
#: src/lib/security/securityModals.jsx:290
|
||||
-msgid "Choose a cerificate from the server's certificate directory"
|
||||
+msgid "Choose a certificate from the server's certificate directory"
|
||||
msgstr "サーバの証明書ディレクトリから証明書を選択してください"
|
||||
|
||||
#: src/lib/security/securityModals.jsx:294
|
||||
-msgid "Choose Cerificate From Server"
|
||||
+msgid "Choose Certificate From Server"
|
||||
msgstr "サーバから証明書を選択"
|
||||
|
||||
#: src/lib/security/securityModals.jsx:313
|
||||
@@ -12004,7 +12004,7 @@ msgid "Loading Certificates ..."
|
||||
msgstr "証明書を読み込んでいます..."
|
||||
|
||||
#: src/lib/security/certificateManagement.jsx:1245
|
||||
-msgid "Trusted Certificate Authorites"
|
||||
+msgid "Trusted Certificate Authorities"
|
||||
msgstr "信頼された認証局"
|
||||
|
||||
#: src/lib/security/certificateManagement.jsx:1261
|
||||
@@ -12075,7 +12075,7 @@ msgstr "警告 - CA証明書のプロパティを変更しています"
|
||||
|
||||
#: src/lib/security/certificateManagement.jsx:1466
|
||||
msgid ""
|
||||
-"Removing the 'C' or 'T' flags from the SSL trust catagory could break all "
|
||||
+"Removing the 'C' or 'T' flags from the SSL trust category could break all "
|
||||
"TLS connectivity to and from the server, are you sure you want to proceed?"
|
||||
msgstr ""
|
||||
"SSL信頼カテゴリから 'C' または 'T' フラグを削除すると、サーバとのすべてのTLS"
|
||||
@@ -14119,7 +14119,7 @@ msgstr "サフィックスDN"
|
||||
#: src/database.jsx:1431
|
||||
msgid ""
|
||||
"Database suffix, like 'dc=example,dc=com'. The suffix must be a valid LDAP "
|
||||
-"Distiguished Name (DN)."
|
||||
+"Distinguished Name (DN)."
|
||||
msgstr ""
|
||||
"'dc=example,dc=com'のようなデータベースサフィックスを設定します。サフィックス"
|
||||
"には有効なLDAP識別名(DN)を設定してください。"
|
||||
@@ -14406,7 +14406,7 @@ msgstr "データベースを作成する"
|
||||
#: src/dsModals.jsx:559
|
||||
msgid ""
|
||||
"Database suffix, like 'dc=example,dc=com'. The suffix must be a valid LDAP "
|
||||
-"Distiguished Name (DN)"
|
||||
+"Distinguished Name (DN)"
|
||||
msgstr ""
|
||||
"'dc=example,dc=com'のようなデータベースサフィックスを設定します。サフィックス"
|
||||
"には有効なLDAP識別名(DN)を設定してください。"
|
||||
@@ -14807,7 +14807,7 @@ msgid "ObjectClass Name is required."
|
||||
msgstr "オブジェクトクラス名は必須です。"
|
||||
|
||||
#: src/schema.jsx:764
|
||||
-msgid "ObjectClass $0 - $1 operation was successfull"
|
||||
+msgid "ObjectClass $0 - $1 operation was successful"
|
||||
msgstr "オブジェクトクラス $0 の $1 を行いました"
|
||||
|
||||
#: src/schema.jsx:779
|
||||
@@ -14823,7 +14823,7 @@ msgid "Error during Attribute removal operation - $0"
|
||||
msgstr "属性の削除に失敗しました - $0"
|
||||
|
||||
#: src/schema.jsx:1081
|
||||
-msgid "Attribute $0 - add operation was successfull"
|
||||
+msgid "Attribute $0 - add operation was successful"
|
||||
msgstr "属性 $0 を追加しました"
|
||||
|
||||
#: src/schema.jsx:1096
|
||||
@@ -14831,7 +14831,7 @@ msgid "Error during the Attribute add operation - $0"
|
||||
msgstr "属性の追加に失敗しました - $0"
|
||||
|
||||
#: src/schema.jsx:1192
|
||||
-msgid "Attribute $0 - replace operation was successfull"
|
||||
+msgid "Attribute $0 - replace operation was successful"
|
||||
msgstr "属性 $0 を置き換えました"
|
||||
|
||||
#: src/schema.jsx:1207
|
||||
diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx
|
||||
index 509f7105c..c0c4be414 100644
|
||||
--- a/src/cockpit/389-console/src/database.jsx
|
||||
+++ b/src/cockpit/389-console/src/database.jsx
|
||||
@@ -1487,7 +1487,7 @@ class CreateSuffixModal extends React.Component {
|
||||
<FormGroup
|
||||
label={_("Suffix DN")}
|
||||
fieldId="createSuffix"
|
||||
- title={_("Database suffix, like 'dc=example,dc=com'. The suffix must be a valid LDAP Distiguished Name (DN).")}
|
||||
+ title={_("Database suffix, like 'dc=example,dc=com'. The suffix must be a valid LDAP Distinguished Name (DN).")}
|
||||
>
|
||||
<TextInput
|
||||
isRequired
|
||||
diff --git a/src/cockpit/389-console/src/dsModals.jsx b/src/cockpit/389-console/src/dsModals.jsx
|
||||
index bf19ce144..367cf4759 100644
|
||||
--- a/src/cockpit/389-console/src/dsModals.jsx
|
||||
+++ b/src/cockpit/389-console/src/dsModals.jsx
|
||||
@@ -324,7 +324,7 @@ export class CreateInstanceModal extends React.Component {
|
||||
})
|
||||
.done(() => {
|
||||
// Success!!! Now set Root DN pw, and cleanup everything up...
|
||||
- log_cmd("handleCreateInstance", "Instance creation compelete, remove INF file...", rm_cmd);
|
||||
+ log_cmd("handleCreateInstance", "Instance creation complete, remove INF file...", rm_cmd);
|
||||
cockpit.spawn(rm_cmd, { superuser: true });
|
||||
|
||||
const dm_pw_cmd = ['dsconf', '-j', 'ldapi://%2fvar%2frun%2fslapd-' + newServerId + '.socket',
|
||||
@@ -585,7 +585,7 @@ export class CreateInstanceModal extends React.Component {
|
||||
/>
|
||||
</Grid>
|
||||
<div className={createDBCheckbox ? "" : "ds-hidden"}>
|
||||
- <Grid title={_("Database suffix, like 'dc=example,dc=com'. The suffix must be a valid LDAP Distiguished Name (DN)")}>
|
||||
+ <Grid title={_("Database suffix, like 'dc=example,dc=com'. The suffix must be a valid LDAP Distinguished Name (DN)")}>
|
||||
<GridItem className="ds-label" offset={1} span={3}>
|
||||
{_("Database Suffix")}
|
||||
</GridItem>
|
||||
diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
|
||||
index 52a2cf2df..4c7fce706 100644
|
||||
--- a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
|
||||
@@ -1692,7 +1692,7 @@ export class GlobalDatabaseConfigMDB extends React.Component {
|
||||
</GridItem>
|
||||
</Grid>
|
||||
<Grid
|
||||
- title={_("The maximun number of read transactions that can be opened simultaneously. A value of 0 means this value is computed by the server (nsslapd-mdb-max-readers).")}
|
||||
+ title={_("The maximum number of read transactions that can be opened simultaneously. A value of 0 means this value is computed by the server (nsslapd-mdb-max-readers).")}
|
||||
className="ds-margin-top-xlg"
|
||||
>
|
||||
<GridItem className="ds-label" span={4}>
|
||||
diff --git a/src/cockpit/389-console/src/lib/database/databaseModal.jsx b/src/cockpit/389-console/src/lib/database/databaseModal.jsx
|
||||
index 8d668494e..2edf42b4b 100644
|
||||
--- a/src/cockpit/389-console/src/lib/database/databaseModal.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/database/databaseModal.jsx
|
||||
@@ -284,7 +284,7 @@ class CreateSubSuffixModal extends React.Component {
|
||||
]}
|
||||
>
|
||||
<Form isHorizontal autoComplete="off">
|
||||
- <Grid className="ds-margin-top" title={_("Database suffix, like 'dc=example,dc=com'. The suffix must be a valid LDAP Distiguished Name (DN)")}>
|
||||
+ <Grid className="ds-margin-top" title={_("Database suffix, like 'dc=example,dc=com'. The suffix must be a valid LDAP Distinguished Name (DN)")}>
|
||||
<GridItem className="ds-label" span={3}>
|
||||
{_("Sub-Suffix DN")}
|
||||
</GridItem>
|
||||
diff --git a/src/cockpit/389-console/src/lib/database/globalPwp.jsx b/src/cockpit/389-console/src/lib/database/globalPwp.jsx
|
||||
index 264ff2ce2..171630fa3 100644
|
||||
--- a/src/cockpit/389-console/src/lib/database/globalPwp.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/database/globalPwp.jsx
|
||||
@@ -1334,7 +1334,7 @@ export class GlobalPwPolicy extends React.Component {
|
||||
if (this.state.passwordexp) {
|
||||
pwExpirationRows = (
|
||||
<div className="ds-margin-left">
|
||||
- <Grid className="ds-margin-top" title={_("The maxiumum age of a password in seconds before it expires (passwordMaxAge).")}>
|
||||
+ <Grid className="ds-margin-top" title={_("The maximum age of a password in seconds before it expires (passwordMaxAge).")}>
|
||||
<GridItem className="ds-label" span={5}>
|
||||
{_("Password Expiration Time")}
|
||||
</GridItem>
|
||||
diff --git a/src/cockpit/389-console/src/lib/database/indexes.jsx b/src/cockpit/389-console/src/lib/database/indexes.jsx
|
||||
index 9af43722d..0d3492c22 100644
|
||||
--- a/src/cockpit/389-console/src/lib/database/indexes.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/database/indexes.jsx
|
||||
@@ -872,7 +872,7 @@ class AddIndexModal extends React.Component {
|
||||
onChange={(e, checked) => {
|
||||
handleChange(e);
|
||||
}}
|
||||
- label={_("Equailty Indexing")}
|
||||
+ label={_("Equality Indexing")}
|
||||
/>
|
||||
</GridItem>
|
||||
</Grid>
|
||||
@@ -1013,7 +1013,7 @@ class EditIndexModal extends React.Component {
|
||||
onChange={(e, checked) => {
|
||||
handleChange(e);
|
||||
}}
|
||||
- label={_("Equailty Indexing")}
|
||||
+ label={_("Equality Indexing")}
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
diff --git a/src/cockpit/389-console/src/lib/database/localPwp.jsx b/src/cockpit/389-console/src/lib/database/localPwp.jsx
|
||||
index 8586ba932..cb84be906 100644
|
||||
--- a/src/cockpit/389-console/src/lib/database/localPwp.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/database/localPwp.jsx
|
||||
@@ -344,7 +344,7 @@ class CreatePolicy extends React.Component {
|
||||
</Grid>
|
||||
<div className="ds-left-indent">
|
||||
<Grid
|
||||
- title={_("The maxiumum age of a password in seconds before it expires (passwordMaxAge).")}
|
||||
+ title={_("The maximum age of a password in seconds before it expires (passwordMaxAge).")}
|
||||
className="ds-margin-top"
|
||||
>
|
||||
<GridItem className="ds-label" span={4}>
|
||||
@@ -2743,7 +2743,7 @@ export class LocalPwPolicy extends React.Component {
|
||||
if (this.state.passwordexp) {
|
||||
pwExpirationRows = (
|
||||
<div className="ds-margin-left">
|
||||
- <Grid className="ds-margin-top" title={_("The maxiumum age of a password in seconds before it expires (passwordMaxAge).")}>
|
||||
+ <Grid className="ds-margin-top" title={_("The maximum age of a password in seconds before it expires (passwordMaxAge).")}>
|
||||
<GridItem className="ds-label" span={5}>
|
||||
{_("Password Expiration Time")}
|
||||
</GridItem>
|
||||
diff --git a/src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/aciNew.jsx b/src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/aciNew.jsx
|
||||
index 1ea66e809..eda2fe73e 100644
|
||||
--- a/src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/aciNew.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/aciNew.jsx
|
||||
@@ -1498,7 +1498,7 @@ class AddNewAci extends React.Component {
|
||||
<FormSelectOption key="roledn" label={_("Role DN (roledn)")} value="roledn" title={_("Bind rules for Roles")} />
|
||||
</>}
|
||||
{!this.state.haveUserRules && !this.state.haveUserAttrRules &&
|
||||
- <FormSelectOption key="special" label={_("User DN Aliases (userdn)")} value="User DN Aliases" title={_("Special bind rules for user DN catagories")} />}
|
||||
+ <FormSelectOption key="special" label={_("User DN Aliases (userdn)")} value="User DN Aliases" title={_("Special bind rules for user DN categories")} />}
|
||||
{!this.state.haveUserRules && !this.state.haveUserAttrRules &&
|
||||
<FormSelectOption key="userattr" label={_("User Attribute (userattr)")} value="userattr" title={_("Bind rule to specify which attribute must match between the entry used to bind to the directory and the targeted entry")} />}
|
||||
<FormSelectOption key="authmethod" label={_("Authentication Method (authmethod)")} value="authmethod" title={_("Specify the authentication methods to restrict")} />
|
||||
diff --git a/src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/addCosDefinition.jsx b/src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/addCosDefinition.jsx
|
||||
index 961f5030e..a08a4a468 100644
|
||||
--- a/src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/addCosDefinition.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/addCosDefinition.jsx
|
||||
@@ -999,7 +999,7 @@ class AddCosDefinition extends React.Component {
|
||||
</Modal>
|
||||
<Modal
|
||||
variant={ModalVariant.small}
|
||||
- title={_("Leaving CoS Definiton Creation Wizard")}
|
||||
+ title={_("Leaving CoS Definition Creation Wizard")}
|
||||
isOpen={this.state.isConfirmModalOpen}
|
||||
onClose={this.handleConfirmModalToggle}
|
||||
actions={[
|
||||
@@ -1011,7 +1011,7 @@ class AddCosDefinition extends React.Component {
|
||||
</Button>
|
||||
]}
|
||||
>
|
||||
- {_("You are about to leave CoS Definiton creation wizard. After you click 'Confirm', you'll appear in CoS Template creation wizard and you won't able to return from there until the process is finished. Then you'll be able to use the created entry in the CoS definiton creation. It'll be preselected for you automatically.")}
|
||||
+ {_("You are about to leave CoS Definition creation wizard. After you click 'Confirm', you'll appear in CoS Template creation wizard and you won't able to return from there until the process is finished. Then you'll be able to use the created entry in the CoS definition creation. It'll be preselected for you automatically.")}
|
||||
</Modal>
|
||||
<Modal
|
||||
variant={ModalVariant.medium}
|
||||
diff --git a/src/cockpit/389-console/src/lib/monitor/monitorModals.jsx b/src/cockpit/389-console/src/lib/monitor/monitorModals.jsx
|
||||
index 0e32a2193..7a65a9872 100644
|
||||
--- a/src/cockpit/389-console/src/lib/monitor/monitorModals.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/monitor/monitorModals.jsx
|
||||
@@ -630,7 +630,7 @@ class ReportConnectionModal extends React.Component {
|
||||
/>
|
||||
</GridItem>
|
||||
</Grid>
|
||||
- <Grid title={_("Bind password for the specified instance. You can also speciy a password file but the filename needs to be inside of brackets [/PATH/FILE]")}>
|
||||
+ <Grid title={_("Bind password for the specified instance. You can also specify a password file but the filename needs to be inside of brackets [/PATH/FILE]")}>
|
||||
<GridItem className="ds-label" span={3}>
|
||||
{_("Password")}
|
||||
</GridItem>
|
||||
diff --git a/src/cockpit/389-console/src/lib/monitor/replMonitor.jsx b/src/cockpit/389-console/src/lib/monitor/replMonitor.jsx
|
||||
index c6d338b61..8fe4fe1d8 100644
|
||||
--- a/src/cockpit/389-console/src/lib/monitor/replMonitor.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/monitor/replMonitor.jsx
|
||||
@@ -1503,7 +1503,7 @@ export class ReplMonitor extends React.Component {
|
||||
}
|
||||
|
||||
let overwriteWarning = (
|
||||
- _("Only one monitor configuraton can be saved in the server's '~/.dsrc' file. There is already an existing monitor configuration, and if you proceed it will be completely overwritten with the new configuraton."));
|
||||
+ _("Only one monitor configuration can be saved in the server's '~/.dsrc' file. There is already an existing monitor configuration, and if you proceed it will be completely overwritten with the new configuration."));
|
||||
if (this.state.credRows.length === 0 && this.state.aliasRows.length === 0) {
|
||||
overwriteWarning = (
|
||||
_("This will save the current credentials and aliases to the server's '~/.dsrc' file so it can be reused in the future."));
|
||||
diff --git a/src/cockpit/389-console/src/lib/plugins/memberOf.jsx b/src/cockpit/389-console/src/lib/plugins/memberOf.jsx
|
||||
index 704d6d0b1..00a334621 100644
|
||||
--- a/src/cockpit/389-console/src/lib/plugins/memberOf.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/plugins/memberOf.jsx
|
||||
@@ -648,7 +648,7 @@ class MemberOf extends React.Component {
|
||||
.done(content => {
|
||||
this.props.addNotification(
|
||||
"success",
|
||||
- cockpit.format(_("Fixup task for $0 was successfull"), this.state.fixupDN)
|
||||
+ cockpit.format(_("Fixup task for $0 was successful"), this.state.fixupDN)
|
||||
);
|
||||
this.props.toggleLoadingHandler();
|
||||
this.setState({
|
||||
diff --git a/src/cockpit/389-console/src/lib/plugins/pamPassThru.jsx b/src/cockpit/389-console/src/lib/plugins/pamPassThru.jsx
|
||||
index 9908694a9..288b4b533 100644
|
||||
--- a/src/cockpit/389-console/src/lib/plugins/pamPassThru.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/plugins/pamPassThru.jsx
|
||||
@@ -285,7 +285,7 @@ class PAMPassthroughAuthentication extends React.Component {
|
||||
"list",
|
||||
];
|
||||
this.props.toggleLoadingHandler();
|
||||
- log_cmd("loadPAMConfigs", "Get PAM Passthough Authentication Plugin Configs", cmd);
|
||||
+ log_cmd("loadPAMConfigs", "Get PAM Passthrough Authentication Plugin Configs", cmd);
|
||||
cockpit
|
||||
.spawn(cmd, { superuser: true, err: "message" })
|
||||
.done(content => {
|
||||
@@ -348,7 +348,7 @@ class PAMPassthroughAuthentication extends React.Component {
|
||||
this.props.toggleLoadingHandler();
|
||||
log_cmd(
|
||||
"openModal",
|
||||
- "Fetch the PAM Passthough Authentication Plugin pamConfig entry",
|
||||
+ "Fetch the PAM Passthrough Authentication Plugin pamConfig entry",
|
||||
cmd
|
||||
);
|
||||
cockpit
|
||||
@@ -485,7 +485,7 @@ class PAMPassthroughAuthentication extends React.Component {
|
||||
this.props.toggleLoadingHandler();
|
||||
log_cmd(
|
||||
"deletePAMConfig",
|
||||
- "Delete the PAM Passthough Authentication Plugin pamConfig entry",
|
||||
+ "Delete the PAM Passthrough Authentication Plugin pamConfig entry",
|
||||
cmd
|
||||
);
|
||||
cockpit
|
||||
@@ -595,7 +595,7 @@ class PAMPassthroughAuthentication extends React.Component {
|
||||
});
|
||||
log_cmd(
|
||||
"pamPassthroughAuthOperation",
|
||||
- `Do the ${action} operation on the PAM Passthough Authentication Plugin`,
|
||||
+ `Do the ${action} operation on the PAM Passthrough Authentication Plugin`,
|
||||
cmd
|
||||
);
|
||||
cockpit
|
||||
@@ -649,7 +649,7 @@ class PAMPassthroughAuthentication extends React.Component {
|
||||
extraPrimaryProps.spinnerAriaValueText = _("Saving");
|
||||
}
|
||||
|
||||
- const title = cockpit.format(_("$0 PAM Passthough Auth Config Entry"), (newPAMConfigEntry ? _("Add") : _("Edit")));
|
||||
+ const title = cockpit.format(_("$0 PAM Passthrough Auth Config Entry"), (newPAMConfigEntry ? _("Add") : _("Edit")));
|
||||
|
||||
return (
|
||||
<div className={savingPAM ? "ds-disabled" : ""}>
|
||||
diff --git a/src/cockpit/389-console/src/lib/plugins/passthroughAuthentication.jsx b/src/cockpit/389-console/src/lib/plugins/passthroughAuthentication.jsx
|
||||
index 530039c1e..28e8c9ad0 100644
|
||||
--- a/src/cockpit/389-console/src/lib/plugins/passthroughAuthentication.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/plugins/passthroughAuthentication.jsx
|
||||
@@ -210,7 +210,7 @@ class PassthroughAuthentication extends React.Component {
|
||||
"list"
|
||||
];
|
||||
this.props.toggleLoadingHandler();
|
||||
- log_cmd("loadURLs", "Get Passthough Authentication Plugin Configs", cmd);
|
||||
+ log_cmd("loadURLs", "Get Passthrough Authentication Plugin Configs", cmd);
|
||||
cockpit
|
||||
.spawn(cmd, { superuser: true, err: "message" })
|
||||
.done(content => {
|
||||
@@ -299,7 +299,7 @@ class PassthroughAuthentication extends React.Component {
|
||||
modalSpinning: true
|
||||
});
|
||||
|
||||
- log_cmd("deleteURL", "Delete the Passthough Authentication Plugin URL entry", cmd);
|
||||
+ log_cmd("deleteURL", "Delete the Passthrough Authentication Plugin URL entry", cmd);
|
||||
cockpit
|
||||
.spawn(cmd, {
|
||||
superuser: true,
|
||||
@@ -367,7 +367,7 @@ class PassthroughAuthentication extends React.Component {
|
||||
});
|
||||
log_cmd(
|
||||
"PassthroughAuthOperation",
|
||||
- `Do the ${action} operation on the Passthough Authentication Plugin`,
|
||||
+ `Do the ${action} operation on the Passthrough Authentication Plugin`,
|
||||
cmd
|
||||
);
|
||||
cockpit
|
||||
diff --git a/src/cockpit/389-console/src/lib/plugins/usn.jsx b/src/cockpit/389-console/src/lib/plugins/usn.jsx
|
||||
index b21bf126c..a7ade0675 100644
|
||||
--- a/src/cockpit/389-console/src/lib/plugins/usn.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/plugins/usn.jsx
|
||||
@@ -218,7 +218,7 @@ class USNPlugin extends React.Component {
|
||||
.done(content => {
|
||||
this.props.addNotification(
|
||||
"success",
|
||||
- _("Cleanup USN Tombstones task was successfull")
|
||||
+ _("Cleanup USN Tombstones task was successful")
|
||||
);
|
||||
this.props.toggleLoadingHandler();
|
||||
this.setState({
|
||||
diff --git a/src/cockpit/389-console/src/lib/replication/replConfig.jsx b/src/cockpit/389-console/src/lib/replication/replConfig.jsx
|
||||
index cc96cf530..902be1485 100644
|
||||
--- a/src/cockpit/389-console/src/lib/replication/replConfig.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/replication/replConfig.jsx
|
||||
@@ -660,7 +660,7 @@ export class ReplConfig extends React.Component {
|
||||
</GridItem>
|
||||
</Grid>
|
||||
<Grid
|
||||
- title={_("The interval to check for any changes in the group memebrship specified in the Bind DN Group and automatically rebuilds the list for the replication managers accordingly. (nsds5replicabinddngroupcheckinterval).")}
|
||||
+ title={_("The interval to check for any changes in the group membership specified in the Bind DN Group and automatically rebuilds the list for the replication managers accordingly. (nsds5replicabinddngroupcheckinterval).")}
|
||||
className="ds-margin-top"
|
||||
>
|
||||
<GridItem className="ds-label" span={3}>
|
||||
diff --git a/src/cockpit/389-console/src/lib/replication/replModals.jsx b/src/cockpit/389-console/src/lib/replication/replModals.jsx
|
||||
index 8676a232a..68a2ecae5 100644
|
||||
--- a/src/cockpit/389-console/src/lib/replication/replModals.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/replication/replModals.jsx
|
||||
@@ -661,7 +661,7 @@ export class WinsyncAgmtModal extends React.Component {
|
||||
<GridItem span={12}>
|
||||
<TextContent>
|
||||
<Text component={TextVariants.h5}>
|
||||
- {_("By default replication updates are sent to the replica as soon as possible, but if there is a need for replication updates to only be sent on certains days and within certain windows of time then you can setup a custom replication schedule.")}
|
||||
+ {_("By default replication updates are sent to the replica as soon as possible, but if there is a need for replication updates to only be sent on certain days and within certain windows of time then you can setup a custom replication schedule.")}
|
||||
</Text>
|
||||
</TextContent>
|
||||
</GridItem>
|
||||
@@ -1406,7 +1406,7 @@ export class ReplAgmtModal extends React.Component {
|
||||
<GridItem span={12}>
|
||||
<TextContent>
|
||||
<Text component={TextVariants.h5}>
|
||||
- {_("By default replication updates are sent to the replica as soon as possible, but if there is a need for replication updates to only be sent on certains days and within certain windows of time then you can setup a custom replication schedule.")}
|
||||
+ {_("By default replication updates are sent to the replica as soon as possible, but if there is a need for replication updates to only be sent on certain days and within certain windows of time then you can setup a custom replication schedule.")}
|
||||
</Text>
|
||||
</TextContent>
|
||||
</GridItem>
|
||||
diff --git a/src/cockpit/389-console/src/lib/replication/replTasks.jsx b/src/cockpit/389-console/src/lib/replication/replTasks.jsx
|
||||
index 99646ae3b..f0c902198 100644
|
||||
--- a/src/cockpit/389-console/src/lib/replication/replTasks.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/replication/replTasks.jsx
|
||||
@@ -359,7 +359,7 @@ export class ReplRUV extends React.Component {
|
||||
{_("Local RUV")}
|
||||
<Button
|
||||
variant="plain"
|
||||
- aria-label={_("Refresh the RUV for this suffixs")}
|
||||
+ aria-label={_("Refresh the RUV for this suffix")}
|
||||
onClick={() => {
|
||||
this.props.reload(this.props.suffix);
|
||||
}}
|
||||
@@ -374,7 +374,7 @@ export class ReplRUV extends React.Component {
|
||||
{_("Remote RUV's")}
|
||||
<Button
|
||||
variant="plain"
|
||||
- aria-label={_("Refresh the remote RUVs for this suffixs")}
|
||||
+ aria-label={_("Refresh the remote RUVs for this suffix")}
|
||||
onClick={() => {
|
||||
this.props.reload(this.props.suffix);
|
||||
}}
|
||||
@@ -456,7 +456,7 @@ export class ReplRUV extends React.Component {
|
||||
checked={this.state.modalChecked}
|
||||
mTitle={_("Initialize Replication Changelog From LDIF")}
|
||||
mMsg={_("Are you sure you want to attempt to initialize the changelog from LDIF? This will reject all operations during during the initialization.")}
|
||||
- mSpinningMsg={_("Initialzing Replication Change Log ...")}
|
||||
+ mSpinningMsg={_("Initializing Replication Change Log ...")}
|
||||
mBtnName={_("Import Changelog LDIF")}
|
||||
/>
|
||||
<ExportCLModal
|
||||
diff --git a/src/cockpit/389-console/src/lib/security/certificateManagement.jsx b/src/cockpit/389-console/src/lib/security/certificateManagement.jsx
|
||||
index edcfb301f..a24120225 100644
|
||||
--- a/src/cockpit/389-console/src/lib/security/certificateManagement.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/security/certificateManagement.jsx
|
||||
@@ -523,7 +523,7 @@ export class CertificateManagement extends React.Component {
|
||||
});
|
||||
this.props.addNotification(
|
||||
"error",
|
||||
- cockpit.format(_("Faield to create temporary certificate file: $0"), err)
|
||||
+ cockpit.format(_("Failed to create temporary certificate file: $0"), err)
|
||||
);
|
||||
});
|
||||
} else {
|
||||
@@ -1242,7 +1242,7 @@ export class CertificateManagement extends React.Component {
|
||||
} else {
|
||||
certificatePage = (
|
||||
<Tabs isBox isSecondary className="ds-margin-top-xlg ds-left-indent" activeKey={this.state.activeTabKey} onSelect={this.handleNavSelect}>
|
||||
- <Tab eventKey={0} title={<TabTitleText>{_("Trusted Certificate Authorites")} <font size="2">({this.state.CACerts.length})</font></TabTitleText>}>
|
||||
+ <Tab eventKey={0} title={<TabTitleText>{_("Trusted Certificate Authorities")} <font size="2">({this.state.CACerts.length})</font></TabTitleText>}>
|
||||
<div className="ds-margin-top-lg ds-left-indent">
|
||||
<CertTable
|
||||
certs={this.state.CACerts}
|
||||
@@ -1297,7 +1297,7 @@ export class CertificateManagement extends React.Component {
|
||||
this.showAddCSRModal();
|
||||
}}
|
||||
>
|
||||
- {_("Create Certificate Sigining Request")}
|
||||
+ {_("Create Certificate Signing Request")}
|
||||
</Button>
|
||||
</div>
|
||||
</Tab>
|
||||
@@ -1464,7 +1464,7 @@ export class CertificateManagement extends React.Component {
|
||||
item={this.state.certName}
|
||||
checked={this.state.modalChecked}
|
||||
mTitle={_("Warning - Altering CA Certificate Properties")}
|
||||
- mMsg={_("Removing the 'C' or 'T' flags from the SSL trust catagory could break all TLS connectivity to and from the server, are you sure you want to proceed?")}
|
||||
+ mMsg={_("Removing the 'C' or 'T' flags from the SSL trust category could break all TLS connectivity to and from the server, are you sure you want to proceed?")}
|
||||
mSpinningMsg={_("Editing CA Certificate ...")}
|
||||
mBtnName={_("Change Trust Flags")}
|
||||
/>
|
||||
diff --git a/src/cockpit/389-console/src/lib/security/securityModals.jsx b/src/cockpit/389-console/src/lib/security/securityModals.jsx
|
||||
index fcb9184c6..9f17e05c2 100644
|
||||
--- a/src/cockpit/389-console/src/lib/security/securityModals.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/security/securityModals.jsx
|
||||
@@ -288,11 +288,11 @@ export class SecurityAddCertModal extends React.Component {
|
||||
browseButtonText={_("Upload PEM File")}
|
||||
/>
|
||||
</div>
|
||||
- <div title={_("Choose a cerificate from the server's certificate directory")}>
|
||||
+ <div title={_("Choose a certificate from the server's certificate directory")}>
|
||||
<Radio
|
||||
id="certRadioSelect"
|
||||
className="ds-margin-top-lg"
|
||||
- label={_("Choose Cerificate From Server")}
|
||||
+ label={_("Choose Certificate From Server")}
|
||||
name="certChoice"
|
||||
isChecked={certRadioSelect}
|
||||
onChange={handleRadioChange}
|
||||
diff --git a/src/cockpit/389-console/src/schema.jsx b/src/cockpit/389-console/src/schema.jsx
|
||||
index 19854e785..8b7a6cda9 100644
|
||||
--- a/src/cockpit/389-console/src/schema.jsx
|
||||
+++ b/src/cockpit/389-console/src/schema.jsx
|
||||
@@ -766,7 +766,7 @@ export class Schema extends React.Component {
|
||||
console.info("cmdOperationObjectclass", "Result", content);
|
||||
this.props.addNotification(
|
||||
"success",
|
||||
- cockpit.format(_("ObjectClass $0 - $1 operation was successfull"), ocName, action)
|
||||
+ cockpit.format(_("ObjectClass $0 - $1 operation was successful"), ocName, action)
|
||||
);
|
||||
this.loadSchemaData();
|
||||
this.closeObjectclassModal();
|
||||
@@ -1083,7 +1083,7 @@ export class Schema extends React.Component {
|
||||
console.info("cmdOperationAttribute", "Result", content);
|
||||
this.props.addNotification(
|
||||
"success",
|
||||
- cockpit.format(_("Attribute $0 - add operation was successfull"), atName)
|
||||
+ cockpit.format(_("Attribute $0 - add operation was successful"), atName)
|
||||
);
|
||||
this.loadSchemaData();
|
||||
this.closeAttributeModal();
|
||||
@@ -1194,7 +1194,7 @@ export class Schema extends React.Component {
|
||||
console.info("cmdOperationAttribute", "Result", content);
|
||||
this.props.addNotification(
|
||||
"success",
|
||||
- cockpit.format(_("Attribute $0 - replace operation was successfull"), atName)
|
||||
+ cockpit.format(_("Attribute $0 - replace operation was successful"), atName)
|
||||
);
|
||||
this.loadSchemaData();
|
||||
this.closeAttributeModal();
|
||||
--
|
||||
2.48.1
|
||||
|
||||
@ -0,0 +1,38 @@
From 41123f8f9baadd9ca7eeafb7a1f570feeb17909e Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Fri, 4 Apr 2025 09:09:34 +0000
Subject: [PATCH] Issue 6481 - When ports that are in use are used to update a
DS instance the error message is not helpful (#6723)

Bug description: The initial fix for this issue used sudo lsof to determine if
a server port was in use. lsof is not installed by default and using sudo can
ask for a password.

Fix description: Use ss -ntplu (which is installed by default) to detect if a
port is taken.

Fixes: https://github.com/389ds/389-ds-base/issues/6481

Fix Author: Viktor Ashirov <vashirov@redhat.com>

Reviewed by: @progier389 (Thank you)
---
src/cockpit/389-console/src/lib/tools.jsx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/cockpit/389-console/src/lib/tools.jsx b/src/cockpit/389-console/src/lib/tools.jsx
index ba43bdd6c..384ed7862 100644
--- a/src/cockpit/389-console/src/lib/tools.jsx
+++ b/src/cockpit/389-console/src/lib/tools.jsx
@@ -221,7 +221,7 @@ export function is_port_in_use(port) {
return;
}

- let cmd = ['bash', '-c', `sudo lsof -i :${port} || echo "free"`];
+ let cmd = ['bash', '-c', `ss -ntplu | grep -w :${port} || echo "free"`];
log_cmd("is_port_in_use", cmd);

cockpit
--
2.48.1

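The same check can be reproduced outside the cockpit UI. A rough Python equivalent, assuming a Linux host with iproute2's ss on PATH; it only mirrors the ss -ntplu | grep -w pipeline the patch switches to and is not part of the patch:

# Sketch: same shell pipeline as is_port_in_use() above, without sudo or lsof.
import subprocess

def port_in_use(port):
    cmd = 'ss -ntplu | grep -w :%d || echo "free"' % port
    out = subprocess.run(["bash", "-c", cmd], capture_output=True, text=True)
    return out.stdout.strip() != "free"

print(port_in_use(389))
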
216
0027-Security-fix-for-CVE-2025-2487.patch
Normal file
@ -0,0 +1,216 @@
From 569bd395af2dd2366d46b7332feb8f39b42e94ea Mon Sep 17 00:00:00 2001
From: Pierre Rogier <progier@redhat.com>
Date: Thu, 27 Feb 2025 16:36:48 +0100
Subject: [PATCH] Security fix for CVE-2025-2487

Description:
A denial of service vulnerability was found in the 389 Directory Server.
The 389 Directory Server may crash (Null Pointer Exception) after some
failed rename subtree operations (i.e. MODDN) issued by a user having enough
privileges to do so.

References:
- https://access.redhat.com/security/cve/CVE-2025-2487
- https://bugzilla.redhat.com/show_bug.cgi?id=2353071
---
ldap/servers/slapd/back-ldbm/findentry.c | 36 +++++++++++++++++-----
ldap/servers/slapd/back-ldbm/ldbm_add.c | 2 ++
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 6 ++++
ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 13 ++++++--
4 files changed, 48 insertions(+), 9 deletions(-)

diff --git a/ldap/servers/slapd/back-ldbm/findentry.c b/ldap/servers/slapd/back-ldbm/findentry.c
index 7bb56ef2c..907b4367a 100644
--- a/ldap/servers/slapd/back-ldbm/findentry.c
+++ b/ldap/servers/slapd/back-ldbm/findentry.c
@@ -99,6 +99,7 @@ find_entry_internal_dn(
int isroot = 0;
int op_type;
int reverted_entry = 0;
+ int return_err = LDAP_SUCCESS;

/* get the managedsait ldap message control */
slapi_pblock_get(pb, SLAPI_MANAGEDSAIT, &managedsait);
@@ -121,6 +122,7 @@ find_entry_internal_dn(
if (rc) { /* if check_entry_for_referral returns non-zero, result is sent. */
*rc = FE_RC_SENT_RESULT;
}
+ slapi_set_ldap_result(pb, LDAP_REFERRAL, NULL, NULL, 0, NULL);
return (NULL);
}
}
@@ -153,7 +155,12 @@ find_entry_internal_dn(
slapi_log_err(SLAPI_LOG_ERR, "find_entry_internal_dn", "Retry count exceeded (%s)\n", slapi_sdn_get_dn(sdn));
}
if (reverted_entry) {
+ CACHE_RETURN(&inst->inst_cache, &e);
+ slapi_set_ldap_result(pb, LDAP_BUSY, NULL, NULL, 0, NULL);
slapi_send_ldap_result(pb, LDAP_BUSY, NULL, "target entry busy because of a canceled operation", 0, NULL);
+ if (rc) {
+ *rc = FE_RC_SENT_RESULT; /* Result is sent */
+ }
return (NULL);
}
/*
@@ -179,6 +186,7 @@ find_entry_internal_dn(
if (rc) { /* if check_entry_for_referral returns non-zero, result is sent. */
*rc = FE_RC_SENT_RESULT;
}
+ slapi_set_ldap_result(pb, LDAP_REFERRAL, NULL, NULL, 0, NULL);
return (NULL);
}
/* else fall through to no such object */
@@ -189,7 +197,7 @@ find_entry_internal_dn(
if (me && !isroot) {
/* If not root, you may not want to reveal it. */
int acl_type = -1;
- int return_err = LDAP_NO_SUCH_OBJECT;
+ return_err = LDAP_NO_SUCH_OBJECT;
err = LDAP_SUCCESS;
switch (op_type) {
case SLAPI_OPERATION_ADD:
@@ -230,18 +238,22 @@ find_entry_internal_dn(
* do not return the "matched" DN.
* Plus, the bind case returns LDAP_INAPPROPRIATE_AUTH.
*/
+ slapi_set_ldap_result(pb, return_err, NULL, NULL, 0, NULL);
slapi_send_ldap_result(pb, return_err, NULL, NULL, 0, NULL);
} else {
+ slapi_set_ldap_result(pb, LDAP_NO_SUCH_OBJECT, NULL, NULL, 0, NULL);
slapi_send_ldap_result(pb, LDAP_NO_SUCH_OBJECT,
(char *)slapi_sdn_get_dn(&ancestorsdn), NULL, 0, NULL);
}
} else {
+ slapi_set_ldap_result(pb, LDAP_NO_SUCH_OBJECT, NULL, NULL, 0, NULL);
slapi_send_ldap_result(pb, LDAP_NO_SUCH_OBJECT,
(char *)slapi_sdn_get_dn(&ancestorsdn), NULL, 0, NULL);
}
} else {
- slapi_send_ldap_result(pb, (LDAP_INVALID_DN_SYNTAX == err) ? LDAP_INVALID_DN_SYNTAX : LDAP_OPERATIONS_ERROR,
- (char *)slapi_sdn_get_dn(&ancestorsdn), NULL, 0, NULL);
+ return_err = (LDAP_INVALID_DN_SYNTAX == err) ? LDAP_INVALID_DN_SYNTAX : LDAP_OPERATIONS_ERROR;
+ slapi_set_ldap_result(pb, return_err, NULL, NULL, 0, NULL);
+ slapi_send_ldap_result(pb, return_err, (char *)slapi_sdn_get_dn(&ancestorsdn), NULL, 0, NULL);
}
if (rc) {
*rc = FE_RC_SENT_RESULT;
@@ -265,13 +277,15 @@ find_entry_internal_uniqueid(
backend *be,
const char *uniqueid,
int lock,
- back_txn *txn)
+ back_txn *txn,
+ int *rc)
{
ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
struct backentry *e;
int err;
size_t tries = 0;
int reverted_entry = 0;
+ int return_err = 0;

while ((tries < LDBM_CACHE_RETRY_COUNT) &&
(e = uniqueid2entry(be, uniqueid, txn, &err)) != NULL) {
@@ -307,12 +321,20 @@ find_entry_internal_uniqueid(
}

if (reverted_entry) {
+ slapi_set_ldap_result(pb, LDAP_BUSY, NULL, NULL, 0, NULL);
slapi_send_ldap_result(pb, LDAP_BUSY, NULL, "target entry busy because of a canceled operation", 0, NULL);
+ if (rc) {
+ *rc = FE_RC_SENT_RESULT; /* Result is sent */
+ }
return (NULL);
} else {
/* entry not found */
- slapi_send_ldap_result(pb, (0 == err || DBI_RC_NOTFOUND == err) ? LDAP_NO_SUCH_OBJECT : LDAP_OPERATIONS_ERROR, NULL /* matched */, NULL,
- 0, NULL);
+ return_err = (0 == err || DBI_RC_NOTFOUND == err) ? LDAP_NO_SUCH_OBJECT : LDAP_OPERATIONS_ERROR;
+ slapi_set_ldap_result(pb, return_err, NULL, NULL, 0, NULL);
+ slapi_send_ldap_result(pb, return_err, NULL /* matched */, NULL, 0, NULL);
+ if (rc) {
+ *rc = FE_RC_SENT_RESULT; /* Result is sent */
+ }
}
slapi_log_err(SLAPI_LOG_TRACE,
"find_entry_internal_uniqueid", "<= not found; uniqueid = (%s)\n",
@@ -334,7 +356,7 @@ find_entry_internal(
if (addr->uniqueid != NULL) {
slapi_log_err(SLAPI_LOG_TRACE, "find_entry_internal", "=> (uniqueid=%s) lock %d\n",
addr->uniqueid, lock);
- return (find_entry_internal_uniqueid(pb, be, addr->uniqueid, lock, txn));
+ return (find_entry_internal_uniqueid(pb, be, addr->uniqueid, lock, txn, rc));
} else {
struct backentry *entry = NULL;

diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
index b7453697f..dec3a0c6d 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
@@ -435,6 +435,8 @@ ldbm_back_add(Slapi_PBlock *pb)
slapi_log_err(SLAPI_LOG_BACKLDBM, "ldbm_back_add",
"find_entry2modify_only returned NULL parententry pdn: %s, uniqueid: %s\n",
slapi_sdn_get_dn(&parentsdn), addr.uniqueid ? addr.uniqueid : "none");
+ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code);
+ goto error_return;
}
modify_init(&parent_modify_c, parententry);
}
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index 29df2ce75..24c62a952 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -177,6 +177,12 @@ modify_update_all(backend *be, Slapi_PBlock *pb, modify_context *mc, back_txn *t
slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
is_ruv = operation_is_flag_set(operation, OP_FLAG_REPL_RUV);
}
+ if (NULL == mc->new_entry) {
+ /* test entry to avoid crashing in id2entry_add_ext */
+ slapi_log_err(SLAPI_LOG_BACKLDBM, "modify_update_all",
+ "No entry in modify_context ==> operation is aborted.\n");
+ return -1;
+ }
/*
* Update the ID to Entry index.
* Note that id2entry_add replaces the entry, so the Entry ID stays the same.
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
index 76b8d49d7..e3b7e5783 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
@@ -497,8 +497,8 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
slapi_pblock_get(pb, SLAPI_TARGET_ADDRESS, &old_addr);
e = find_entry2modify(pb, be, old_addr, &txn, &result_sent);
if (e == NULL) {
- ldap_result_code = -1;
- goto error_return; /* error result sent by find_entry2modify() */
+ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code);
+ goto error_return; /* error result set and sent by find_entry2modify() */
}
if (slapi_entry_flag_is_set(e->ep_entry, SLAPI_ENTRY_FLAG_TOMBSTONE) &&
!is_resurect_operation) {
@@ -530,6 +530,11 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
oldparent_addr.uniqueid = NULL;
}
parententry = find_entry2modify_only(pb, be, &oldparent_addr, &txn, &result_sent);
+ if (parententry == NULL) {
+ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code);
+ goto error_return; /* error result set and sent by find_entry2modify() */
+ }
+
modify_init(&parent_modify_context, parententry);

/* Fetch and lock the new parent of the entry that is moving */
@@ -540,6 +545,10 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
}
newparententry = find_entry2modify_only(pb, be, newsuperior_addr, &txn, &result_sent);
slapi_ch_free_string(&newsuperior_addr->uniqueid);
+ if (newparententry == NULL) {
+ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code);
+ goto error_return; /* error result set and sent by find_entry2modify() */
+ }
modify_init(&newparent_modify_context, newparententry);
}

--
2.48.1

478
0028-Issue-6553-Update-concread-to-0.5.4-and-refactor-sta.patch
Normal file
@ -0,0 +1,478 @@
From 17b474f1bd4159f5c6f5acd1e36d5646b649e03f Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Wed, 19 Feb 2025 18:56:34 -0800
Subject: [PATCH] Issue 6553 - Update concread to 0.5.4 and refactor statistics
tracking (#6607)

Description: Implement new cache statistics tracking with atomic counters
and dedicated stats structs.
Update concread dependency to 0.5.4 for improved cache performance.
Add tests for cache statistics functionality.

Fixes: https://github.com/389ds/389-ds-base/issues/6553

Reviewed by: @firstyear
---
ldap/servers/slapd/dn.c | 4 +-
src/Cargo.lock | 457 ++++++++++++++++++-------------------
src/librslapd/Cargo.toml | 3 +-
src/librslapd/src/cache.rs | 331 ++++++++++++++++++++++++---
4 files changed, 526 insertions(+), 269 deletions(-)

diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c
index 093019e28..5fbe67d07 100644
--- a/ldap/servers/slapd/dn.c
+++ b/ldap/servers/slapd/dn.c
@@ -58,7 +58,7 @@ struct ndn_cache {

/*
* This means we need 1 MB minimum per thread
- *
+ *
*/
#define NDN_CACHE_MINIMUM_CAPACITY 1048576
/*
@@ -3008,7 +3008,7 @@ ndn_cache_get_stats(uint64_t *hits, uint64_t *tries, uint64_t *size, uint64_t *m
uint64_t freq_evicts;
uint64_t recent_evicts;
uint64_t p_weight;
- cache_char_stats(cache,
+ cache_char_stats(cache,
&reader_hits,
&reader_includes,
&write_hits,
diff --git a/src/librslapd/Cargo.toml b/src/librslapd/Cargo.toml
index 6d8d63de4..6d9b621fc 100644
--- a/src/librslapd/Cargo.toml
+++ b/src/librslapd/Cargo.toml
@@ -16,8 +16,7 @@ crate-type = ["staticlib", "lib"]
[dependencies]
slapd = { path = "../slapd" }
libc = "0.2"
-concread = "^0.2.20"
+concread = "0.5.4"

[build-dependencies]
cbindgen = "0.26"
-
diff --git a/src/librslapd/src/cache.rs b/src/librslapd/src/cache.rs
|
||||
index b025c830a..e3c692865 100644
|
||||
--- a/src/librslapd/src/cache.rs
|
||||
+++ b/src/librslapd/src/cache.rs
|
||||
@@ -1,38 +1,171 @@
|
||||
// This exposes C-FFI capable bindings for the concread concurrently readable cache.
|
||||
+use concread::arcache::stats::{ARCacheWriteStat, ReadCountStat};
|
||||
use concread::arcache::{ARCache, ARCacheBuilder, ARCacheReadTxn, ARCacheWriteTxn};
|
||||
-use std::convert::TryInto;
|
||||
+use concread::cowcell::CowCell;
|
||||
use std::ffi::{CStr, CString};
|
||||
use std::os::raw::c_char;
|
||||
|
||||
+#[derive(Clone, Debug, Default)]
|
||||
+struct CacheStats {
|
||||
+ reader_hits: u64, // Hits from read transactions (main + local)
|
||||
+ reader_includes: u64, // Number of includes from read transactions
|
||||
+ write_hits: u64, // Hits from write transactions
|
||||
+ write_inc_or_mod: u64, // Number of includes/modifications from write transactions
|
||||
+ freq_evicts: u64, // Number of evictions from frequent set
|
||||
+ recent_evicts: u64, // Number of evictions from recent set
|
||||
+ p_weight: u64, // Current cache weight between recent and frequent.
|
||||
+ shared_max: u64, // Maximum number of items in the shared cache.
|
||||
+ freq: u64, // Number of items in the frequent set at this point in time.
|
||||
+ recent: u64, // Number of items in the recent set at this point in time.
|
||||
+ all_seen_keys: u64, // Number of total keys seen through the cache's lifetime.
|
||||
+}
|
||||
+
|
||||
+impl CacheStats {
|
||||
+ fn new() -> Self {
|
||||
+ CacheStats::default()
|
||||
+ }
|
||||
+
|
||||
+ fn update_from_read_stat(&mut self, stat: ReadCountStat) {
|
||||
+ self.reader_hits += stat.main_hit + stat.local_hit;
|
||||
+ self.reader_includes += stat.include + stat.local_include;
|
||||
+ }
|
||||
+
|
||||
+ fn update_from_write_stat(&mut self, stat: &FFIWriteStat) {
|
||||
+ self.write_hits += stat.read_hits;
|
||||
+ self.write_inc_or_mod += stat.includes + stat.modifications;
|
||||
+ self.freq_evicts += stat.freq_evictions;
|
||||
+ self.recent_evicts += stat.recent_evictions;
|
||||
+ self.p_weight = stat.p_weight;
|
||||
+ self.shared_max = stat.shared_max;
|
||||
+ self.freq = stat.freq;
|
||||
+ self.recent = stat.recent;
|
||||
+ self.all_seen_keys = stat.all_seen_keys;
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+#[derive(Debug, Default)]
|
||||
+pub struct FFIWriteStat {
|
||||
+ pub read_ops: u64,
|
||||
+ pub read_hits: u64,
|
||||
+ pub p_weight: u64,
|
||||
+ pub shared_max: u64,
|
||||
+ pub freq: u64,
|
||||
+ pub recent: u64,
|
||||
+ pub all_seen_keys: u64,
|
||||
+ pub includes: u64,
|
||||
+ pub modifications: u64,
|
||||
+ pub freq_evictions: u64,
|
||||
+ pub recent_evictions: u64,
|
||||
+ pub ghost_freq_revives: u64,
|
||||
+ pub ghost_rec_revives: u64,
|
||||
+ pub haunted_includes: u64,
|
||||
+}
|
||||
+
|
||||
+impl<K> ARCacheWriteStat<K> for FFIWriteStat {
|
||||
+ fn cache_clear(&mut self) {
|
||||
+ self.read_ops = 0;
|
||||
+ self.read_hits = 0;
|
||||
+ }
|
||||
+
|
||||
+ fn cache_read(&mut self) {
|
||||
+ self.read_ops += 1;
|
||||
+ }
|
||||
+
|
||||
+ fn cache_hit(&mut self) {
|
||||
+ self.read_hits += 1;
|
||||
+ }
|
||||
+
|
||||
+ fn p_weight(&mut self, p: u64) {
|
||||
+ self.p_weight = p;
|
||||
+ }
|
||||
+
|
||||
+ fn shared_max(&mut self, i: u64) {
|
||||
+ self.shared_max = i;
|
||||
+ }
|
||||
+
|
||||
+ fn freq(&mut self, i: u64) {
|
||||
+ self.freq = i;
|
||||
+ }
|
||||
+
|
||||
+ fn recent(&mut self, i: u64) {
|
||||
+ self.recent = i;
|
||||
+ }
|
||||
+
|
||||
+ fn all_seen_keys(&mut self, i: u64) {
|
||||
+ self.all_seen_keys = i;
|
||||
+ }
|
||||
+
|
||||
+ fn include(&mut self, _k: &K) {
|
||||
+ self.includes += 1;
|
||||
+ }
|
||||
+
|
||||
+ fn include_haunted(&mut self, _k: &K) {
|
||||
+ self.haunted_includes += 1;
|
||||
+ }
|
||||
+
|
||||
+ fn modify(&mut self, _k: &K) {
|
||||
+ self.modifications += 1;
|
||||
+ }
|
||||
+
|
||||
+ fn ghost_frequent_revive(&mut self, _k: &K) {
|
||||
+ self.ghost_freq_revives += 1;
|
||||
+ }
|
||||
+
|
||||
+ fn ghost_recent_revive(&mut self, _k: &K) {
|
||||
+ self.ghost_rec_revives += 1;
|
||||
+ }
|
||||
+
|
||||
+ fn evict_from_recent(&mut self, _k: &K) {
|
||||
+ self.recent_evictions += 1;
|
||||
+ }
|
||||
+
|
||||
+ fn evict_from_frequent(&mut self, _k: &K) {
|
||||
+ self.freq_evictions += 1;
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
pub struct ARCacheChar {
|
||||
inner: ARCache<CString, CString>,
|
||||
+ stats: CowCell<CacheStats>,
|
||||
}
|
||||
|
||||
pub struct ARCacheCharRead<'a> {
|
||||
- inner: ARCacheReadTxn<'a, CString, CString>,
|
||||
+ inner: ARCacheReadTxn<'a, CString, CString, ReadCountStat>,
|
||||
+ cache: &'a ARCacheChar,
|
||||
}
|
||||
|
||||
pub struct ARCacheCharWrite<'a> {
|
||||
- inner: ARCacheWriteTxn<'a, CString, CString>,
|
||||
+ inner: ARCacheWriteTxn<'a, CString, CString, FFIWriteStat>,
|
||||
+ cache: &'a ARCacheChar,
|
||||
+}
|
||||
+
|
||||
+impl ARCacheChar {
|
||||
+ fn new(max: usize, read_max: usize) -> Option<Self> {
|
||||
+ ARCacheBuilder::new()
|
||||
+ .set_size(max, read_max)
|
||||
+ .set_reader_quiesce(false)
|
||||
+ .build()
|
||||
+ .map(|inner| Self {
|
||||
+ inner,
|
||||
+ stats: CowCell::new(CacheStats::new()),
|
||||
+ })
|
||||
+ }
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn cache_char_create(max: usize, read_max: usize) -> *mut ARCacheChar {
|
||||
- let inner = if let Some(cache) = ARCacheBuilder::new().set_size(max, read_max).build() {
|
||||
- cache
|
||||
+ if let Some(cache) = ARCacheChar::new(max, read_max) {
|
||||
+ Box::into_raw(Box::new(cache))
|
||||
} else {
|
||||
- return std::ptr::null_mut();
|
||||
- };
|
||||
- let cache: Box<ARCacheChar> = Box::new(ARCacheChar { inner });
|
||||
- Box::into_raw(cache)
|
||||
+ std::ptr::null_mut()
|
||||
+ }
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn cache_char_free(cache: *mut ARCacheChar) {
|
||||
- // Should we be responsible to drain and free everything?
|
||||
debug_assert!(!cache.is_null());
|
||||
unsafe {
|
||||
- let _drop = Box::from_raw(cache);
|
||||
+ drop(Box::from_raw(cache));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -53,22 +186,22 @@ pub extern "C" fn cache_char_stats(
|
||||
) {
|
||||
let cache_ref = unsafe {
|
||||
debug_assert!(!cache.is_null());
|
||||
- &(*cache) as &ARCacheChar
|
||||
+ &(*cache)
|
||||
};
|
||||
- let stats = cache_ref.inner.view_stats();
|
||||
- *reader_hits = stats.reader_hits.try_into().unwrap();
|
||||
- *reader_includes = stats.reader_includes.try_into().unwrap();
|
||||
- *write_hits = stats.write_hits.try_into().unwrap();
|
||||
- *write_inc_or_mod = (stats.write_includes + stats.write_modifies)
|
||||
- .try_into()
|
||||
- .unwrap();
|
||||
- *shared_max = stats.shared_max.try_into().unwrap();
|
||||
- *freq = stats.freq.try_into().unwrap();
|
||||
- *recent = stats.recent.try_into().unwrap();
|
||||
- *freq_evicts = stats.freq_evicts.try_into().unwrap();
|
||||
- *recent_evicts = stats.recent_evicts.try_into().unwrap();
|
||||
- *p_weight = stats.p_weight.try_into().unwrap();
|
||||
- *all_seen_keys = stats.all_seen_keys.try_into().unwrap();
|
||||
+
|
||||
+ // Get stats snapshot
|
||||
+ let stats_read = cache_ref.stats.read();
|
||||
+ *reader_hits = stats_read.reader_hits;
|
||||
+ *reader_includes = stats_read.reader_includes;
|
||||
+ *write_hits = stats_read.write_hits;
|
||||
+ *write_inc_or_mod = stats_read.write_inc_or_mod;
|
||||
+ *freq_evicts = stats_read.freq_evicts;
|
||||
+ *recent_evicts = stats_read.recent_evicts;
|
||||
+ *p_weight = stats_read.p_weight;
|
||||
+ *shared_max = stats_read.shared_max;
|
||||
+ *freq = stats_read.freq;
|
||||
+ *recent = stats_read.recent;
|
||||
+ *all_seen_keys = stats_read.all_seen_keys;
|
||||
}
|
||||
|
||||
// start read
|
||||
@@ -79,7 +212,8 @@ pub extern "C" fn cache_char_read_begin(cache: *mut ARCacheChar) -> *mut ARCache
|
||||
&(*cache) as &ARCacheChar
|
||||
};
|
||||
let read_txn = Box::new(ARCacheCharRead {
|
||||
- inner: cache_ref.inner.read(),
|
||||
+ inner: cache_ref.inner.read_stats(ReadCountStat::default()),
|
||||
+ cache: cache_ref,
|
||||
});
|
||||
Box::into_raw(read_txn)
|
||||
}
|
||||
@@ -87,8 +221,20 @@ pub extern "C" fn cache_char_read_begin(cache: *mut ARCacheChar) -> *mut ARCache
|
||||
#[no_mangle]
|
||||
pub extern "C" fn cache_char_read_complete(read_txn: *mut ARCacheCharRead) {
|
||||
debug_assert!(!read_txn.is_null());
|
||||
+
|
||||
unsafe {
|
||||
- let _drop = Box::from_raw(read_txn);
|
||||
+ let read_txn_box = Box::from_raw(read_txn);
|
||||
+ let read_stats = read_txn_box.inner.finish();
|
||||
+ let write_stats = read_txn_box
|
||||
+ .cache
|
||||
+ .inner
|
||||
+ .try_quiesce_stats(FFIWriteStat::default());
|
||||
+
|
||||
+ // Update stats
|
||||
+ let mut stats_write = read_txn_box.cache.stats.write();
|
||||
+ stats_write.update_from_read_stat(read_stats);
|
||||
+ stats_write.update_from_write_stat(&write_stats);
|
||||
+ stats_write.commit();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -141,7 +287,8 @@ pub extern "C" fn cache_char_write_begin(
|
||||
&(*cache) as &ARCacheChar
|
||||
};
|
||||
let write_txn = Box::new(ARCacheCharWrite {
|
||||
- inner: cache_ref.inner.write(),
|
||||
+ inner: cache_ref.inner.write_stats(FFIWriteStat::default()),
|
||||
+ cache: cache_ref,
|
||||
});
|
||||
Box::into_raw(write_txn)
|
||||
}
|
||||
@@ -149,15 +296,21 @@ pub extern "C" fn cache_char_write_begin(
|
||||
#[no_mangle]
|
||||
pub extern "C" fn cache_char_write_commit(write_txn: *mut ARCacheCharWrite) {
|
||||
debug_assert!(!write_txn.is_null());
|
||||
- let wr = unsafe { Box::from_raw(write_txn) };
|
||||
- (*wr).inner.commit();
|
||||
+ unsafe {
|
||||
+ let write_txn_box = Box::from_raw(write_txn);
|
||||
+ let current_stats = write_txn_box.inner.commit();
|
||||
+
|
||||
+ let mut stats_write = write_txn_box.cache.stats.write();
|
||||
+ stats_write.update_from_write_stat(¤t_stats);
|
||||
+ stats_write.commit();
|
||||
+ }
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn cache_char_write_rollback(write_txn: *mut ARCacheCharWrite) {
|
||||
debug_assert!(!write_txn.is_null());
|
||||
unsafe {
|
||||
- let _drop = Box::from_raw(write_txn);
|
||||
+ drop(Box::from_raw(write_txn));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -182,7 +335,7 @@ pub extern "C" fn cache_char_write_include(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
- use crate::cache::*;
|
||||
+ use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_cache_basic() {
|
||||
@@ -199,4 +352,116 @@ mod tests {
|
||||
cache_char_read_complete(read_txn);
|
||||
cache_char_free(cache_ptr);
|
||||
}
|
||||
+
|
||||
+ #[test]
|
||||
+ fn test_cache_stats() {
|
||||
+ let cache = cache_char_create(100, 8);
|
||||
+
|
||||
+ // Variables to store stats
|
||||
+ let mut reader_hits = 0;
|
||||
+ let mut reader_includes = 0;
|
||||
+ let mut write_hits = 0;
|
||||
+ let mut write_inc_or_mod = 0;
|
||||
+ let mut shared_max = 0;
|
||||
+ let mut freq = 0;
|
||||
+ let mut recent = 0;
|
||||
+ let mut freq_evicts = 0;
|
||||
+ let mut recent_evicts = 0;
|
||||
+ let mut p_weight = 0;
|
||||
+ let mut all_seen_keys = 0;
|
||||
+
|
||||
+ // Do some operations
|
||||
+ let key = CString::new("stats_test").unwrap();
|
||||
+ let value = CString::new("value").unwrap();
|
||||
+
|
||||
+ let write_txn = cache_char_write_begin(cache);
|
||||
+ cache_char_write_include(write_txn, key.as_ptr(), value.as_ptr());
|
||||
+ cache_char_write_commit(write_txn);
|
||||
+
|
||||
+ let read_txn = cache_char_read_begin(cache);
|
||||
+ let _ = cache_char_read_get(read_txn, key.as_ptr());
|
||||
+ cache_char_read_complete(read_txn);
|
||||
+
|
||||
+ // Get stats
|
||||
+ cache_char_stats(
|
||||
+ cache,
|
||||
+ &mut reader_hits,
|
||||
+ &mut reader_includes,
|
||||
+ &mut write_hits,
|
||||
+ &mut write_inc_or_mod,
|
||||
+ &mut shared_max,
|
||||
+ &mut freq,
|
||||
+ &mut recent,
|
||||
+ &mut freq_evicts,
|
||||
+ &mut recent_evicts,
|
||||
+ &mut p_weight,
|
||||
+ &mut all_seen_keys,
|
||||
+ );
|
||||
+
|
||||
+ // Verify that stats were updated
|
||||
+ assert!(write_inc_or_mod > 0);
|
||||
+ assert!(all_seen_keys > 0);
|
||||
+
|
||||
+ cache_char_free(cache);
|
||||
+ }
|
||||
+
|
||||
+ #[test]
|
||||
+ fn test_cache_read_write_operations() {
|
||||
+ let cache = cache_char_create(100, 8);
|
||||
+
|
||||
+ // Create test data
|
||||
+ let key = CString::new("test_key").unwrap();
|
||||
+ let value = CString::new("test_value").unwrap();
|
||||
+
|
||||
+ // Test write operation
|
||||
+ let write_txn = cache_char_write_begin(cache);
|
||||
+ cache_char_write_include(write_txn, key.as_ptr(), value.as_ptr());
|
||||
+ cache_char_write_commit(write_txn);
|
||||
+
|
||||
+ // Test read operation
|
||||
+ let read_txn = cache_char_read_begin(cache);
|
||||
+ let result = cache_char_read_get(read_txn, key.as_ptr());
|
||||
+ assert!(!result.is_null());
|
||||
+
|
||||
+ // Verify the value
|
||||
+ let retrieved_value = unsafe { CStr::from_ptr(result) };
|
||||
+ assert_eq!(retrieved_value.to_bytes(), value.as_bytes());
|
||||
+
|
||||
+ cache_char_read_complete(read_txn);
|
||||
+ cache_char_free(cache);
|
||||
+ }
|
||||
+
|
||||
+ #[test]
|
||||
+ fn test_cache_miss() {
|
||||
+ let cache = cache_char_create(100, 8);
|
||||
+ let read_txn = cache_char_read_begin(cache);
|
||||
+
|
||||
+ let missing_key = CString::new("nonexistent").unwrap();
|
||||
+ let result = cache_char_read_get(read_txn, missing_key.as_ptr());
|
||||
+ assert!(result.is_null());
|
||||
+
|
||||
+ cache_char_read_complete(read_txn);
|
||||
+ cache_char_free(cache);
|
||||
+ }
|
||||
+
|
||||
+ #[test]
|
||||
+ fn test_write_rollback() {
|
||||
+ let cache = cache_char_create(100, 8);
|
||||
+
|
||||
+ let key = CString::new("rollback_test").unwrap();
|
||||
+ let value = CString::new("value").unwrap();
|
||||
+
|
||||
+ // Start write transaction and rollback
|
||||
+ let write_txn = cache_char_write_begin(cache);
|
||||
+ cache_char_write_include(write_txn, key.as_ptr(), value.as_ptr());
|
||||
+ cache_char_write_rollback(write_txn);
|
||||
+
|
||||
+ // Verify key doesn't exist
|
||||
+ let read_txn = cache_char_read_begin(cache);
|
||||
+ let result = cache_char_read_get(read_txn, key.as_ptr());
|
||||
+ assert!(result.is_null());
|
||||
+
|
||||
+ cache_char_read_complete(read_txn);
|
||||
+ cache_char_free(cache);
|
||||
+ }
|
||||
}
|
||||
--
|
||||
2.48.1
|
||||
|
||||
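Editor's note on the statistics refactor in the patch above: the Rust code replaces concread's old view_stats() call with per-transaction stat objects (ReadCountStat / FFIWriteStat) that are folded into a shared CacheStats snapshot, guarded by a CowCell, when each read or write transaction completes. The short Python sketch below only illustrates that accumulation pattern with hypothetical names; it is not part of the patch, whose real implementation is the Rust code shown above.

import threading
from dataclasses import dataclass, field

@dataclass
class TxnStats:
    # counters recorded lock-free while a single transaction runs
    hits: int = 0
    includes: int = 0

@dataclass
class CacheStats:
    # shared snapshot, updated once per completed transaction
    reader_hits: int = 0
    reader_includes: int = 0
    _lock: threading.Lock = field(default_factory=threading.Lock, repr=False)

    def merge_read(self, txn: TxnStats) -> None:
        with self._lock:                  # one short critical section per txn
            self.reader_hits += txn.hits
            self.reader_includes += txn.includes

stats = CacheStats()
txn = TxnStats(hits=1, includes=1)        # accumulated during the read txn
stats.merge_read(txn)                     # folded in at "read_complete" time
print(stats.reader_hits, stats.reader_includes)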
300
0029-Issue-6715-dsconf-backend-replication-monitor-fails-.patch
Normal file
@ -0,0 +1,300 @@
From 7e34dbf974c71a77ca2c5a615bed70a26acf17b6 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Fri, 4 Apr 2025 14:44:11 +0200
Subject: [PATCH] Issue 6715 - dsconf backend replication monitor fails if
 replica id starts with 0 (#6716)

* Issue 6715 - dsconf backend replication monitor fails if replica id starts with 0
lib389 fails to retrieve the csn if the replica id is not exactly the normalized form of a number,
typically because "010" does not match the value stored in the RUV.
Solution: Ensure that rids are normalized before comparing them or using them as dict keys.

Issue: #6715

Reviewed by: @droideck, @tbordaz (Thanks!)

(cherry picked from commit d19a4f9955aa0cdf3daf1c8a5ac0f6d83bfd9a7b)
---
 .../suites/replication/regression_m2_test.py | 129 +++++++++++++++++-
 src/lib389/lib389/__init__.py                 |   5 +-
 src/lib389/lib389/agreement.py                |   7 +-
 src/lib389/lib389/replica.py                  |  33 ++++-
 4 files changed, 163 insertions(+), 11 deletions(-)

diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
index ed138013d..1a2f80522 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
@@ -12,6 +12,7 @@ import time
|
||||
import logging
|
||||
import ldif
|
||||
import ldap
|
||||
+import pprint
|
||||
import pytest
|
||||
import subprocess
|
||||
import time
|
||||
@@ -29,7 +30,10 @@ from lib389.idm.group import Groups, Group
|
||||
from lib389.idm.domain import Domain
|
||||
from lib389.idm.directorymanager import DirectoryManager
|
||||
from lib389.idm.services import ServiceAccounts, ServiceAccount
|
||||
-from lib389.replica import Replicas, ReplicationManager, ReplicaRole, BootstrapReplicationManager
|
||||
+from lib389.replica import (
|
||||
+ Replicas, ReplicationManager, ReplicationMonitor, ReplicaRole,
|
||||
+ BootstrapReplicationManager, NormalizedRidDict
|
||||
+)
|
||||
from lib389.agreement import Agreements
|
||||
from lib389 import pid_from_file
|
||||
from lib389.dseldif import *
|
||||
@@ -1144,6 +1148,129 @@ def test_bulk_import(preserve_topo_m2):
|
||||
assert len(users_s1) == len(users_s2)
|
||||
|
||||
|
||||
+def check_monitoring_status(inst):
|
||||
+ creds = { 'binddn': DN_DM, 'bindpw': PW_DM }
|
||||
+ repl_monitor = ReplicationMonitor(inst)
|
||||
+ report_dict = repl_monitor.generate_report(lambda h,p: creds, use_json=True)
|
||||
+ log.debug(f'(Monitoring status: {pprint.pformat(report_dict)}')
|
||||
+
|
||||
+ agmts_status = {}
|
||||
+ for inst_status in report_dict.values():
|
||||
+ for replica_status in inst_status:
|
||||
+ suffix = replica_status['replica_root']
|
||||
+ rid = replica_status['replica_id']
|
||||
+ for agmt_status in replica_status['agmts_status']:
|
||||
+ rag_status = agmt_status['replication-status'][0]
|
||||
+ if 'Unavailable' in rag_status:
|
||||
+ aname = agmt_status['agmt-name'][0]
|
||||
+ url = f'{agmt_status["replica"][0]}/{suffix}'
|
||||
+ assert False, f"'Unavailable' found in agreement {aname} of replica {url} : {rag_status}"
|
||||
+
|
||||
+ assert 'Unavailable' not in str(report_dict)
|
||||
+
|
||||
+
|
||||
+def reinit_replica(S1, S2):
|
||||
+ # Reinit replication
|
||||
+ agmt = Agreements(S1).list()[0]
|
||||
+ agmt.begin_reinit()
|
||||
+ (done, error) = agmt.wait_reinit()
|
||||
+ assert done is True
|
||||
+ assert error is False
|
||||
+
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+ repl.wait_for_replication(S1, S2)
|
||||
+ repl.wait_for_replication(S2, S1)
|
||||
+
|
||||
+
|
||||
+def test_rid_starting_with_0(topo_m2, request):
|
||||
+ """Check that replication monitoring works if replica
|
||||
+ id starts with 0
|
||||
+
|
||||
+ :id: ed0176e6-0bf7-11f0-9846-482ae39447e5
|
||||
+ :setup: 2 Supplier Instances
|
||||
+ :steps:
|
||||
+ 1. Initialize replication to ensure that init status is set
|
||||
+ 2. Check that monitoring status does not contains 'Unavailable'
|
||||
+ 3. Change replica ids to 001 and 002
|
||||
+ 4. Initialize replication to ensure that init status is set
|
||||
+ 5. Check that monitoring status does not contains 'Unavailable'
|
||||
+ 6. Restore the replica ids to 1 and 2
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ 6. Success
|
||||
+ """
|
||||
+ S1 = topo_m2.ms["supplier1"]
|
||||
+ S2 = topo_m2.ms["supplier2"]
|
||||
+ replicas = [ Replicas(inst).get(DEFAULT_SUFFIX) for inst in topo_m2 ]
|
||||
+
|
||||
+ # Reinit replication (to ensure that init status is set)
|
||||
+ reinit_replica(S1, S2)
|
||||
+
|
||||
+ # Get replication monitoring results
|
||||
+ check_monitoring_status(S1)
|
||||
+
|
||||
+ # Change replica id
|
||||
+ for replica,rid in zip(replicas, ['010', '020']):
|
||||
+ replica.replace('nsDS5ReplicaId', rid)
|
||||
+
|
||||
+ # Restore replica id in finalizer
|
||||
+ def fin():
|
||||
+ for replica,rid in zip(replicas, ['1', '2']):
|
||||
+ replica.replace('nsDS5ReplicaId', rid)
|
||||
+ reinit_replica(S1, S2)
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+ # Reinit replication
|
||||
+ reinit_replica(S1, S2)
|
||||
+
|
||||
+ # Get replication monitoring results
|
||||
+ check_monitoring_status(S1)
|
||||
+
|
||||
+
|
||||
+def test_normalized_rid_dict():
|
||||
+ """Check that lib389.replica NormalizedRidDict class behaves as expected
|
||||
+
|
||||
+ :id: 0f88a29c-0fcd-11f0-b5df-482ae39447e5
|
||||
+ :setup: None
|
||||
+ :steps:
|
||||
+ 1. Initialize a NormalizedRidDict
|
||||
+ 2. Check that normalization do something
|
||||
+ 3. Check that key stored in NormalizedRidDict are normalized
|
||||
+ 4. Check that normalized and non normalized keys have the same value
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ """
|
||||
+
|
||||
+ sd = { '1': 'v1', '020': 'v2' }
|
||||
+ nsd = { NormalizedRidDict.normalize_rid(key): val for key,val in sd.items() }
|
||||
+ nkeys = list(nsd.keys())
|
||||
+
|
||||
+ # Initialize a NormalizedRidDict
|
||||
+ nrd = NormalizedRidDict()
|
||||
+ for key,val in sd.items():
|
||||
+ nrd[key] = val
|
||||
+
|
||||
+ # Check that normalization do something
|
||||
+ assert nkeys != list(sd.keys())
|
||||
+
|
||||
+ # Check that key stored in NormalizedRidDict are normalized
|
||||
+ for key in nrd.keys():
|
||||
+ assert key in nkeys
|
||||
+
|
||||
+ # Check that normalized and non normalized keys have the same value
|
||||
+ for key,val in sd.items():
|
||||
+ nkey = NormalizedRidDict.normalize_rid(key)
|
||||
+ assert nrd[key] == val
|
||||
+ assert nrd[nkey] == val
|
||||
+
|
||||
+
|
||||
def test_online_reinit_may_hang(topo_with_sigkill):
|
||||
"""Online reinitialization may hang when the first
|
||||
entry of the DB is RUV entry instead of the suffix
|
||||
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
|
||||
index ac8f5f3f2..1ac9770b0 100644
|
||||
--- a/src/lib389/lib389/__init__.py
|
||||
+++ b/src/lib389/lib389/__init__.py
|
||||
@@ -60,6 +60,7 @@ from lib389.utils import (
|
||||
normalizeDN,
|
||||
escapeDNValue,
|
||||
ensure_bytes,
|
||||
+ ensure_int,
|
||||
ensure_str,
|
||||
ensure_list_str,
|
||||
format_cmd_list,
|
||||
@@ -3401,7 +3402,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
# Error
|
||||
consumer.close()
|
||||
return None
|
||||
- rid = ensure_str(replica_entries[0].getValue(REPL_ID))
|
||||
+ rid = ensure_int(replica_entries[0].getValue(REPL_ID))
|
||||
except:
|
||||
# Error
|
||||
consumer.close()
|
||||
@@ -3418,7 +3419,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
return error_msg
|
||||
elements = ensure_list_str(entry[0].getValues('nsds50ruv'))
|
||||
for ruv in elements:
|
||||
- if ('replica %s ' % rid) in ruv:
|
||||
+ if ('replica %d ' % rid) in ruv:
|
||||
ruv_parts = ruv.split()
|
||||
if len(ruv_parts) == 5:
|
||||
return ruv_parts[4]
|
||||
diff --git a/src/lib389/lib389/agreement.py b/src/lib389/lib389/agreement.py
|
||||
index 170ab9050..745dd0bd0 100644
|
||||
--- a/src/lib389/lib389/agreement.py
|
||||
+++ b/src/lib389/lib389/agreement.py
|
||||
@@ -161,7 +161,7 @@ class Agreement(DSLdapObject):
|
||||
from lib389.replica import Replicas
|
||||
replicas = Replicas(self._instance)
|
||||
replica = replicas.get(suffix)
|
||||
- rid = replica.get_attr_val_utf8(REPL_ID)
|
||||
+ rid = int(replica.get_attr_val_utf8(REPL_ID))
|
||||
|
||||
# Open a connection to the consumer
|
||||
consumer = DirSrv(verbose=self._instance.verbose)
|
||||
@@ -191,12 +191,15 @@ class Agreement(DSLdapObject):
|
||||
else:
|
||||
elements = ensure_list_str(entry[0].getValues('nsds50ruv'))
|
||||
for ruv in elements:
|
||||
- if ('replica %s ' % rid) in ruv:
|
||||
+ if ('replica %d ' % rid) in ruv:
|
||||
ruv_parts = ruv.split()
|
||||
if len(ruv_parts) == 5:
|
||||
result_msg = ruv_parts[4]
|
||||
break
|
||||
except ldap.INVALID_CREDENTIALS as e:
|
||||
+ self._log.debug('Failed to search for the suffix ' +
|
||||
+ '({}) consumer ({}:{}) failed, error: {}'.format(
|
||||
+ suffix, host, port, e))
|
||||
raise(e)
|
||||
except ldap.LDAPError as e:
|
||||
self._log.debug('Failed to search for the suffix ' +
|
||||
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
|
||||
index 07f75b878..0486c48f8 100644
|
||||
--- a/src/lib389/lib389/replica.py
|
||||
+++ b/src/lib389/lib389/replica.py
|
||||
@@ -822,6 +822,26 @@ class ReplicaLegacy(object):
|
||||
raise ValueError('Failed to update replica: ' + str(e))
|
||||
|
||||
|
||||
+class NormalizedRidDict(dict):
|
||||
+ """A dict whose key is a Normalized Replica ID
|
||||
+ """
|
||||
+
|
||||
+ @staticmethod
|
||||
+ def normalize_rid(rid):
|
||||
+ return int(rid)
|
||||
+
|
||||
+ def __init__(self):
|
||||
+ super().__init__()
|
||||
+
|
||||
+ def __getitem__(self, key):
|
||||
+ nkey = NormalizedRidDict.normalize_rid(key)
|
||||
+ return super().__getitem__(nkey)
|
||||
+
|
||||
+ def __setitem__(self, key, value):
|
||||
+ nkey = NormalizedRidDict.normalize_rid(key)
|
||||
+ super().__setitem__(nkey, value)
|
||||
+
|
||||
+
|
||||
class RUV(object):
|
||||
"""Represents the server in memory RUV object. The RUV contains each
|
||||
update vector the server knows of, along with knowledge of CSN state of the
|
||||
@@ -839,11 +859,11 @@ class RUV(object):
|
||||
else:
|
||||
self._log = logging.getLogger(__name__)
|
||||
self._rids = []
|
||||
- self._rid_url = {}
|
||||
- self._rid_rawruv = {}
|
||||
- self._rid_csn = {}
|
||||
- self._rid_maxcsn = {}
|
||||
- self._rid_modts = {}
|
||||
+ self._rid_url = NormalizedRidDict()
|
||||
+ self._rid_rawruv = NormalizedRidDict()
|
||||
+ self._rid_csn = NormalizedRidDict()
|
||||
+ self._rid_maxcsn = NormalizedRidDict()
|
||||
+ self._rid_modts = NormalizedRidDict()
|
||||
self._data_generation = None
|
||||
self._data_generation_csn = None
|
||||
# Process the array of data
|
||||
@@ -935,9 +955,10 @@ class RUV(object):
|
||||
:returns: str
|
||||
"""
|
||||
self._log.debug("Allocated rids: %s" % self._rids)
|
||||
+ rids = [ int(rid) for rid in self._rids ]
|
||||
for i in range(1, 65534):
|
||||
self._log.debug("Testing ... %s" % i)
|
||||
- if str(i) not in self._rids:
|
||||
+ if i not in rids:
|
||||
return str(i)
|
||||
raise Exception("Unable to alloc rid!")
|
||||
|
||||
--
|
||||
2.49.0
|
||||
|
||||
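Editor's note: the fix in the patch above boils down to normalizing replica ids before comparing them with RUV contents or using them as dictionary keys. A minimal standalone illustration in Python follows; the RUV line string is made up for the example, while NormalizedRidDict mirrors the helper this patch adds to lib389/replica.py.

# Why a replica id of "010" broke the monitor: the RUV stores "replica 10",
# so a raw string comparison or a dict keyed on the raw attribute never matches.
ruv_line = "{replica 10 ldap://supplier1:389} 67f0aa1c0000000a0000 67f0ab2d0000000a0000"
raw_rid = "010"                                  # value read from nsDS5ReplicaId

print(f"replica {raw_rid} " in ruv_line)         # False - string mismatch
print(f"replica {int(raw_rid)} " in ruv_line)    # True  - normalized first


class NormalizedRidDict(dict):
    """Dict that normalizes replica-id keys, mirroring the fix above."""

    @staticmethod
    def normalize_rid(rid):
        return int(rid)

    def __getitem__(self, key):
        return super().__getitem__(self.normalize_rid(key))

    def __setitem__(self, key, value):
        super().__setitem__(self.normalize_rid(key), value)


csns = NormalizedRidDict()
csns["010"] = "67f0aa1c0000000a0000"
print(csns["10"])    # same entry, whichever spelling the caller uses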
@ -0,0 +1,47 @@
From 087538df097d352995952fb1f36cba3d1fca27c2 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Sun, 6 Apr 2025 21:23:08 +0000
Subject: [PATCH] Issue 6713 - ns-slapd crash during mdb offline import (#6714)

Bug description: A segmentation fault is triggered in
dbmdb_import_prepare_worker_entry() during an mdb offline import.

The import producer thread parses, validates and writes ldif entries
to the worker queue, while the import worker threads simultaneously read,
format and index entries before adding them to the DB. A race condition
occurs when a worker thread reads an entry before the producer has
fully written it, leading to a segmentation fault.

Fix description: Ensure thread safe access by locking the worker queue
before writing entries.

Fixes: https://github.com/389ds/389-ds-base/issues/6713

Reviewed by: @progier389, @bordaz (Thank you)
---
 ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
||||
index 0f445bb56..39d2b06f7 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
||||
@@ -417,13 +417,14 @@ dbmdb_import_workerq_push(ImportQueue_t *q, WorkerQueueData_t *data)
|
||||
safe_cond_wait(&q->cv, &q->mutex);
|
||||
}
|
||||
}
|
||||
- pthread_mutex_unlock(&q->mutex);
|
||||
if (q->job->flags & FLAG_ABORT) {
|
||||
/* in this case worker thread does not free the data so we should do it */
|
||||
dbmdb_import_workerq_free_data(data);
|
||||
+ pthread_mutex_unlock(&q->mutex);
|
||||
return -1;
|
||||
}
|
||||
dbmdb_dup_worker_slot(q, data, slot);
|
||||
+ pthread_mutex_unlock(&q->mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
--
|
||||
2.49.0
|
||||
|
||||
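Editor's note: the one-line change above moves the pthread_mutex_unlock() so the producer still holds the queue mutex while it copies the entry into the worker slot. A Python threading sketch of that ordering (hypothetical names, not the C code) is given below: the lock is released only after the slot is fully populated, so a consumer can never observe a half-written entry.

import threading

class WorkerQueue:
    def __init__(self):
        self.lock = threading.Lock()
        self.cv = threading.Condition(self.lock)
        self.slots = {}

    def push(self, slot_id, entry):
        with self.lock:                        # lock held across the whole write
            self.slots[slot_id] = dict(entry)  # copy into the slot under the lock
            self.cv.notify()                   # only now may a worker pick it up

    def pop(self, slot_id):
        with self.lock:
            while slot_id not in self.slots:
                self.cv.wait()
            return self.slots.pop(slot_id)

q = WorkerQueue()
worker = threading.Thread(target=lambda: print(q.pop(1)))
worker.start()
q.push(1, {"dn": "uid=demo,dc=example,dc=com"})
worker.join()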
601
0031-Issue-6571-Nested-group-does-not-receive-memberOf-at.patch
Normal file
@ -0,0 +1,601 @@
From fc891f41513204d97b20f9750e7f36c053f5f401 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Tue, 25 Mar 2025 09:20:50 +0100
Subject: [PATCH] Issue 6571 - Nested group does not receive memberOf attribute
 (#6679)

Bug description:
There is a risk of creating a loop in group membership,
for example G2 is a member of G1 and G1 is a member of G2.
The memberOf plugin iterates from a node to its ancestors
to update the 'memberof' values of the node.
The plugin uses a valueset ('already_seen_ndn_vals')
to keep track of the nodes it has already visited.
It uses this valueset to detect a possible loop and,
in that case, it does not add the ancestor as a
memberof value of the node.
This is an error when there are multiple paths
up to an ancestor.

Fix description:
The ancestor should be added to the node systematically;
when the ancestor is already in 'already_seen_ndn_vals',
only the final recursion is skipped.

fixes: #6571

Reviewed by: Pierre Rogier, Mark Reynolds (Thanks !!!)
---
 .../suites/memberof_plugin/regression_test.py | 109 +++++++++
 .../tests/suites/plugins/memberof_test.py     |   5 +
 .../slapi_memberof/basic_interface_test.py    | 226 ++++++++++++------
 ldap/servers/plugins/memberof/memberof.c      |  52 ++--
 4 files changed, 291 insertions(+), 101 deletions(-)

diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
index 646eb7433..51c43a71e 100644
|
||||
--- a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
@@ -465,6 +465,21 @@ def _find_memberof_ext(server, user_dn=None, group_dn=None, find_result=True):
|
||||
else:
|
||||
assert (not found)
|
||||
|
||||
+def _check_membership(server, entry, expected_members, expected_memberof):
|
||||
+ assert server
|
||||
+ assert entry
|
||||
+
|
||||
+ memberof = entry.get_attr_vals('memberof')
|
||||
+ member = entry.get_attr_vals('member')
|
||||
+ assert len(member) == len(expected_members)
|
||||
+ assert len(memberof) == len(expected_memberof)
|
||||
+ for e in expected_members:
|
||||
+ server.log.info("Checking %s has member %s" % (entry.dn, e.dn))
|
||||
+ assert e.dn.encode() in member
|
||||
+ for e in expected_memberof:
|
||||
+ server.log.info("Checking %s is member of %s" % (entry.dn, e.dn))
|
||||
+ assert e.dn.encode() in memberof
|
||||
+
|
||||
|
||||
def test_memberof_group(topology_st):
|
||||
"""Test memberof does not fail if group is moved into scope
|
||||
@@ -532,6 +547,100 @@ def test_memberof_group(topology_st):
|
||||
_find_memberof_ext(inst, dn1, g2n, True)
|
||||
_find_memberof_ext(inst, dn2, g2n, True)
|
||||
|
||||
+def test_multipaths(topology_st, request):
|
||||
+ """Test memberof succeeds to update memberof when
|
||||
+ there are multiple paths from a leaf to an intermediate node
|
||||
+
|
||||
+ :id: 35aa704a-b895-4153-9dcb-1e8a13612ebf
|
||||
+
|
||||
+ :setup: Single instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Create a graph G1->U1, G2->G21->U1
|
||||
+ 2. Add G2 as member of G1: G1->U1, G1->G2->G21->U1
|
||||
+ 3. Check members and memberof in entries G1,G2,G21,User1
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Graph should be created
|
||||
+ 2. succeed
|
||||
+ 3. Membership is okay
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+ memberof = MemberOfPlugin(inst)
|
||||
+ memberof.enable()
|
||||
+ memberof.replace('memberOfEntryScope', SUFFIX)
|
||||
+ if (memberof.get_memberofdeferredupdate() and memberof.get_memberofdeferredupdate().lower() == "on"):
|
||||
+ delay = 3
|
||||
+ else:
|
||||
+ delay = 0
|
||||
+ inst.restart()
|
||||
+
|
||||
+ #
|
||||
+ # Create the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ---------------> User1
|
||||
+ # ^
|
||||
+ # /
|
||||
+ # Grp2 ----> Grp21 ------/
|
||||
+ #
|
||||
+ users = UserAccounts(inst, SUFFIX, rdn=None)
|
||||
+ user1 = users.create(properties={'uid': "user1",
|
||||
+ 'cn': "user1",
|
||||
+ 'sn': 'SN',
|
||||
+ 'description': 'leaf',
|
||||
+ 'uidNumber': '1000',
|
||||
+ 'gidNumber': '2000',
|
||||
+ 'homeDirectory': '/home/user1'
|
||||
+ })
|
||||
+ group = Groups(inst, SUFFIX, rdn=None)
|
||||
+ g1 = group.create(properties={'cn': 'group1',
|
||||
+ 'member': user1.dn,
|
||||
+ 'description': 'group1'})
|
||||
+ g21 = group.create(properties={'cn': 'group21',
|
||||
+ 'member': user1.dn,
|
||||
+ 'description': 'group21'})
|
||||
+ g2 = group.create(properties={'cn': 'group2',
|
||||
+ 'member': [g21.dn],
|
||||
+ 'description': 'group2'})
|
||||
+
|
||||
+ # Enable debug logs if necessary
|
||||
+ #inst.config.replace('nsslapd-errorlog-level', '65536')
|
||||
+ #inst.config.set('nsslapd-accesslog-level','260')
|
||||
+ #inst.config.set('nsslapd-plugin-logging', 'on')
|
||||
+ #inst.config.set('nsslapd-auditlog-logging-enabled','on')
|
||||
+ #inst.config.set('nsslapd-auditfaillog-logging-enabled','on')
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # \ ^
|
||||
+ # \ /
|
||||
+ # --> Grp2 --> Grp21 --
|
||||
+ #
|
||||
+ g1.add_member(g2.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g1, expected_members=[g2, user1], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g1])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
+
|
||||
+ def fin():
|
||||
+ try:
|
||||
+ user1.delete()
|
||||
+ g1.delete()
|
||||
+ g2.delete()
|
||||
+ g21.delete()
|
||||
+ except:
|
||||
+ pass
|
||||
+ request.addfinalizer(fin)
|
||||
|
||||
def _config_memberof_entrycache_on_modrdn_failure(server):
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/plugins/memberof_test.py b/dirsrvtests/tests/suites/plugins/memberof_test.py
|
||||
index 0ebdf7fb3..912dead39 100644
|
||||
--- a/dirsrvtests/tests/suites/plugins/memberof_test.py
|
||||
+++ b/dirsrvtests/tests/suites/plugins/memberof_test.py
|
||||
@@ -2169,9 +2169,14 @@ def test_complex_group_scenario_6(topology_st):
|
||||
|
||||
# add Grp[1-4] (uniqueMember) to grp5
|
||||
# it creates a membership loop !!!
|
||||
+ topology_st.standalone.config.replace('nsslapd-errorlog-level', '65536')
|
||||
mods = [(ldap.MOD_ADD, 'uniqueMember', memofegrp020_5)]
|
||||
for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
|
||||
topology_st.standalone.modify_s(ensure_str(grp), mods)
|
||||
+ topology_st.standalone.config.replace('nsslapd-errorlog-level', '0')
|
||||
+
|
||||
+ results = topology_st.standalone.ds_error_log.match('.*detecting a loop in group.*')
|
||||
+ assert results
|
||||
|
||||
time.sleep(5)
|
||||
# assert user[1-4] are member of grp20_[1-4]
|
||||
diff --git a/dirsrvtests/tests/suites/slapi_memberof/basic_interface_test.py b/dirsrvtests/tests/suites/slapi_memberof/basic_interface_test.py
|
||||
index c5ecf5227..cc25f7e6c 100644
|
||||
--- a/dirsrvtests/tests/suites/slapi_memberof/basic_interface_test.py
|
||||
+++ b/dirsrvtests/tests/suites/slapi_memberof/basic_interface_test.py
|
||||
@@ -4220,18 +4220,18 @@ def test_slapi_memberof_reuse_only_1(topo, request, install_test_plugin):
|
||||
|
||||
def test_slapi_memberof_reuse_only_2(topo, request, install_test_plugin):
|
||||
"""
|
||||
- Test that management hierarchy (manager) is computed with slapi_memberof
|
||||
+ Test that membership is computed with slapi_memberof
|
||||
It requires slapi_memberof to ONLY reuse the computed values
|
||||
from memberof plugins. As memberof plugin is enabled, it returns
|
||||
memberof.
|
||||
with following parameters
|
||||
- member attribute: memberof
|
||||
- - membership attribute: 'manager'
|
||||
+ - membership attribute: 'member'
|
||||
- span over all backends: 'off'
|
||||
- skip nesting membership: 'off'
|
||||
- - computation mode: MEMBEROF_REUSE_IF_POSSIBLE <--
|
||||
+ - computation mode: MEMBEROF_REUSE_ONLY <--
|
||||
- Scope: None
|
||||
- - ExcludeScope: ou=foo1,dc=example,dc=com <--
|
||||
+ - ExcludeScope: dc=example,dc=com <--
|
||||
- Maximum return entries: None
|
||||
|
||||
:id: fb4f8c86-aa39-4252-90e0-36cfd7b3dd80
|
||||
@@ -4274,59 +4274,141 @@ def test_slapi_memberof_reuse_only_2(topo, request, install_test_plugin):
|
||||
--- e_1_parent_1_1_3_0
|
||||
---- e_1_parent_1_1_1_3_0
|
||||
"""
|
||||
+ # Configure memberof plugin to add 'memberof' attribute
|
||||
+ # to the members ('member') of groups that are in the suffix
|
||||
memberof = MemberOfPlugin(topo.standalone)
|
||||
memberof.enable()
|
||||
memberof.replace('memberOfAttr', 'memberof')
|
||||
- memberof.replace('memberOfGroupAttr', 'manager')
|
||||
+ memberof.replace('memberOfGroupAttr', 'member')
|
||||
memberof.replace('memberOfAllBackends', 'off')
|
||||
memberof.replace('memberOfSkipNested', 'off')
|
||||
memberof.replace('memberOfEntryScope', DEFAULT_SUFFIX)
|
||||
topo.standalone.restart()
|
||||
|
||||
- user = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
|
||||
+ #topo.standalone.config.replace('nsslapd-errorlog-level', '65536')
|
||||
+ #topo.standalone.config.set('nsslapd-accesslog-level','260')
|
||||
+ #topo.standalone.config.set('nsslapd-auditlog-logging-enabled','on')
|
||||
+ #topo.standalone.config.set('nsslapd-auditfaillog-logging-enabled','on')
|
||||
+ #topo.standalone.config.set('nsslapd-plugin-logging', 'on')
|
||||
|
||||
# First subtree
|
||||
- e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0")
|
||||
-
|
||||
- e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)])
|
||||
-
|
||||
- e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)])
|
||||
-
|
||||
- e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
|
||||
- e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
|
||||
- e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
|
||||
- e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
|
||||
- e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
|
||||
-
|
||||
- e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)])
|
||||
-
|
||||
- e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)])
|
||||
-
|
||||
- e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
|
||||
- e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
|
||||
- e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)])
|
||||
- e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
|
||||
- e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
|
||||
-
|
||||
- # 2nd subtree
|
||||
- e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0")
|
||||
-
|
||||
- e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
|
||||
- e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
|
||||
- e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
|
||||
- e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
|
||||
-
|
||||
- # third subtree
|
||||
- e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0")
|
||||
-
|
||||
- e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)])
|
||||
-
|
||||
- e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)])
|
||||
-
|
||||
- e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)])
|
||||
-
|
||||
- e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)])
|
||||
+ e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0")
|
||||
+ e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0")
|
||||
+ e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0")
|
||||
+ e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0")
|
||||
+ e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0")
|
||||
+ e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0")
|
||||
+ e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0")
|
||||
+ e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0")
|
||||
+ e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0")
|
||||
+ e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0")
|
||||
+ e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0")
|
||||
+ e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0")
|
||||
+ e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0")
|
||||
+ e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0")
|
||||
+ e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0")
|
||||
+
|
||||
+ e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0")
|
||||
+ e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0")
|
||||
+ e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0")
|
||||
+ e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0")
|
||||
+ e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0")
|
||||
+
|
||||
+ e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0")
|
||||
+ e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0")
|
||||
+ e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0")
|
||||
+ e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0")
|
||||
+ e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0")
|
||||
+
|
||||
+ # e_1_parent_0
|
||||
+ # - e_1_parent_1_0
|
||||
+ # - e_2_parent_1_0
|
||||
+ members = [ensure_bytes(e_1_parent_1_0),
|
||||
+ ensure_bytes(e_2_parent_1_0)]
|
||||
+ mod = [(ldap.MOD_REPLACE, 'member', members)]
|
||||
+ topo.standalone.modify_s(e_1_parent_0, mod)
|
||||
+
|
||||
+ # - e_1_parent_1_0
|
||||
+ # -- e_1_parent_1_1_0
|
||||
+ # -- e_2_parent_1_1_0
|
||||
+ members = [ensure_bytes(e_1_parent_1_1_0),
|
||||
+ ensure_bytes(e_2_parent_1_1_0)]
|
||||
+ mod = [(ldap.MOD_REPLACE, 'member', members)]
|
||||
+ topo.standalone.modify_s(e_1_parent_1_0, mod)
|
||||
+
|
||||
+ # -- e_1_parent_1_1_0
|
||||
+ # --- e_1_parent_1_1_1_0
|
||||
+ # --- e_2_parent_1_1_1_0
|
||||
+ # --- e_3_parent_1_1_1_0
|
||||
+ # --- e_4_parent_1_1_1_0
|
||||
+ # --- e_5_parent_1_1_1_0
|
||||
+ members = [ensure_bytes(e_1_parent_1_1_1_0),
|
||||
+ ensure_bytes(e_2_parent_1_1_1_0),
|
||||
+ ensure_bytes(e_3_parent_1_1_1_0),
|
||||
+ ensure_bytes(e_4_parent_1_1_1_0),
|
||||
+ ensure_bytes(e_5_parent_1_1_1_0)]
|
||||
+ mod = [(ldap.MOD_REPLACE, 'member', members)]
|
||||
+ topo.standalone.modify_s(e_1_parent_1_1_0, mod)
|
||||
+
|
||||
+ # - e_2_parent_1_0
|
||||
+ # -- e_1_parent_2_1_0
|
||||
+ # -- e_2_parent_2_1_0
|
||||
+ # -- e_3_parent_2_1_0
|
||||
+ # -- e_4_parent_2_1_0
|
||||
+ members = [ensure_bytes(e_1_parent_2_1_0),
|
||||
+ ensure_bytes(e_2_parent_2_1_0),
|
||||
+ ensure_bytes(e_3_parent_2_1_0),
|
||||
+ ensure_bytes(e_4_parent_2_1_0)]
|
||||
+ mod = [(ldap.MOD_REPLACE, 'member', members)]
|
||||
+ topo.standalone.modify_s(e_2_parent_1_0, mod)
|
||||
+
|
||||
+ # -- e_2_parent_2_1_0
|
||||
+ # --- e_1_parent_2_2_1_0
|
||||
+ members = [ensure_bytes(e_1_parent_2_2_1_0)]
|
||||
+ mod = [(ldap.MOD_REPLACE, 'member', members)]
|
||||
+ topo.standalone.modify_s(e_2_parent_2_1_0, mod)
|
||||
+
|
||||
+ # e_2_parent_0
|
||||
+ # - e_1_parent_2_0
|
||||
+ # - e_2_parent_2_0
|
||||
+ # - e_3_parent_2_0
|
||||
+ # - e_4_parent_2_0
|
||||
+ members = [ensure_bytes(e_1_parent_2_0),
|
||||
+ ensure_bytes(e_2_parent_2_0),
|
||||
+ ensure_bytes(e_3_parent_2_0),
|
||||
+ ensure_bytes(e_4_parent_2_0)]
|
||||
+ mod = [(ldap.MOD_REPLACE, 'member', members)]
|
||||
+ topo.standalone.modify_s(e_2_parent_0, mod)
|
||||
+
|
||||
+ # e_3_parent_0
|
||||
+ # - e_1_parent_3_0
|
||||
+ members = [ensure_bytes(e_1_parent_3_0)]
|
||||
+ mod = [(ldap.MOD_REPLACE, 'member', members)]
|
||||
+ topo.standalone.modify_s(e_3_parent_0, mod)
|
||||
+
|
||||
+ # - e_1_parent_3_0
|
||||
+ # -- e_1_parent_1_3_0
|
||||
+ members = [ensure_bytes(e_1_parent_1_3_0)]
|
||||
+ mod = [(ldap.MOD_REPLACE, 'member', members)]
|
||||
+ topo.standalone.modify_s(e_1_parent_3_0, mod)
|
||||
+
|
||||
+ # -- e_1_parent_1_3_0
|
||||
+ # --- e_1_parent_1_1_3_0
|
||||
+ members = [ensure_bytes(e_1_parent_1_1_3_0)]
|
||||
+ mod = [(ldap.MOD_REPLACE, 'member', members)]
|
||||
+ topo.standalone.modify_s(e_1_parent_1_3_0, mod)
|
||||
+
|
||||
+ # --- e_1_parent_1_1_3_0
|
||||
+ # ---- e_1_parent_1_1_1_3_0
|
||||
+ members = [ensure_bytes(e_1_parent_1_1_1_3_0)]
|
||||
+ mod = [(ldap.MOD_REPLACE, 'member', members)]
|
||||
+ topo.standalone.modify_s(e_1_parent_1_1_3_0, mod)
|
||||
|
||||
+ #
|
||||
+ # configure the test plugin to request 'memberof' with the
|
||||
+ # same scope and groupAttr ('member') so that we can
|
||||
+ # reuse the values computed by memberof plugin
|
||||
+ #
|
||||
dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config'
|
||||
topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(),
|
||||
'cn': 'test_slapi_memberof',
|
||||
@@ -4337,7 +4419,7 @@ def test_slapi_memberof_reuse_only_2(topo, request, install_test_plugin):
|
||||
'nsslapd-plugin-depends-on-type': 'database',
|
||||
'nsslapd-pluginId': 'test_slapi_memberof-plugin',
|
||||
'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com',
|
||||
- 'slapimemberOfGroupAttr': 'manager',
|
||||
+ 'slapimemberOfGroupAttr': 'member',
|
||||
'slapimemberOfAttr': 'memberof',
|
||||
'slapimemberOfFlag': 'MEMBEROF_REUSE_ONLY',
|
||||
'slapimemberOfAllBackends': 'off',
|
||||
@@ -4350,63 +4432,63 @@ def test_slapi_memberof_reuse_only_2(topo, request, install_test_plugin):
|
||||
topo.standalone.restart()
|
||||
|
||||
# Check the first subtree
|
||||
- expected = [ e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0]
|
||||
- res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager")
|
||||
+ expected = [EMPTY_RESULT]
|
||||
+ res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="member")
|
||||
_check_res_vs_expected("first subtree", res, expected)
|
||||
|
||||
# Check the second subtree
|
||||
- expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0]
|
||||
- res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager")
|
||||
+ expected = [EMPTY_RESULT]
|
||||
+ res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="member")
|
||||
_check_res_vs_expected("second subtree", res, expected)
|
||||
|
||||
# Check the third subtree
|
||||
- expected = [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]
|
||||
- res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager")
|
||||
+ expected = [EMPTY_RESULT]
|
||||
+ res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="member")
|
||||
_check_res_vs_expected("third subtree", res, expected)
|
||||
|
||||
# check e_1_parent_1_0
|
||||
- expected = [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0]
|
||||
- res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager")
|
||||
- _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected)
|
||||
+ expected = [e_1_parent_0]
|
||||
+ res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="member")
|
||||
+ _check_res_vs_expected("Groups which e_1_parent_1_0 is member of", res, expected)
|
||||
|
||||
# check e_1_parent_1_1_0
|
||||
- expected = [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0]
|
||||
- res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager")
|
||||
+ expected = [e_1_parent_0, e_1_parent_1_0]
|
||||
+ res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="member")
|
||||
_check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected)
|
||||
|
||||
# check e_2_parent_1_1_0
|
||||
- expected = [EMPTY_RESULT]
|
||||
- res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager")
|
||||
+ expected = [e_1_parent_0, e_1_parent_1_0]
|
||||
+ res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="member")
|
||||
_check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected)
|
||||
|
||||
# check e_2_parent_1_0
|
||||
- expected = [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0]
|
||||
- res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager")
|
||||
+ expected = [e_1_parent_0]
|
||||
+ res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="member")
|
||||
_check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected)
|
||||
|
||||
# check e_2_parent_2_1_0
|
||||
- expected = [e_1_parent_2_2_1_0]
|
||||
- res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager")
|
||||
+ expected = [e_1_parent_0, e_2_parent_1_0]
|
||||
+ res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="member")
|
||||
_check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected)
|
||||
|
||||
# Check e_1_parent_3_0
|
||||
- expected = [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]
|
||||
- res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager")
|
||||
+ expected = [e_3_parent_0]
|
||||
+ res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="member")
|
||||
_check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected)
|
||||
|
||||
# Check e_1_parent_1_3_0
|
||||
- expected = [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]
|
||||
- res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager")
|
||||
+ expected = [e_3_parent_0, e_1_parent_3_0]
|
||||
+ res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="member")
|
||||
_check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected)
|
||||
|
||||
# Check e_1_parent_1_1_3_0
|
||||
- expected = [e_1_parent_1_1_1_3_0]
|
||||
- res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager")
|
||||
+ expected = [e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0]
|
||||
+ res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="member")
|
||||
_check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected)
|
||||
|
||||
# Check e_1_parent_1_1_1_3_0
|
||||
- expected = [EMPTY_RESULT]
|
||||
- res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager")
|
||||
+ expected = [e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0]
|
||||
+ res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="member")
|
||||
_check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected)
|
||||
|
||||
def fin():
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index 0f7b3a41d..563af47f8 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -1602,7 +1602,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
|
||||
ht_grp = ancestors_cache_lookup(config, (const void *)ndn);
|
||||
if (ht_grp) {
|
||||
#if MEMBEROF_CACHE_DEBUG
|
||||
- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%x)\n", ndn, ht_grp);
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%lx)\n", ndn, (ulong) ht_grp);
|
||||
#endif
|
||||
add_ancestors_cbdata(ht_grp, callback_data);
|
||||
*cached = 1;
|
||||
@@ -1610,7 +1610,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
|
||||
}
|
||||
}
|
||||
#if MEMBEROF_CACHE_DEBUG
|
||||
- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s not cached\n", ndn);
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s not cached\n", slapi_sdn_get_ndn(sdn));
|
||||
#endif
|
||||
|
||||
/* Escape the dn, and build the search filter. */
|
||||
@@ -3243,7 +3243,8 @@ cache_ancestors(MemberOfConfig *config, Slapi_Value **member_ndn_val, memberof_g
|
||||
return;
|
||||
}
|
||||
#if MEMBEROF_CACHE_DEBUG
|
||||
- if (double_check = ancestors_cache_lookup(config, (const void*) key)) {
|
||||
+ double_check = ancestors_cache_lookup(config, (const void*) key);
|
||||
+ if (double_check) {
|
||||
dump_cache_entry(double_check, "read back");
|
||||
}
|
||||
#endif
|
||||
@@ -3273,13 +3274,13 @@ merge_ancestors(Slapi_Value **member_ndn_val, memberof_get_groups_data *v1, memb
|
||||
sval_dn = slapi_value_new_string(slapi_value_get_string(sval));
|
||||
if (sval_dn) {
|
||||
/* Use the normalized dn from v1 to search it
|
||||
- * in v2
|
||||
- */
|
||||
+ * in v2
|
||||
+ */
|
||||
val_sdn = slapi_sdn_new_dn_byval(slapi_value_get_string(sval_dn));
|
||||
sval_ndn = slapi_value_new_string(slapi_sdn_get_ndn(val_sdn));
|
||||
if (!slapi_valueset_find(
|
||||
((memberof_get_groups_data *)v2)->config->group_slapiattrs[0], v2_group_norm_vals, sval_ndn)) {
|
||||
-/* This ancestor was not already present in v2 => Add it
|
||||
+ /* This ancestor was not already present in v2 => Add it
|
||||
* Using slapi_valueset_add_value it consumes val
|
||||
* so do not free sval
|
||||
*/
|
||||
@@ -3328,7 +3329,7 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn, memberof_get
|
||||
|
||||
merge_ancestors(&member_ndn_val, &member_data, data);
|
||||
if (!cached && member_data.use_cache)
|
||||
- cache_ancestors(config, &member_ndn_val, &member_data);
|
||||
+ cache_ancestors(config, &member_ndn_val, data);
|
||||
|
||||
slapi_value_free(&member_ndn_val);
|
||||
slapi_valueset_free(groupvals);
|
||||
@@ -3389,25 +3390,6 @@ memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
|
||||
goto bail;
|
||||
}
|
||||
|
||||
- /* Have we been here before? Note that we don't loop through all of the group_slapiattrs
|
||||
- * in config. We only need this attribute for it's syntax so the comparison can be
|
||||
- * performed. Since all of the grouping attributes are validated to use the Dinstinguished
|
||||
- * Name syntax, we can safely just use the first group_slapiattr. */
|
||||
- if (slapi_valueset_find(
|
||||
- ((memberof_get_groups_data *)callback_data)->config->group_slapiattrs[0], already_seen_ndn_vals, group_ndn_val)) {
|
||||
- /* we either hit a recursive grouping, or an entry is
|
||||
- * a member of a group through multiple paths. Either
|
||||
- * way, we can just skip processing this entry since we've
|
||||
- * already gone through this part of the grouping hierarchy. */
|
||||
- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
- "memberof_get_groups_callback - Possible group recursion"
|
||||
- " detected in %s\n",
|
||||
- group_ndn);
|
||||
- slapi_value_free(&group_ndn_val);
|
||||
- ((memberof_get_groups_data *)callback_data)->use_cache = PR_FALSE;
|
||||
- goto bail;
|
||||
- }
|
||||
-
|
||||
/* if the group does not belong to an excluded subtree, adds it to the valueset */
|
||||
if (memberof_entry_in_scope(config, group_sdn)) {
|
||||
/* Push group_dn_val into the valueset. This memory is now owned
|
||||
@@ -3417,9 +3399,21 @@ memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
|
||||
group_dn_val = slapi_value_new_string(group_dn);
|
||||
slapi_valueset_add_value_ext(groupvals, group_dn_val, SLAPI_VALUE_FLAG_PASSIN);
|
||||
|
||||
- /* push this ndn to detect group recursion */
|
||||
- already_seen_ndn_val = slapi_value_new_string(group_ndn);
|
||||
- slapi_valueset_add_value_ext(already_seen_ndn_vals, already_seen_ndn_val, SLAPI_VALUE_FLAG_PASSIN);
|
||||
+ if (slapi_valueset_find(
|
||||
+ ((memberof_get_groups_data *)callback_data)->config->group_slapiattrs[0], already_seen_ndn_vals, group_ndn_val)) {
|
||||
+ /* The group group_ndn_val has already been processed
|
||||
+ * skip the final recursion to prevent infinite loop
|
||||
+ */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "memberof_get_groups_callback - detecting a loop in group %s (stop building memberof)\n",
|
||||
+ group_ndn);
|
||||
+ ((memberof_get_groups_data *)callback_data)->use_cache = PR_FALSE;
|
||||
+ goto bail;
|
||||
+ } else {
|
||||
+ /* keep this ndn to detect a possible group recursion */
|
||||
+ already_seen_ndn_val = slapi_value_new_string(group_ndn);
|
||||
+ slapi_valueset_add_value_ext(already_seen_ndn_vals, already_seen_ndn_val, SLAPI_VALUE_FLAG_PASSIN);
|
||||
+ }
|
||||
}
|
||||
if (!config->skip_nested || config->fixup_task) {
|
||||
/* now recurse to find ancestors groups of e */
|
||||
--
|
||||
2.49.0
|
||||
|
||||
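For orientation, the hunk above reorders the recursion guard in memberof_get_groups_callback: the group DN is now added to the member's result set before the "already seen" check, so a group that is part of a membership loop still contributes its memberOf value once before the recursion is cut. A minimal Python sketch of that ordering (illustrative pseudocode only, not the plugin's C source; all names here are invented):

    def collect_groups(entry, lookup_parents, groups=None, already_seen=None):
        """Return the set of groups reachable from entry, tolerating membership loops."""
        groups = set() if groups is None else groups
        already_seen = set() if already_seen is None else already_seen
        for parent in lookup_parents(entry):
            groups.add(parent)               # record the membership first
            if parent in already_seen:       # then stop recursing on a detected loop
                continue
            already_seen.add(parent)
            collect_groups(parent, lookup_parents, groups, already_seen)
        return groups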
@ -1,4 +1,4 @@
|
||||
From 17da0257b24749765777a4e64c3626cb39cca639 Mon Sep 17 00:00:00 2001
|
||||
From f16be0c3fb752f6258f064963fb771ce16ac7d9f Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 31 Mar 2025 11:05:01 +0200
|
||||
Subject: [PATCH] Issue 6571 - (2nd) Nested group does not receive memberOf
|
||||
@ -21,10 +21,10 @@ review by: Simon Pichugin (Thanks !!)
|
||||
2 files changed, 203 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
index dba908975..9ba40a0c3 100644
|
||||
index 51c43a71e..cb21c32a2 100644
|
||||
--- a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
@@ -598,6 +598,8 @@ def test_multipaths(topology_st, request):
|
||||
@@ -595,6 +595,8 @@ def test_multipaths(topology_st, request):
|
||||
'homeDirectory': '/home/user1'
|
||||
})
|
||||
group = Groups(inst, SUFFIX, rdn=None)
|
||||
@ -33,7 +33,7 @@ index dba908975..9ba40a0c3 100644
|
||||
g1 = group.create(properties={'cn': 'group1',
|
||||
'member': user1.dn,
|
||||
'description': 'group1'})
|
||||
@@ -635,6 +637,158 @@ def test_multipaths(topology_st, request):
|
||||
@@ -632,6 +634,158 @@ def test_multipaths(topology_st, request):
|
||||
_check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1])
|
||||
_check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
|
||||
@ -193,10 +193,10 @@ index dba908975..9ba40a0c3 100644
|
||||
try:
|
||||
user1.delete()
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index 32bdcf3f1..f79b083a9 100644
|
||||
index 563af47f8..35fd1a4a0 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -3258,6 +3258,35 @@ merge_ancestors(Slapi_Value **member_ndn_val, memberof_get_groups_data *v1, memb
|
||||
@@ -3268,6 +3268,35 @@ merge_ancestors(Slapi_Value **member_ndn_val, memberof_get_groups_data *v1, memb
|
||||
Slapi_ValueSet *v2_group_norm_vals = *((memberof_get_groups_data *)v2)->group_norm_vals;
|
||||
int merged_cnt = 0;
|
||||
|
||||
@ -232,7 +232,7 @@ index 32bdcf3f1..f79b083a9 100644
|
||||
hint = slapi_valueset_first_value(v1_groupvals, &sval);
|
||||
while (sval) {
|
||||
if (memberof_compare(config, member_ndn_val, &sval)) {
|
||||
@@ -3319,7 +3348,7 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn, memberof_get
|
||||
@@ -3329,7 +3358,7 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn, memberof_get
|
||||
|
||||
merge_ancestors(&member_ndn_val, &member_data, data);
|
||||
if (!cached && member_data.use_cache)
|
||||
@ -241,7 +241,7 @@ index 32bdcf3f1..f79b083a9 100644
|
||||
|
||||
slapi_value_free(&member_ndn_val);
|
||||
slapi_valueset_free(groupvals);
|
||||
@@ -4285,6 +4314,25 @@ memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data)
|
||||
@@ -4295,6 +4324,25 @@ memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data)
|
||||
|
||||
/* get a list of all of the groups this user belongs to */
|
||||
groups = memberof_get_groups(config, sdn);
|
||||
@ -0,0 +1,39 @@
|
||||
From 60fc8b93f2dab0453e8cc398605eda40289d3bf7 Mon Sep 17 00:00:00 2001
|
||||
From: Navid Yaghoobi <n.yaghoobi.s@gmail.com>
|
||||
Date: Mon, 12 Aug 2024 20:35:45 +1000
|
||||
Subject: [PATCH] Issue 6288 - dsidm crash with account policy when
|
||||
alt-state-attr is disabled (#6292)
|
||||
|
||||
Bug Description:
|
||||
If disable alt-state-attr for account policy plugin by using value 1.1 then
|
||||
dsidm command will crash if state-attr doesn't exit at that time.
|
||||
|
||||
Fix Description:
|
||||
Check if alt-state-attri key exist under account data dict before getting its value.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/6288
|
||||
|
||||
Author: Navid Yaghoobi
|
||||
|
||||
Reviewed by: @progier389
|
||||
---
|
||||
src/lib389/lib389/idm/account.py | 3 ++-
|
||||
1 file changed, 2 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/idm/account.py b/src/lib389/lib389/idm/account.py
|
||||
index 4b823b662..d892c8d0d 100644
|
||||
--- a/src/lib389/lib389/idm/account.py
|
||||
+++ b/src/lib389/lib389/idm/account.py
|
||||
@@ -140,7 +140,8 @@ class Account(DSLdapObject):
|
||||
"nsAccountLock", state_attr])
|
||||
|
||||
last_login_time = self._dict_get_with_ignore_indexerror(account_data, state_attr)
|
||||
- if not last_login_time:
|
||||
+ # if last_login_time not exist then check alt_state_attr only if its not disabled and exist
|
||||
+ if not last_login_time and alt_state_attr in account_data:
|
||||
last_login_time = self._dict_get_with_ignore_indexerror(account_data, alt_state_attr)
|
||||
|
||||
create_time = self._dict_get_with_ignore_indexerror(account_data, "createTimestamp")
|
||||
--
|
||||
2.49.0
|
||||
|
||||
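The change above is small but easy to misread in diff form: the alternate state attribute can be disabled with the special value "1.1", in which case it is never present in the fetched entry data, so the fallback lookup must be guarded by a key-existence check. A short Python sketch of the same guard (assumed helper names, for illustration only, not lib389 code):

    def resolve_last_login(account_data, state_attr, alt_state_attr):
        """Pick the login-time attribute, falling back only when the alternate attribute exists."""
        last_login_time = account_data.get(state_attr)
        # alt_state_attr may be disabled (value "1.1") and therefore absent from the entry
        if not last_login_time and alt_state_attr in account_data:
            last_login_time = account_data.get(alt_state_attr)
        return last_login_time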
@ -1,4 +1,4 @@
|
||||
From 446a23d0ed2d3ffa76c5fb5e9576d6876bdbf04f Mon Sep 17 00:00:00 2001
|
||||
From 823216f67034fa6649cedfdd11d851c45a5630cb Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Fri, 28 Mar 2025 11:28:54 -0700
|
||||
Subject: [PATCH] Issue 6686 - CLI - Re-enabling user accounts that reached
|
||||
@ -24,8 +24,8 @@ Reviewed by: @mreynolds389 (Thanks!)
|
||||
---
|
||||
.../clu/dsidm_account_inactivity_test.py | 329 ++++++++++++++++++
|
||||
src/lib389/lib389/cli_idm/account.py | 25 +-
|
||||
src/lib389/lib389/idm/account.py | 28 +-
|
||||
3 files changed, 377 insertions(+), 5 deletions(-)
|
||||
src/lib389/lib389/idm/account.py | 27 +-
|
||||
3 files changed, 376 insertions(+), 5 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py b/dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py
|
||||
@ -401,20 +401,19 @@ index 15f766588..a0dfd8f65 100644
|
||||
|
||||
def reset_password(inst, basedn, log, args):
|
||||
diff --git a/src/lib389/lib389/idm/account.py b/src/lib389/lib389/idm/account.py
|
||||
index 4b823b662..faf6f6f16 100644
|
||||
index d892c8d0d..faf6f6f16 100644
|
||||
--- a/src/lib389/lib389/idm/account.py
|
||||
+++ b/src/lib389/lib389/idm/account.py
|
||||
@@ -140,7 +140,8 @@ class Account(DSLdapObject):
|
||||
@@ -140,7 +140,7 @@ class Account(DSLdapObject):
|
||||
"nsAccountLock", state_attr])
|
||||
|
||||
last_login_time = self._dict_get_with_ignore_indexerror(account_data, state_attr)
|
||||
- if not last_login_time:
|
||||
- # if last_login_time not exist then check alt_state_attr only if its not disabled and exist
|
||||
+ # if last_login_time not exist then check alt_state_attr only if its not disabled and exist
|
||||
+ if not last_login_time and alt_state_attr in account_data:
|
||||
if not last_login_time and alt_state_attr in account_data:
|
||||
last_login_time = self._dict_get_with_ignore_indexerror(account_data, alt_state_attr)
|
||||
|
||||
create_time = self._dict_get_with_ignore_indexerror(account_data, "createTimestamp")
|
||||
@@ -203,12 +204,33 @@ class Account(DSLdapObject):
|
||||
@@ -204,12 +204,33 @@ class Account(DSLdapObject):
|
||||
self.replace('nsAccountLock', 'true')
|
||||
|
||||
def unlock(self):
|
||||
@ -1,4 +1,4 @@
|
||||
From ff364a4b1c88e1a8f678e056af88cce50cd8717c Mon Sep 17 00:00:00 2001
|
||||
From 92183a60cb1c106b2ebf614107c88837483f2ec6 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Fri, 28 Mar 2025 17:32:14 +0100
|
||||
Subject: [PATCH] Issue 6698 - NPE after configuring invalid filtered role
|
||||
@ -19,7 +19,7 @@ Reviewed by: @tbordaz , @mreynolds389 (Thanks!)
|
||||
2 files changed, 88 insertions(+), 9 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
index 875ac47c1..b79816c58 100644
|
||||
index e53f9fccb..32b7657c0 100644
|
||||
--- a/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
@@ -28,6 +28,7 @@ from lib389.dbgen import dbgen_users
|
||||
@ -30,7 +30,7 @@ index 875ac47c1..b79816c58 100644
|
||||
from lib389.backend import Backends
|
||||
|
||||
logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
@@ -427,7 +428,6 @@ def test_vattr_on_filtered_role_restart(topo, request):
|
||||
@@ -433,7 +434,6 @@ def test_vattr_on_filtered_role_restart(topo, request):
|
||||
log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
|
||||
assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
|
||||
|
||||
@ -38,7 +38,7 @@ index 875ac47c1..b79816c58 100644
|
||||
log.info("Check the virtual attribute definition is found (after a required delay)")
|
||||
topo.standalone.restart()
|
||||
time.sleep(5)
|
||||
@@ -541,7 +541,7 @@ def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
@@ -552,7 +552,7 @@ def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
indexes = backend.get_indexes()
|
||||
try:
|
||||
index = indexes.create(properties={
|
||||
@ -47,7 +47,7 @@ index 875ac47c1..b79816c58 100644
|
||||
'nsSystemIndex': 'false',
|
||||
'nsIndexType': ['eq', 'pres']
|
||||
})
|
||||
@@ -593,7 +593,6 @@ def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
@@ -604,7 +604,6 @@ def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
dn = "uid=%s0000%d,%s" % (RDN, i, PARENT)
|
||||
topo.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsRoleDN', [role.dn.encode()])])
|
||||
|
||||
@ -55,7 +55,7 @@ index 875ac47c1..b79816c58 100644
|
||||
# Now check that search is fast, evaluating only 4 entries
|
||||
search_start = time.time()
|
||||
entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
@@ -676,7 +675,7 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
@@ -687,7 +686,7 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
indexes = backend.get_indexes()
|
||||
try:
|
||||
index = indexes.create(properties={
|
||||
@ -64,7 +64,7 @@ index 875ac47c1..b79816c58 100644
|
||||
'nsSystemIndex': 'false',
|
||||
'nsIndexType': ['eq', 'pres']
|
||||
})
|
||||
@@ -730,7 +729,7 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
@@ -741,7 +740,7 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
|
||||
# Enable plugin level to check message
|
||||
topo.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.PLUGIN))
|
||||
@ -73,7 +73,7 @@ index 875ac47c1..b79816c58 100644
|
||||
# Now check that search is fast, evaluating only 4 entries
|
||||
search_start = time.time()
|
||||
entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(|(nsrole=%s)(nsrole=cn=not_such_entry_role,%s))" % (role.dn, DEFAULT_SUFFIX))
|
||||
@@ -758,6 +757,77 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
@@ -769,6 +768,77 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
@ -152,10 +152,10 @@ index 875ac47c1..b79816c58 100644
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main("-s -v %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/slapd/filter.c b/ldap/servers/slapd/filter.c
|
||||
index ce09891b8..f541b8fc1 100644
|
||||
index 44b726a34..2c89ca421 100644
|
||||
--- a/ldap/servers/slapd/filter.c
|
||||
+++ b/ldap/servers/slapd/filter.c
|
||||
@@ -1038,9 +1038,11 @@ slapi_filter_get_subfilt(
|
||||
@@ -1033,9 +1033,11 @@ slapi_filter_get_subfilt(
|
||||
}
|
||||
|
||||
/*
|
||||
@ -169,7 +169,7 @@ index ce09891b8..f541b8fc1 100644
|
||||
*/
|
||||
int
|
||||
slapi_filter_replace_ex(Slapi_Filter *f, char *s)
|
||||
@@ -1099,8 +1101,15 @@ slapi_filter_free_bits(Slapi_Filter *f)
|
||||
@@ -1094,8 +1096,15 @@ slapi_filter_free_bits(Slapi_Filter *f)
|
||||
int
|
||||
slapi_filter_replace_strfilter(Slapi_Filter *f, char *strfilter)
|
||||
{
|
||||
498
0036-Issue-6626-Ignore-replica-busy-condition-in-healthch.patch
Normal file

@ -0,0 +1,498 @@
|
||||
From 5a4e8c5752a9d3c9e481c16efa794cc8ab5425c4 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Wed, 19 Mar 2025 19:04:30 +0100
|
||||
Subject: [PATCH] Issue 6626 - Ignore replica busy condition in healthcheck
|
||||
(#6630)
|
||||
|
||||
Replica Busy condition is expected when there is more than 2 suppliers so healthcheck should not report any error for such condition.
|
||||
Fixed issue in CI tests:
|
||||
|
||||
test_healthcheck_replication_out_of_sync_not_broken was unstable and redundant with test_healthcheck_replica_busy so I moved it back in health_repl_test.py and rewrite it to test working replication whose replica are not in sync (healthcheck should not report anything)
|
||||
some tests (not always the same) were randomly failing with an empty log (which is unexpected because healthcheck output should never be empty.
|
||||
I suspect the log capture mechanism so health_repl_test.py now run dsctl instance healthcheck using subprocess module and capture the output. So far I have not changed the other files because I have not noticed any failure.
|
||||
Issue: #6626
|
||||
|
||||
Reviewed by: @tbordaz (Thanks!)
|
||||
|
||||
(cherry picked from commit bc22cfa6184f51b8492c692f1c95e721d538ab5e)
|
||||
---
|
||||
.../suites/healthcheck/health_repl_test.py | 245 ++++++++++++++----
|
||||
.../suites/healthcheck/health_sync_test.py | 130 ----------
|
||||
src/lib389/lib389/replica.py | 3 +
|
||||
3 files changed, 191 insertions(+), 187 deletions(-)
|
||||
delete mode 100644 dirsrvtests/tests/suites/healthcheck/health_sync_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/healthcheck/health_repl_test.py b/dirsrvtests/tests/suites/healthcheck/health_repl_test.py
|
||||
index a8d94dfcb..729cc3d6b 100644
|
||||
--- a/dirsrvtests/tests/suites/healthcheck/health_repl_test.py
|
||||
+++ b/dirsrvtests/tests/suites/healthcheck/health_repl_test.py
|
||||
@@ -9,56 +9,124 @@
|
||||
|
||||
import pytest
|
||||
import os
|
||||
-from contextlib import suppress
|
||||
+import random
|
||||
+import re
|
||||
+import string
|
||||
+import subprocess
|
||||
+import threading
|
||||
+import time
|
||||
+from contextlib import suppress, AbstractContextManager
|
||||
from lib389.backend import Backend, Backends
|
||||
from lib389.idm.user import UserAccounts
|
||||
from lib389.replica import Changelog, ReplicationManager, Replicas
|
||||
from lib389.utils import *
|
||||
from lib389._constants import *
|
||||
from lib389.cli_base import FakeArgs
|
||||
-from lib389.topologies import topology_m2, topology_m3
|
||||
from lib389.cli_ctl.health import health_check_run
|
||||
+from lib389.topologies import topology_m2, topology_m3
|
||||
from lib389.paths import Paths
|
||||
|
||||
CMD_OUTPUT = 'No issues found.'
|
||||
JSON_OUTPUT = '[]'
|
||||
|
||||
+LOGIC_DICT = {
|
||||
+ False: ( "not ", "", lambda x: x ),
|
||||
+ True: ( "", "not ", lambda x: not x )
|
||||
+ }
|
||||
+
|
||||
ds_paths = Paths()
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
-def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None):
|
||||
- args = FakeArgs()
|
||||
- args.instance = instance.serverid
|
||||
- args.verbose = instance.verbose
|
||||
- args.list_errors = False
|
||||
- args.list_checks = False
|
||||
- args.check = ['replication', 'backends:userroot:cl_trimming']
|
||||
- args.dry_run = False
|
||||
-
|
||||
+class LoadInstance(AbstractContextManager):
|
||||
+ @staticmethod
|
||||
+ def create_test_user(inst):
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ uid = str(20000 + int(inst.serverid[8:]))
|
||||
+ properties = {
|
||||
+ 'uid': f'testuser_{inst.serverid}',
|
||||
+ 'cn' : f'testuser_{inst.serverid}',
|
||||
+ 'sn' : 'user_{inst.serverid}',
|
||||
+ 'uidNumber' : uid,
|
||||
+ 'gidNumber' : uid,
|
||||
+ 'homeDirectory' : f'/home/testuser_{inst.serverid}'
|
||||
+ }
|
||||
+ return users.ensure_state(properties=properties)
|
||||
+
|
||||
+ def __init__(self, inst):
|
||||
+ self.inst = inst
|
||||
+ self.stop = threading.Event()
|
||||
+ self.thread = threading.Thread(target=self.loader)
|
||||
+ self.user = LoadInstance.create_test_user(inst)
|
||||
+
|
||||
+ def loader(self):
|
||||
+ while not self.stop.is_set():
|
||||
+ value = ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
|
||||
+ self.user.replace('description', value)
|
||||
+ #log.info(f'Modified {self.user.dn} description with {value} on {self.inst.serverid}')
|
||||
+ time.sleep(0.001)
|
||||
+
|
||||
+ def __exit__(self, *args):
|
||||
+ self.stop.set()
|
||||
+ self.thread.join()
|
||||
+ self.user.delete()
|
||||
+
|
||||
+ def __enter__(self):
|
||||
+ self.thread.start()
|
||||
+ return self
|
||||
+
|
||||
+
|
||||
+class BreakReplication(AbstractContextManager):
|
||||
+ def __init__(self, inst):
|
||||
+ self.replica = Replicas(inst).list()[0]
|
||||
+ self.oldval = None
|
||||
+
|
||||
+ def __exit__(self, *args):
|
||||
+ self.replica.replace('nsds5ReplicaBindDNGroup', self.oldval)
|
||||
+
|
||||
+ def __enter__(self):
|
||||
+ self.oldval = self.replica.get_attr_val_utf8('nsds5ReplicaBindDNGroup')
|
||||
+ self.replica.replace('nsds5ReplicaBindDNGroup', 'cn=repl')
|
||||
+ return self
|
||||
+
|
||||
+
|
||||
+def assert_is_in_result(result, searched_code, isnot=False):
|
||||
+ # Assert if searched_code is not in logcap
|
||||
+ if searched_code is None:
|
||||
+ return
|
||||
+
|
||||
+ # Handle positive and negative tests:
|
||||
+ nomatch, match, f = LOGIC_DICT[bool(isnot)]
|
||||
+ try:
|
||||
+ assert f(re.search(re.escape(searched_code), result))
|
||||
+ log.info(f'Searched code {searched_code} is {match}in healthcheck output')
|
||||
+ except AssertionError as exc:
|
||||
+ log.error(f'{searched_code} is {nomatch}in healthcheck output: {result}')
|
||||
+ raise
|
||||
+
|
||||
+
|
||||
+def run_healthcheck_and_check_result(topology, instance, searched_code, json, searched_code2=None, isnot=False):
|
||||
+ cmd = [ 'dsctl', ]
|
||||
if json:
|
||||
- log.info('Use healthcheck with --json option')
|
||||
- args.json = json
|
||||
- health_check_run(instance, topology.logcap.log, args)
|
||||
- assert topology.logcap.contains(searched_code)
|
||||
- log.info('Healthcheck returned searched code: %s' % searched_code)
|
||||
-
|
||||
- if searched_code2 is not None:
|
||||
- assert topology.logcap.contains(searched_code2)
|
||||
- log.info('Healthcheck returned searched code: %s' % searched_code2)
|
||||
- else:
|
||||
- log.info('Use healthcheck without --json option')
|
||||
- args.json = json
|
||||
- health_check_run(instance, topology.logcap.log, args)
|
||||
- assert topology.logcap.contains(searched_code)
|
||||
- log.info('Healthcheck returned searched code: %s' % searched_code)
|
||||
-
|
||||
- if searched_code2 is not None:
|
||||
- assert topology.logcap.contains(searched_code2)
|
||||
- log.info('Healthcheck returned searched code: %s' % searched_code2)
|
||||
-
|
||||
- log.info('Clear the log')
|
||||
- topology.logcap.flush()
|
||||
+ cmd.append('--json')
|
||||
+ if searched_code == CMD_OUTPUT:
|
||||
+ searched_code = JSON_OUTPUT
|
||||
+ cmd.append(instance.serverid)
|
||||
+ cmd.extend(['healthcheck', '--check', 'replication' , 'backends:userroot:cl_trimming'])
|
||||
+
|
||||
+ result = subprocess.run(cmd, capture_output=True, universal_newlines=True)
|
||||
+ log.info(f'Running: {cmd}')
|
||||
+ log.info(f'Stdout: {result.stdout}')
|
||||
+ log.info(f'Stderr: {result.stdout}')
|
||||
+ log.info(f'Return code: {result.returncode}')
|
||||
+ stdout = result.stdout
|
||||
+
|
||||
+ # stdout should not be empty
|
||||
+ assert stdout is not None
|
||||
+ assert len(stdout) > 0
|
||||
+ assert_is_in_result(stdout, searched_code, isnot=isnot)
|
||||
+ assert_is_in_result(stdout, searched_code2, isnot=isnot)
|
||||
+
|
||||
|
||||
|
||||
def set_changelog_trimming(instance):
|
||||
@@ -112,15 +180,15 @@ def test_healthcheck_replication_replica_not_reachable(topology_m2):
|
||||
with suppress(Exception):
|
||||
repl.wait_for_replication(M1, M2, timeout=5)
|
||||
|
||||
- run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False)
|
||||
- run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True)
|
||||
+ run_healthcheck_and_check_result(topology_m2, M1, RET_CODE, json=False)
|
||||
+ run_healthcheck_and_check_result(topology_m2, M1, RET_CODE, json=True)
|
||||
|
||||
log.info('Set nsds5replicaport for the replication agreement to a reachable port')
|
||||
agmt_m1.replace('nsDS5ReplicaPort', '{}'.format(M2.port))
|
||||
repl.wait_for_replication(M1, M2)
|
||||
|
||||
- run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False)
|
||||
- run_healthcheck_and_flush_log(topology_m2, M1, JSON_OUTPUT, json=True)
|
||||
+ run_healthcheck_and_check_result(topology_m2, M1, CMD_OUTPUT, json=False)
|
||||
+ run_healthcheck_and_check_result(topology_m2, M1, JSON_OUTPUT, json=True)
|
||||
|
||||
|
||||
@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented")
|
||||
@@ -160,13 +228,13 @@ def test_healthcheck_changelog_trimming_not_configured(topology_m2):
|
||||
|
||||
time.sleep(3)
|
||||
|
||||
- run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False)
|
||||
- run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True)
|
||||
+ run_healthcheck_and_check_result(topology_m2, M1, RET_CODE, json=False)
|
||||
+ run_healthcheck_and_check_result(topology_m2, M1, RET_CODE, json=True)
|
||||
|
||||
set_changelog_trimming(M1)
|
||||
|
||||
- run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False)
|
||||
- run_healthcheck_and_flush_log(topology_m2, M1, JSON_OUTPUT, json=True)
|
||||
+ run_healthcheck_and_check_result(topology_m2, M1, CMD_OUTPUT, json=False)
|
||||
+ run_healthcheck_and_check_result(topology_m2, M1, JSON_OUTPUT, json=True)
|
||||
|
||||
|
||||
@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented")
|
||||
@@ -208,8 +276,8 @@ def test_healthcheck_replication_presence_of_conflict_entries(topology_m2):
|
||||
|
||||
repl.test_replication_topology(topology_m2)
|
||||
|
||||
- run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False)
|
||||
- run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True)
|
||||
+ run_healthcheck_and_check_result(topology_m2, M1, RET_CODE, json=False)
|
||||
+ run_healthcheck_and_check_result(topology_m2, M1, RET_CODE, json=True)
|
||||
|
||||
|
||||
def test_healthcheck_non_replicated_suffixes(topology_m2):
|
||||
@@ -245,6 +313,44 @@ def test_healthcheck_non_replicated_suffixes(topology_m2):
|
||||
health_check_run(inst, topology_m2.logcap.log, args)
|
||||
|
||||
|
||||
+def test_healthcheck_replica_busy(topology_m3):
|
||||
+ """Check that HealthCheck does not returns DSREPLLE0003 code when a replicva is busy
|
||||
+
|
||||
+ :id: b7c4a5aa-ef98-11ef-87f5-482ae39447e5
|
||||
+ :setup: 3 MMR topology
|
||||
+ :steps:
|
||||
+ 1. Create a 3 suppliers full-mesh topology
|
||||
+ 2. Generate constant modify load on S1 and S2
|
||||
+ 3. Wait a bit to ensure stable replication flow
|
||||
+ 4. Perform a modify on S3
|
||||
+ 5. Use HealthCheck on S3 without --json option
|
||||
+ 6. Use HealthCheck on S3 with --json option
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Healthcheck should not reports DSREPLLE0003 code and related details
|
||||
+ 6. Healthcheck should not reports DSREPLLE0003 code and related details
|
||||
+ """
|
||||
+
|
||||
+ RET_CODE = 'DSREPLLE0003'
|
||||
+ # Is DSREPLLE0003 ignored if replica is busy ?
|
||||
+ ignored = not ds_is_older("2.7")
|
||||
+
|
||||
+ S1 = topology_m3.ms['supplier1']
|
||||
+ S2 = topology_m3.ms['supplier2']
|
||||
+ S3 = topology_m3.ms['supplier3']
|
||||
+ with LoadInstance(S1), LoadInstance(S2):
|
||||
+ # Wait a bit to let replication starts
|
||||
+ time.sleep(10)
|
||||
+ # Create user on S3 then remove it:
|
||||
+ LoadInstance(S3).user.delete()
|
||||
+ # S3 agrements should now be in the replica busy state
|
||||
+ run_healthcheck_and_check_result(topology_m3, S3, RET_CODE, json=False, isnot=ignored)
|
||||
+ run_healthcheck_and_check_result(topology_m3, S3, RET_CODE, json=True, isnot=ignored)
|
||||
+
|
||||
+
|
||||
@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented")
|
||||
def test_healthcheck_replication_out_of_sync_broken(topology_m3):
|
||||
"""Check if HealthCheck returns DSREPLLE0001 code
|
||||
@@ -265,25 +371,50 @@ def test_healthcheck_replication_out_of_sync_broken(topology_m3):
|
||||
|
||||
RET_CODE = 'DSREPLLE0001'
|
||||
|
||||
- M1 = topology_m3.ms['supplier1']
|
||||
- M2 = topology_m3.ms['supplier2']
|
||||
- M3 = topology_m3.ms['supplier3']
|
||||
+ S1 = topology_m3.ms['supplier1']
|
||||
+ S2 = topology_m3.ms['supplier2']
|
||||
+ S3 = topology_m3.ms['supplier3']
|
||||
|
||||
log.info('Break supplier2 and supplier3')
|
||||
- replicas = Replicas(M2)
|
||||
- replica = replicas.list()[0]
|
||||
- replica.replace('nsds5ReplicaBindDNGroup', 'cn=repl')
|
||||
+ with BreakReplication(S2), BreakReplication(S3):
|
||||
+ time.sleep(1)
|
||||
+ log.info('Perform update on supplier1')
|
||||
+ test_users_m1 = UserAccounts(S1, DEFAULT_SUFFIX)
|
||||
+ test_users_m1.create_test_user(1005, 2000)
|
||||
|
||||
- replicas = Replicas(M3)
|
||||
- replica = replicas.list()[0]
|
||||
- replica.replace('nsds5ReplicaBindDNGroup', 'cn=repl')
|
||||
+ time.sleep(3)
|
||||
+ run_healthcheck_and_check_result(topology_m3, S1, RET_CODE, json=False)
|
||||
+ run_healthcheck_and_check_result(topology_m3, S1, RET_CODE, json=True)
|
||||
|
||||
- log.info('Perform update on supplier1')
|
||||
- test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX)
|
||||
- test_users_m1.create_test_user(1005, 2000)
|
||||
|
||||
- run_healthcheck_and_flush_log(topology_m3, M1, RET_CODE, json=False)
|
||||
- run_healthcheck_and_flush_log(topology_m3, M1, RET_CODE, json=True)
|
||||
+def test_healthcheck_replication_out_of_sync_not_broken(topology_m3):
|
||||
+ """Check that HealthCheck returns no issues when replication is in progress
|
||||
+
|
||||
+ :id: 8305000d-ba4d-4c00-8331-be0e8bd92150
|
||||
+ :setup: 3 MMR topology
|
||||
+ :steps:
|
||||
+ 1. Create a 3 suppliers full-mesh topology, all replicas being synchronized
|
||||
+ 2. Generate constant load on two supplier
|
||||
+ 3. Use HealthCheck without --json option
|
||||
+ 4. Use HealthCheck with --json option
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Healthcheck reports no issue found
|
||||
+ 4. Healthcheck reports no issue found
|
||||
+ """
|
||||
+
|
||||
+ RET_CODE = CMD_OUTPUT
|
||||
+
|
||||
+ S1 = topology_m3.ms['supplier1']
|
||||
+ S2 = topology_m3.ms['supplier2']
|
||||
+ S3 = topology_m3.ms['supplier3']
|
||||
+
|
||||
+ with LoadInstance(S1), LoadInstance(S2):
|
||||
+ # Wait a bit to let replication starts
|
||||
+ time.sleep(10)
|
||||
+ run_healthcheck_and_check_result(topology_m3, S1, RET_CODE, json=False)
|
||||
+ run_healthcheck_and_check_result(topology_m3, S1, RET_CODE, json=True)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
diff --git a/dirsrvtests/tests/suites/healthcheck/health_sync_test.py b/dirsrvtests/tests/suites/healthcheck/health_sync_test.py
|
||||
deleted file mode 100644
|
||||
index 8f9bc0a00..000000000
|
||||
--- a/dirsrvtests/tests/suites/healthcheck/health_sync_test.py
|
||||
+++ /dev/null
|
||||
@@ -1,130 +0,0 @@
|
||||
-# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2020 Red Hat, Inc.
|
||||
-# All rights reserved.
|
||||
-#
|
||||
-# License: GPL (version 3 or any later version).
|
||||
-# See LICENSE for details.
|
||||
-# --- END COPYRIGHT BLOCK ---
|
||||
-#
|
||||
-
|
||||
-import pytest
|
||||
-import os
|
||||
-import time
|
||||
-from datetime import *
|
||||
-from lib389.idm.user import UserAccounts
|
||||
-from lib389.utils import *
|
||||
-from lib389._constants import *
|
||||
-from lib389.cli_base import FakeArgs
|
||||
-from lib389.topologies import topology_m3
|
||||
-from lib389.cli_ctl.health import health_check_run
|
||||
-from lib389.paths import Paths
|
||||
-
|
||||
-ds_paths = Paths()
|
||||
-log = logging.getLogger(__name__)
|
||||
-
|
||||
-
|
||||
-def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None):
|
||||
- args = FakeArgs()
|
||||
- args.instance = instance.serverid
|
||||
- args.verbose = instance.verbose
|
||||
- args.list_errors = False
|
||||
- args.list_checks = False
|
||||
- args.check = ['replication']
|
||||
- args.dry_run = False
|
||||
-
|
||||
- if json:
|
||||
- log.info('Use healthcheck with --json option')
|
||||
- args.json = json
|
||||
- health_check_run(instance, topology.logcap.log, args)
|
||||
- assert topology.logcap.contains(searched_code)
|
||||
- log.info('Healthcheck returned searched code: %s' % searched_code)
|
||||
-
|
||||
- if searched_code2 is not None:
|
||||
- assert topology.logcap.contains(searched_code2)
|
||||
- log.info('Healthcheck returned searched code: %s' % searched_code2)
|
||||
- else:
|
||||
- log.info('Use healthcheck without --json option')
|
||||
- args.json = json
|
||||
- health_check_run(instance, topology.logcap.log, args)
|
||||
- assert topology.logcap.contains(searched_code)
|
||||
- log.info('Healthcheck returned searched code: %s' % searched_code)
|
||||
-
|
||||
- if searched_code2 is not None:
|
||||
- assert topology.logcap.contains(searched_code2)
|
||||
- log.info('Healthcheck returned searched code: %s' % searched_code2)
|
||||
-
|
||||
- log.info('Clear the log')
|
||||
- topology.logcap.flush()
|
||||
-
|
||||
-
|
||||
-# This test is in separate file because it is timeout specific
|
||||
-@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented")
|
||||
-#unstable or unstatus tests, skipped for now
|
||||
-@pytest.mark.flaky(max_runs=2, min_passes=1)
|
||||
-def test_healthcheck_replication_out_of_sync_not_broken(topology_m3):
|
||||
- """Check if HealthCheck returns DSREPLLE0003 code
|
||||
-
|
||||
- :id: 8305000d-ba4d-4c00-8331-be0e8bd92150
|
||||
- :setup: 3 MMR topology
|
||||
- :steps:
|
||||
- 1. Create a 3 suppliers full-mesh topology, all replicas being synchronized
|
||||
- 2. Stop M1
|
||||
- 3. Perform an update on M2 and M3.
|
||||
- 4. Check M2 and M3 are synchronized.
|
||||
- 5. From M2, reinitialize the M3 agreement
|
||||
- 6. Stop M2 and M3
|
||||
- 7. Restart M1
|
||||
- 8. Start M3
|
||||
- 9. Use HealthCheck without --json option
|
||||
- 10. Use HealthCheck with --json option
|
||||
- :expectedresults:
|
||||
- 1. Success
|
||||
- 2. Success
|
||||
- 3. Success
|
||||
- 4. Success
|
||||
- 5. Success
|
||||
- 6. Success
|
||||
- 7. Success
|
||||
- 8. Success
|
||||
- 9. Healthcheck reports DSREPLLE0003 code and related details
|
||||
- 10. Healthcheck reports DSREPLLE0003 code and related details
|
||||
- """
|
||||
-
|
||||
- RET_CODE = 'DSREPLLE0003'
|
||||
-
|
||||
- M1 = topology_m3.ms['supplier1']
|
||||
- M2 = topology_m3.ms['supplier2']
|
||||
- M3 = topology_m3.ms['supplier3']
|
||||
-
|
||||
- log.info('Stop supplier1')
|
||||
- M1.stop()
|
||||
-
|
||||
- log.info('Perform update on supplier2 and supplier3')
|
||||
- test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX)
|
||||
- test_users_m3 = UserAccounts(M3, DEFAULT_SUFFIX)
|
||||
- test_users_m2.create_test_user(1000, 2000)
|
||||
- for user_num in range(1001, 3000):
|
||||
- test_users_m3.create_test_user(user_num, 2000)
|
||||
- time.sleep(2)
|
||||
-
|
||||
- log.info('Stop M2 and M3')
|
||||
- M2.stop()
|
||||
- M3.stop()
|
||||
-
|
||||
- log.info('Start M1 first, then M2, so that M2 acquires M1')
|
||||
- M1.start()
|
||||
- M2.start()
|
||||
- time.sleep(2)
|
||||
-
|
||||
- log.info('Start M3 which should not be able to acquire M1 since M2 is updating it')
|
||||
- M3.start()
|
||||
- time.sleep(2)
|
||||
-
|
||||
- run_healthcheck_and_flush_log(topology_m3, M3, RET_CODE, json=False)
|
||||
- run_healthcheck_and_flush_log(topology_m3, M3, RET_CODE, json=True)
|
||||
-
|
||||
-
|
||||
-if __name__ == '__main__':
|
||||
- # Run isolated
|
||||
- # -s for DEBUG mode
|
||||
- CURRENT_FILE = os.path.realpath(__file__)
|
||||
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
|
||||
index 0486c48f8..0dc3a0f54 100644
|
||||
--- a/src/lib389/lib389/replica.py
|
||||
+++ b/src/lib389/lib389/replica.py
|
||||
@@ -1276,6 +1276,9 @@ class Replica(DSLdapObject):
|
||||
report['check'] = f'replication:agmts_status'
|
||||
yield report
|
||||
elif status['state'] == 'amber':
|
||||
+ if "can't acquire busy replica" in status['reason']:
|
||||
+ # Ignore replica busy condition
|
||||
+ continue
|
||||
# Warning
|
||||
report = copy.deepcopy(DSREPLLE0003)
|
||||
report['detail'] = report['detail'].replace('SUFFIX', suffix)
|
||||
--
|
||||
2.49.0
|
||||
|
||||
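The replica.py hunk above is the functional core of this patch: an agreement reported in the "amber" state is skipped when its reason is the transient "can't acquire busy replica" condition, which is expected in topologies with three or more suppliers. A hedged sketch of that filtering decision (illustrative only, not the shipped check; status is assumed to be the per-agreement dict used above):

    def should_report_amber(status):
        """Return True when an amber agreement status warrants a DSREPLLE0003 warning."""
        if status['state'] != 'amber':
            return False
        # A busy replica is a normal, transient state with 3+ suppliers: do not report it
        return "can't acquire busy replica" not in status['reason']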
@ -1,4 +1,4 @@
|
||||
From ebe986c78c6cd4e1f10172d8a8a11faf814fbc22 Mon Sep 17 00:00:00 2001
|
||||
From 879f48e68f9e37d268cf126d7e147fa04a34bcde Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Thu, 6 Mar 2025 16:49:53 -0500
|
||||
Subject: [PATCH] Issue 6655 - fix replication release replica decoding error
|
||||
@ -15,25 +15,22 @@ Relates: https://github.com/389ds/389-ds-base/issues/6655
|
||||
|
||||
Reviewed by: spichugi, tbordaz, and vashirov(Thanks!!!)
|
||||
---
|
||||
.../suites/replication/acceptance_test.py | 12 ++++++++++
|
||||
.../suites/replication/acceptance_test.py | 10 +++++++-
|
||||
ldap/servers/plugins/replication/repl_extop.c | 24 ++++++++++++-------
|
||||
2 files changed, 27 insertions(+), 9 deletions(-)
|
||||
2 files changed, 24 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
index fc8622051..0f18edb44 100644
|
||||
index c2f3f2572..ede350c4c 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
@@ -1,5 +1,9 @@
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+<<<<<<< HEAD
|
||||
# Copyright (C) 2021 Red Hat, Inc.
|
||||
+=======
|
||||
-# Copyright (C) 2017 Red Hat, Inc.
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+>>>>>>> a623c3f90 (Issue 6655 - fix replication release replica decoding error)
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -453,6 +457,13 @@ def test_multi_subsuffix_replication(topo_m4):
|
||||
@@ -452,6 +452,13 @@ def test_multi_subsuffix_replication(topo_m4):
|
||||
f"User {user_dn} on supplier {user_obj._instance.serverid} "
|
||||
f"still has 'Description {j}'"
|
||||
)
|
||||
@ -47,7 +44,7 @@ index fc8622051..0f18edb44 100644
|
||||
finally:
|
||||
for suffix, test_users in test_users_by_suffix.items():
|
||||
for user in test_users:
|
||||
@@ -507,6 +518,7 @@ def test_new_suffix(topo_m4, new_suffix):
|
||||
@@ -506,6 +513,7 @@ def test_new_suffix(topo_m4, new_suffix):
|
||||
repl.remove_supplier(m1)
|
||||
repl.remove_supplier(m2)
|
||||
|
||||
@ -56,10 +53,10 @@ index fc8622051..0f18edb44 100644
|
||||
"""Check a replication with many attributes (add and delete)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
|
||||
index 14b756df1..dacc611c0 100644
|
||||
index f53b9811f..818d99431 100644
|
||||
--- a/ldap/servers/plugins/replication/repl_extop.c
|
||||
+++ b/ldap/servers/plugins/replication/repl_extop.c
|
||||
@@ -1134,6 +1134,12 @@ send_response:
|
||||
@@ -1124,6 +1124,12 @@ send_response:
|
||||
slapi_pblock_set(pb, SLAPI_EXT_OP_RET_OID, REPL_NSDS50_REPLICATION_RESPONSE_OID);
|
||||
}
|
||||
|
||||
@ -71,8 +68,8 @@ index 14b756df1..dacc611c0 100644
|
||||
+
|
||||
slapi_pblock_set(pb, SLAPI_EXT_OP_RET_VALUE, resp_bval);
|
||||
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
"multimaster_extop_StartNSDS50ReplicationRequest - "
|
||||
@@ -1251,12 +1257,6 @@ send_response:
|
||||
"multisupplier_extop_StartNSDS50ReplicationRequest - "
|
||||
@@ -1241,12 +1247,6 @@ send_response:
|
||||
if (NULL != ruv_bervals) {
|
||||
ber_bvecfree(ruv_bervals);
|
||||
}
|
||||
@ -85,7 +82,7 @@ index 14b756df1..dacc611c0 100644
|
||||
|
||||
return return_value;
|
||||
}
|
||||
@@ -1389,6 +1389,13 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
|
||||
@@ -1381,6 +1381,13 @@ multisupplier_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
|
||||
}
|
||||
}
|
||||
send_response:
|
||||
@ -99,7 +96,7 @@ index 14b756df1..dacc611c0 100644
|
||||
/* Send the response code */
|
||||
if ((resp_bere = der_alloc()) == NULL) {
|
||||
goto free_and_return;
|
||||
@@ -1419,11 +1426,10 @@ free_and_return:
|
||||
@@ -1411,11 +1418,10 @@ free_and_return:
|
||||
if (NULL != resp_bval) {
|
||||
ber_bvfree(resp_bval);
|
||||
}
|
||||
@ -112,7 +109,7 @@ index 14b756df1..dacc611c0 100644
|
||||
}
|
||||
|
||||
return return_value;
|
||||
@@ -1516,7 +1522,7 @@ multimaster_extop_abort_cleanruv(Slapi_PBlock *pb)
|
||||
@@ -1508,7 +1514,7 @@ multisupplier_extop_abort_cleanruv(Slapi_PBlock *pb)
|
||||
rid);
|
||||
}
|
||||
/*
|
||||
@ -120,7 +117,7 @@ index 14b756df1..dacc611c0 100644
|
||||
+ * Get the replica
|
||||
*/
|
||||
if ((r = replica_get_replica_from_root(repl_root)) == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multimaster_extop_abort_cleanruv - "
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multisupplier_extop_abort_cleanruv - "
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -0,0 +1,67 @@
|
||||
From c9d051ba371354611ee086bf785578fe2934690d Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 19 May 2025 17:01:16 -0400
|
||||
Subject: [PATCH] Issue 6787 - Improve error message when bulk import
|
||||
connection is closed
|
||||
|
||||
Description:
|
||||
|
||||
If an online replication initialization connection is closed a vague error
|
||||
message is reported when the init is aborted:
|
||||
|
||||
factory_destructor - ERROR bulk import abandoned
|
||||
|
||||
It should be clear that the import is being abandoned because the connection
|
||||
was closed and identify the conn id.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/6787
|
||||
|
||||
Reviewed by: progier(Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/import.c | 7 +++++--
|
||||
ldap/servers/slapd/back-ldbm/proto-back-ldbm.h | 2 +-
|
||||
2 files changed, 6 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
|
||||
index 30ec462fa..5a03bb533 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/import.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/import.c
|
||||
@@ -189,9 +189,10 @@ factory_constructor(void *object __attribute__((unused)), void *parent __attribu
|
||||
}
|
||||
|
||||
void
|
||||
-factory_destructor(void *extension, void *object __attribute__((unused)), void *parent __attribute__((unused)))
|
||||
+factory_destructor(void *extension, void *object, void *parent __attribute__((unused)))
|
||||
{
|
||||
ImportJob *job = (ImportJob *)extension;
|
||||
+ Connection *conn = (Connection *)object;
|
||||
PRThread *thread;
|
||||
|
||||
if (extension == NULL)
|
||||
@@ -203,7 +204,9 @@ factory_destructor(void *extension, void *object __attribute__((unused)), void *
|
||||
*/
|
||||
thread = job->main_thread;
|
||||
slapi_log_err(SLAPI_LOG_ERR, "factory_destructor",
|
||||
- "ERROR bulk import abandoned\n");
|
||||
+ "ERROR bulk import abandoned: conn=%ld was closed\n",
|
||||
+ conn->c_connid);
|
||||
+
|
||||
import_abort_all(job, 1);
|
||||
/* wait for bdb_import_main to finish... */
|
||||
PR_JoinThread(thread);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
|
||||
index 0317c184d..43cf83493 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
|
||||
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
|
||||
@@ -606,7 +606,7 @@ int ldbm_ancestorid_move_subtree(
|
||||
int ldbm_back_wire_import(Slapi_PBlock *pb);
|
||||
void import_abort_all(struct _ImportJob *job, int wait_for_them);
|
||||
void *factory_constructor(void *object __attribute__((unused)), void *parent __attribute__((unused)));
|
||||
-void factory_destructor(void *extension, void *object __attribute__((unused)), void *parent __attribute__((unused)));
|
||||
+void factory_destructor(void *extension, void *object, void *parent __attribute__((unused)));
|
||||
uint64_t wait_for_ref_count(Slapi_Counter *inst_ref_count);
|
||||
|
||||
/*
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
From 8cba0dd699541d562d74502f35176df33f188512 Mon Sep 17 00:00:00 2001
|
||||
From 25304ebd8bdd6230ed8aba8c93ee4a6b5ef1c665 Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Fri, 30 May 2025 11:12:43 +0000
|
||||
Subject: [PATCH] Issue 6641 - modrdn fails when a user is member of multiple
|
||||
@ -23,17 +23,14 @@ Fixes: https://github.com/389ds/389-ds-base/issues/6641
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6566
|
||||
|
||||
Reviewed by: @progier389, @tbordaz, @vashirov (Thank you)
|
||||
|
||||
(cherry picked from commit 132ce4ab158679475cb83dbe28cc4fd7ced5cd19)
|
||||
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
---
|
||||
.../tests/suites/plugins/modrdn_test.py | 174 ++++++++++++++++++
|
||||
ldap/servers/plugins/automember/automember.c | 11 +-
|
||||
ldap/servers/plugins/memberof/memberof.c | 123 +++++--------
|
||||
ldap/servers/plugins/referint/referint.c | 30 +--
|
||||
ldap/servers/plugins/memberof/memberof.c | 124 +++++--------
|
||||
ldap/servers/plugins/referint/referint.c | 43 +++--
|
||||
ldap/servers/slapd/modify.c | 51 +++++
|
||||
ldap/servers/slapd/slapi-plugin.h | 1 +
|
||||
6 files changed, 301 insertions(+), 89 deletions(-)
|
||||
6 files changed, 303 insertions(+), 101 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/plugins/modrdn_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/plugins/modrdn_test.py b/dirsrvtests/tests/suites/plugins/modrdn_test.py
|
||||
@ -217,10 +214,10 @@ index 000000000..be79b0c3c
|
||||
+ assert not groupA.is_member(user_orig_dn)
|
||||
+ assert not groupZ.is_member(user_orig_dn)
|
||||
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
|
||||
index 419adb052..fde92ee12 100644
|
||||
index 38f817e5d..f900db7f2 100644
|
||||
--- a/ldap/servers/plugins/automember/automember.c
|
||||
+++ b/ldap/servers/plugins/automember/automember.c
|
||||
@@ -1754,13 +1754,12 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
@@ -1755,13 +1755,12 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
}
|
||||
|
||||
mod_pb = slapi_pblock_new();
|
||||
@ -238,7 +235,7 @@ index 419adb052..fde92ee12 100644
|
||||
slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_update_member_value - Unable to add \"%s\" as "
|
||||
"a \"%s\" value to group \"%s\" (%s).\n",
|
||||
@@ -1770,7 +1769,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
@@ -1771,7 +1770,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
}
|
||||
} else {
|
||||
/* delete value */
|
||||
@ -248,10 +245,18 @@ index 419adb052..fde92ee12 100644
|
||||
"automember_update_member_value - Unable to delete \"%s\" as "
|
||||
"a \"%s\" value from group \"%s\" (%s).\n",
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index f79b083a9..f3dc7cf00 100644
|
||||
index 35fd1a4a0..16dae2195 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -1482,18 +1482,9 @@ memberof_del_dn_type_callback(Slapi_Entry *e, void *callback_data)
|
||||
@@ -964,7 +964,6 @@ modify_need_fixup(int set)
|
||||
mod_pb, memberof_get_config_area(),
|
||||
mods, 0, 0,
|
||||
memberof_get_plugin_id(), SLAPI_OP_FLAG_FIXUP|SLAPI_OP_FLAG_BYPASS_REFERRALS);
|
||||
-
|
||||
slapi_modify_internal_pb(mod_pb);
|
||||
slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
|
||||
slapi_pblock_destroy(mod_pb);
|
||||
@@ -1492,18 +1491,9 @@ memberof_del_dn_type_callback(Slapi_Entry *e, void *callback_data)
|
||||
mod.mod_op = LDAP_MOD_DELETE;
|
||||
mod.mod_type = ((memberof_del_dn_data *)callback_data)->type;
|
||||
mod.mod_values = val;
|
||||
@ -273,7 +278,7 @@ index f79b083a9..f3dc7cf00 100644
|
||||
slapi_pblock_destroy(mod_pb);
|
||||
|
||||
if (rc == LDAP_NO_SUCH_ATTRIBUTE && val[0] == NULL) {
|
||||
@@ -1966,6 +1957,7 @@ memberof_replace_dn_type_callback(Slapi_Entry *e, void *callback_data)
|
||||
@@ -1976,6 +1966,7 @@ memberof_replace_dn_type_callback(Slapi_Entry *e, void *callback_data)
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -281,7 +286,7 @@ index f79b083a9..f3dc7cf00 100644
|
||||
LDAPMod **
|
||||
my_copy_mods(LDAPMod **orig_mods)
|
||||
{
|
||||
@@ -2774,33 +2766,6 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o
|
||||
@@ -2784,33 +2775,6 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o
|
||||
replace_mod.mod_values = replace_val;
|
||||
}
|
||||
rc = memberof_add_memberof_attr(mods, op_to, config->auto_add_oc);
|
||||
@ -315,7 +320,7 @@ index f79b083a9..f3dc7cf00 100644
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4454,43 +4419,57 @@ memberof_add_memberof_attr(LDAPMod **mods, const char *dn, char *add_oc)
|
||||
@@ -4461,43 +4425,57 @@ memberof_add_memberof_attr(LDAPMod **mods, const char *dn, char *add_oc)
|
||||
Slapi_PBlock *mod_pb = NULL;
|
||||
int added_oc = 0;
|
||||
int rc = 0;
|
||||
@ -407,10 +412,10 @@ index f79b083a9..f3dc7cf00 100644
|
||||
}
|
||||
slapi_pblock_destroy(mod_pb);
|
||||
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
|
||||
index 28240c1f6..c5e259d8d 100644
|
||||
index 218863ea5..a2f2e4706 100644
|
||||
--- a/ldap/servers/plugins/referint/referint.c
|
||||
+++ b/ldap/servers/plugins/referint/referint.c
|
||||
@@ -711,19 +711,28 @@ static int
|
||||
@@ -712,19 +712,28 @@ static int
|
||||
_do_modify(Slapi_PBlock *mod_pb, Slapi_DN *entrySDN, LDAPMod **mods)
|
||||
{
|
||||
int rc = 0;
|
||||
@ -449,19 +454,52 @@ index 28240c1f6..c5e259d8d 100644
|
||||
|
||||
return rc;
|
||||
}
|
||||
@@ -1033,7 +1042,6 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
|
||||
@@ -924,7 +933,6 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
|
||||
{
|
||||
Slapi_Mods *smods = NULL;
|
||||
char *newDN = NULL;
|
||||
- struct berval bv = {0};
|
||||
char **dnParts = NULL;
|
||||
char *sval = NULL;
|
||||
char *newvalue = NULL;
|
||||
@@ -1027,30 +1035,21 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
|
||||
}
|
||||
/* else: normalize_rc < 0) Ignore the DN normalization error for now. */
|
||||
|
||||
- bv.bv_val = newDN;
|
||||
- bv.bv_len = strlen(newDN);
|
||||
p = PL_strstr(sval, slapi_sdn_get_ndn(origDN));
|
||||
if (p == sval) {
|
||||
/* (case 1) */
|
||||
slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
|
||||
slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
|
||||
-
|
||||
- /* Add only if the attr value does not exist */
|
||||
- if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) {
|
||||
- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
|
||||
- }
|
||||
+ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
|
||||
} else if (p) {
|
||||
/* (case 2) */
|
||||
slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
|
||||
*p = '\0';
|
||||
newvalue = slapi_ch_smprintf("%s%s", sval, newDN);
|
||||
- /* Add only if the attr value does not exist */
|
||||
- if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) {
|
||||
- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue);
|
||||
- }
|
||||
+ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue);
|
||||
slapi_ch_free_string(&newvalue);
|
||||
}
|
||||
/* else: value does not include the modified DN. Ignore it. */
|
||||
slapi_ch_free_string(&sval);
|
||||
- bv = (struct berval){0};
|
||||
}
|
||||
rc = _do_modify(mod_pb, entrySDN, slapi_mods_get_ldapmods_byref(smods));
|
||||
if (rc) {
|
||||
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
|
||||
index 669bb104c..455eb63ec 100644
|
||||
index 55131d77c..8241fce28 100644
|
||||
--- a/ldap/servers/slapd/modify.c
|
||||
+++ b/ldap/servers/slapd/modify.c
|
||||
@@ -492,6 +492,57 @@ slapi_modify_internal_set_pb_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod
|
||||
@@ -489,6 +489,57 @@ slapi_modify_internal_set_pb_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod
|
||||
slapi_pblock_set(pb, SLAPI_PLUGIN_IDENTITY, plugin_identity);
|
||||
}
|
||||
|
||||
@ -520,10 +558,10 @@ index 669bb104c..455eb63ec 100644
|
||||
|
||||
static int
|
||||
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
|
||||
index 9fdcaccc8..a84a60c92 100644
|
||||
index de461706c..c30a2b8ec 100644
|
||||
--- a/ldap/servers/slapd/slapi-plugin.h
|
||||
+++ b/ldap/servers/slapd/slapi-plugin.h
|
||||
@@ -5965,6 +5965,7 @@ void slapi_add_entry_internal_set_pb(Slapi_PBlock *pb, Slapi_Entry *e, LDAPContr
|
||||
@@ -5955,6 +5955,7 @@ void slapi_add_entry_internal_set_pb(Slapi_PBlock *pb, Slapi_Entry *e, LDAPContr
|
||||
int slapi_add_internal_set_pb(Slapi_PBlock *pb, const char *dn, LDAPMod **attrs, LDAPControl **controls, Slapi_ComponentId *plugin_identity, int operation_flags);
|
||||
void slapi_modify_internal_set_pb(Slapi_PBlock *pb, const char *dn, LDAPMod **mods, LDAPControl **controls, const char *uniqueid, Slapi_ComponentId *plugin_identity, int operation_flags);
|
||||
void slapi_modify_internal_set_pb_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod **mods, LDAPControl **controls, const char *uniqueid, Slapi_ComponentId *plugin_identity, int operation_flags);
|
||||
@ -532,5 +570,5 @@ index 9fdcaccc8..a84a60c92 100644
|
||||
* Set \c Slapi_PBlock to perform modrdn/rename internally
|
||||
*
|
||||
--
|
||||
2.51.1
|
||||
2.49.0
|
||||
|
||||
115
0040-Issue-6791-crash-in-liblmdb-during-instance-shutdown.patch
Normal file
@ -0,0 +1,115 @@
|
||||
From 3bddcd38b0b3accdf9ccfecd117c1fb4fc229305 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Fri, 6 Jun 2025 15:26:52 +0200
|
||||
Subject: [PATCH] Issue 6791 - crash in liblmdb during instance shutdown
|
||||
(#6793)
|
||||
|
||||
Sometime ns-slapd process crashes during the shutdown.
|
||||
The core stacks shows that a thread attempts to use liblmdb after lmdb environment get closed
|
||||
in one of thread specific data destructor callback.
|
||||
The fix ensure that lmdb is not called after its environment get closed
|
||||
|
||||
Issue: #6791
|
||||
|
||||
Reviewed by: @droideck, @tbordaz (Thanks!)
|
||||
|
||||
(cherry picked from commit d7ba8408289601753b8f8f5298ac70ca1326b3b0)
|
||||
---
|
||||
.../slapd/back-ldbm/db-mdb/mdb_instance.c | 20 ++++++++++++++++++-
|
||||
.../slapd/back-ldbm/db-mdb/mdb_layer.h | 1 +
|
||||
ldap/servers/slapd/back-ldbm/db-mdb/mdb_txn.c | 4 +++-
|
||||
3 files changed, 23 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
|
||||
index 05f1e348d..b51333182 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
|
||||
@@ -97,6 +97,9 @@ typedef struct {
|
||||
static dbmdb_dbi_t *dbi_slots; /* The alloced slots */
|
||||
static int dbi_nbslots; /* Number of available slots in dbi_slots */
|
||||
|
||||
+static int32_t g_mdb_env_is_open = false;
|
||||
+static void dbmdb_set_is_env_open(bool is_open);
|
||||
+
|
||||
/*
|
||||
* twalk_r is not available before glibc-2.30 so lets replace it by twalk
|
||||
* and a global variable (it is possible because there is a single call
|
||||
@@ -720,6 +723,7 @@ int dbmdb_make_env(dbmdb_ctx_t *ctx, int readOnly, mdb_mode_t mode)
|
||||
rc = mdb_env_open(env, ctx->home, flags, mode);
|
||||
}
|
||||
if (rc ==0) {
|
||||
+ dbmdb_set_is_env_open(true);
|
||||
rc = mdb_env_info(env, &envinfo);
|
||||
}
|
||||
if (rc ==0) { /* Update the INFO file with the real size provided by the db */
|
||||
@@ -752,6 +756,7 @@ int dbmdb_make_env(dbmdb_ctx_t *ctx, int readOnly, mdb_mode_t mode)
|
||||
}
|
||||
if (rc != 0 && env) {
|
||||
ctx->env = NULL;
|
||||
+ dbmdb_set_is_env_open(false);
|
||||
mdb_env_close(env);
|
||||
}
|
||||
return rc;
|
||||
@@ -768,6 +773,7 @@ void dbmdb_ctx_close(dbmdb_ctx_t *ctx)
|
||||
*/
|
||||
}
|
||||
if (ctx->env) {
|
||||
+ dbmdb_set_is_env_open(false);
|
||||
mdb_env_close(ctx->env);
|
||||
ctx->env = NULL;
|
||||
}
|
||||
@@ -1735,7 +1741,7 @@ dbmdb_privdb_put(mdb_privdb_t *db, int dbi_idx, MDB_val *key, MDB_val *data)
|
||||
}
|
||||
|
||||
|
||||
-/* Create a private database environment */
|
||||
+/* Create a private database environment (used to build entryrdn during import) */
|
||||
mdb_privdb_t *
|
||||
dbmdb_privdb_create(dbmdb_ctx_t *ctx, size_t dbsize, ...)
|
||||
{
|
||||
@@ -1818,3 +1824,15 @@ bail:
|
||||
}
|
||||
return db;
|
||||
}
|
||||
+
|
||||
+bool
|
||||
+dbmdb_is_env_open()
|
||||
+{
|
||||
+ return (bool) slapi_atomic_load_32(&g_mdb_env_is_open, __ATOMIC_ACQUIRE);
|
||||
+}
|
||||
+
|
||||
+static void
|
||||
+dbmdb_set_is_env_open(bool is_open)
|
||||
+{
|
||||
+ slapi_atomic_store_32(&g_mdb_env_is_open, (int32_t)is_open, __ATOMIC_RELEASE);
|
||||
+}
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.h b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.h
|
||||
index fe230d60e..fdc4a9288 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.h
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.h
|
||||
@@ -504,6 +504,7 @@ int dbmdb_cmp_vals(MDB_val *v1, MDB_val *v2);
|
||||
dbmdb_stats_t *dbdmd_gather_stats(dbmdb_ctx_t *conf, backend *be);
|
||||
void dbmdb_free_stats(dbmdb_stats_t **stats);
|
||||
int dbmdb_reset_vlv_file(backend *be, const char *filename);
|
||||
+bool dbmdb_is_env_open(void);
|
||||
|
||||
/* mdb_txn.c */
|
||||
void shutdown_mdbtxn(void);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_txn.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_txn.c
|
||||
index 74088db89..7d2dfe36e 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_txn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_txn.c
|
||||
@@ -47,7 +47,9 @@ cleanup_mdbtxn_stack(void *arg)
|
||||
slapi_ch_free((void**)&anchor);
|
||||
while (txn) {
|
||||
txn2 = txn->parent;
|
||||
- TXN_ABORT(TXN(txn));
|
||||
+ if (dbmdb_is_env_open()) {
|
||||
+ TXN_ABORT(TXN(txn));
|
||||
+ }
|
||||
slapi_ch_free((void**)&txn);
|
||||
txn = txn2;
|
||||
}
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
From 284da99d0cd1ad16c702f4a4f68d2a479ac41576 Mon Sep 17 00:00:00 2001
|
||||
From d7808a65a3eccb3688a919ea64639cc2aaf454e7 Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Wed, 18 Jun 2025 11:12:28 +0200
|
||||
Subject: [PATCH] Issue 6819 - Incorrect pwdpolicysubentry returned for an
|
||||
@ -15,26 +15,37 @@ Update the template for CoS pointer definition to use
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6819
|
||||
|
||||
Reviewed by: @droideck, @tbordaz (Thanks!)
|
||||
|
||||
(cherry picked from commit 622c191302879035ef7450a29aa7569ee768c3ab)
|
||||
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
---
|
||||
.../suites/password/password_policy_test.py | 2 +-
|
||||
.../password/pwdPolicy_attribute_test.py | 73 +++++++++++++++++--
|
||||
src/lib389/lib389/pwpolicy.py | 2 +-
|
||||
2 files changed, 66 insertions(+), 9 deletions(-)
|
||||
3 files changed, 67 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/password/password_policy_test.py b/dirsrvtests/tests/suites/password/password_policy_test.py
|
||||
index f46f95981..528674381 100644
|
||||
--- a/dirsrvtests/tests/suites/password/password_policy_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/password_policy_test.py
|
||||
@@ -85,7 +85,7 @@ def create_subtree_policy_custom(instance, dn, properties):
|
||||
|
||||
# The CoS specification entry at the subtree level
|
||||
cos_pointer_defs = CosPointerDefinitions(instance, dn)
|
||||
- cos_pointer_defs.create(properties={'cosAttribute': 'pwdpolicysubentry default operational',
|
||||
+ cos_pointer_defs.create(properties={'cosAttribute': 'pwdpolicysubentry default operational-default',
|
||||
'cosTemplateDn': cos_template.dn,
|
||||
'cn': 'nsPwPolicy_CoS'})
|
||||
except ldap.LDAPError as e:
|
||||
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
|
||||
index c2c1e47fb..0dde8d637 100644
|
||||
index e3727b682..d0c172f94 100644
|
||||
--- a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
|
||||
@@ -59,17 +59,39 @@ def test_user(topology_st, request):
|
||||
@@ -59,17 +59,39 @@ def add_test_user(topology_st, request):
|
||||
return user
|
||||
|
||||
|
||||
-@pytest.fixture(scope="module")
|
||||
-def password_policy(topology_st, test_user):
|
||||
-def password_policy(topology_st, add_test_user):
|
||||
+@pytest.fixture(scope="function")
|
||||
+def password_policy(topology_st, request, test_user):
|
||||
+def password_policy(topology_st, request, add_test_user):
|
||||
"""Set up password policy for subtree and user"""
|
||||
|
||||
pwp = PwPolicyManager(topology_st.standalone)
|
||||
@ -72,8 +83,8 @@ index c2c1e47fb..0dde8d637 100644
|
||||
- pwp.create_user_policy(TEST_USER_DN, policy_props)
|
||||
|
||||
@pytest.mark.skipif(ds_is_older('1.4.3.3'), reason="Not implemented")
|
||||
def test_pwd_reset(topology_st, test_user):
|
||||
@@ -257,8 +279,43 @@ def test_pwd_min_age(topology_st, test_user, password_policy):
|
||||
def test_pwdReset_by_user_DM(topology_st, add_test_user):
|
||||
@@ -301,8 +323,43 @@ def test_pwd_min_age(topology_st, add_test_user, password_policy):
|
||||
log.info('Bind as DM')
|
||||
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
user.reset_password(TEST_USER_PWD)
|
||||
@ -133,5 +144,5 @@ index 7ffe449cc..6a47a44fe 100644
|
||||
'cn': 'nsPwPolicy_CoS'})
|
||||
except ldap.LDAPError as e:
|
||||
--
|
||||
2.51.1
|
||||
2.49.0
|
||||
|
||||
114
0042-Issue-6736-Exception-thrown-by-dsconf-instance-repl-.patch
Normal file
@ -0,0 +1,114 @@
|
||||
From 81cf0e55e7295fd033bc03711743ea81404625c0 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Wed, 16 Apr 2025 18:13:47 +0200
|
||||
Subject: [PATCH] Issue 6736 - Exception thrown by dsconf instance repl get_ruv
|
||||
(#6742)
|
||||
|
||||
After the issue #6715 commit, dsconf instance replication get_ruv fails with an
error because the inherited dict.get() does not go through __getitem__ as expected.
The solution is to override the get() function so the lookup is done properly.
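A minimal sketch (independent of the lib389 code) of why the override is needed: CPython's built-in dict.get() does not route through __getitem__, so a subclass that normalizes keys in __setitem__/__getitem__ still misses on get() unless get() is overridden as well. The normalization rule below is only an illustration.

```python
# Minimal sketch: dict.get() bypasses __getitem__, so a key-normalizing
# subclass must override get() explicitly.
class NormalizingDict(dict):
    @staticmethod
    def normalize(key):
        # Hypothetical normalization: strip leading zeros from a RID string
        return str(int(key))

    def __setitem__(self, key, value):
        super().__setitem__(self.normalize(key), value)

    def __getitem__(self, key):
        return super().__getitem__(self.normalize(key))

    def get(self, key, default=None):
        try:
            return self[key]          # goes through the normalizing __getitem__
        except KeyError:
            return default

d = NormalizingDict()
d['020'] = 'v2'
assert d['20'] == 'v2'        # __getitem__ normalizes the key
assert d.get('020') == 'v2'   # works only because get() is overridden
```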
|
||||
|
||||
Issue: #6736
|
||||
Relates: #6715
|
||||
|
||||
Reviewed by: @droideck (Thanks!)
|
||||
|
||||
(cherry picked from commit 64b2514d09b642913f2781ebdf8a5931f9e74eb6)
|
||||
---
|
||||
.../suites/replication/regression_m2_test.py | 42 +++++++++++++++++++
|
||||
src/lib389/lib389/replica.py | 12 +++++-
|
||||
2 files changed, 53 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
index 1a2f80522..10a5fa419 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
@@ -1271,6 +1271,48 @@ def test_normalized_rid_dict():
|
||||
assert nrd[nkey] == val
|
||||
|
||||
|
||||
+def test_get_with_normalized_rid_dict():
|
||||
+ """Check lib389.replica NormalizedRidDict.get() function
|
||||
+
|
||||
+ :id: 4422e1be-1619-11f0-a37a-482ae39447e5
|
||||
+ :setup: None
|
||||
+ :steps:
|
||||
+ 1. Initialize a NormalizedRidDict
|
||||
+ 2. Check that normalization do something
|
||||
+ 3. Check that get() returns the expected value
|
||||
+ 4. Check get() with wrong key
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Should return the default value if it is provided
|
||||
+ otherwise it should return None
|
||||
+ """
|
||||
+
|
||||
+ sd = { '1': 'v1', '020': 'v2' }
|
||||
+ nsd = { NormalizedRidDict.normalize_rid(key): val for key,val in sd.items() }
|
||||
+ nkeys = list(nsd.keys())
|
||||
+
|
||||
+ # Initialize a NormalizedRidDict
|
||||
+ nrd = NormalizedRidDict()
|
||||
+ for key,val in sd.items():
|
||||
+ nrd[key] = val
|
||||
+
|
||||
+ # Check that get() returns the expected value
|
||||
+ for key,val in sd.items():
|
||||
+ nkey = NormalizedRidDict.normalize_rid(key)
|
||||
+ assert nrd.get(key) == val
|
||||
+ assert nrd.get(nkey) == val
|
||||
+ assert nrd.get(key, 'foo') == val
|
||||
+ assert nrd.get(nkey, 'foo') == val
|
||||
+
|
||||
+ # Check get() with wrong key
|
||||
+ assert nrd.get('99', 'foo2') == 'foo2'
|
||||
+ assert nrd.get('099', 'foo2') == 'foo2'
|
||||
+ assert nrd.get('99') is None
|
||||
+ assert nrd.get('099') is None
|
||||
+
|
||||
+
|
||||
def test_online_reinit_may_hang(topo_with_sigkill):
|
||||
"""Online reinitialization may hang when the first
|
||||
entry of the DB is RUV entry instead of the suffix
|
||||
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
|
||||
index 0dc3a0f54..5183c41e9 100644
|
||||
--- a/src/lib389/lib389/replica.py
|
||||
+++ b/src/lib389/lib389/replica.py
|
||||
@@ -841,6 +841,12 @@ class NormalizedRidDict(dict):
|
||||
nkey = NormalizedRidDict.normalize_rid(key)
|
||||
super().__setitem__(nkey, value)
|
||||
|
||||
+ def get(self, key, vdef=None):
|
||||
+ try:
|
||||
+ return self[key]
|
||||
+ except KeyError:
|
||||
+ return vdef
|
||||
+
|
||||
|
||||
class RUV(object):
|
||||
"""Represents the server in memory RUV object. The RUV contains each
|
||||
@@ -911,7 +917,7 @@ class RUV(object):
|
||||
ValueError("Wrong CSN value was supplied")
|
||||
|
||||
timestamp = int(csn[:8], 16)
|
||||
- time_str = datetime.datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
|
||||
+ time_str = datetime.datetime.fromtimestamp(timestamp, datetime.UTC).strftime('%Y-%m-%d %H:%M:%S')
|
||||
# We are parsing shorter CSN which contains only timestamp
|
||||
if len(csn) == 8:
|
||||
return time_str
|
||||
@@ -987,6 +993,10 @@ class RUV(object):
|
||||
return False
|
||||
return True
|
||||
|
||||
+ def __str__(self):
|
||||
+ return str(self.format_ruv())
|
||||
+
|
||||
+
|
||||
|
||||
class ChangelogLDIF(object):
|
||||
def __init__(self, file_path, output_file):
|
||||
--
|
||||
2.49.0
|
||||
|
||||
125
0043-Issue-6825-RootDN-Access-Control-Plugin-with-wildcar.patch
Normal file
@ -0,0 +1,125 @@
|
||||
From c5f5a8700faa51d80af48360afd1f382a2acba8f Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 25 Jun 2025 14:11:05 +0000
|
||||
Subject: [PATCH] =?UTF-8?q?Issue=206825=20-=20RootDN=20Access=20Control=20?=
|
||||
=?UTF-8?q?Plugin=20with=20wildcards=20for=20IP=20addre=E2=80=A6=20(#6826)?=
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
Bug description:
|
||||
RootDN Access Control Plugin with wildcards for IP addresses fails with
an error "Invalid IP address"
|
||||
|
||||
socket.inet_aton() validates IPv4 IP addresses and does not support wildcards.
|
||||
|
||||
Fix description:
|
||||
Add a regex pattern to match wildcard IP addresses and check that each
octet is between 0 and 255.
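A rough illustration of the approach in plain Python (not the exact lib389 implementation): socket.inet_aton() rejects wildcard strings outright, so the wildcard form needs a pattern match plus a per-octet range check.

```python
import re
import socket

def wildcard_ipv4_ok(ip):
    """Simplified sketch: accept IPv4 strings whose octets are 0-255 or '*'."""
    if not re.match(r'^(\d{1,3}|\*)(\.(\d{1,3}|\*)){3}$', ip):
        return False
    return all(o == '*' or 0 <= int(o) <= 255 for o in ip.split('.'))

# socket.inet_aton() raises an error on wildcards, hence the regex approach.
try:
    socket.inet_aton('192.168.*.*')
except OSError:
    pass

assert wildcard_ipv4_ok('192.168.*.*')
assert not wildcard_ipv4_ok('300.1.*.*')
```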
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6825
|
||||
|
||||
Reviewed by: @droideck (Thank you)
|
||||
---
|
||||
.../lib389/cli_conf/plugins/rootdn_ac.py | 16 +++-----
|
||||
src/lib389/lib389/utils.py | 40 +++++++++++++++++++
|
||||
2 files changed, 45 insertions(+), 11 deletions(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py b/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py
|
||||
index 65486fff8..1456f5ebe 100644
|
||||
--- a/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py
|
||||
+++ b/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py
|
||||
@@ -8,7 +8,7 @@
|
||||
|
||||
import socket
|
||||
from lib389.plugins import RootDNAccessControlPlugin
|
||||
-from lib389.utils import is_valid_hostname
|
||||
+from lib389.utils import is_valid_hostname, is_valid_ip
|
||||
from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit
|
||||
from lib389.cli_base import CustomHelpFormatter
|
||||
|
||||
@@ -62,19 +62,13 @@ def validate_args(args):
|
||||
|
||||
if args.allow_ip is not None:
|
||||
for ip in args.allow_ip:
|
||||
- if ip != "delete":
|
||||
- try:
|
||||
- socket.inet_aton(ip)
|
||||
- except socket.error:
|
||||
- raise ValueError(f"Invalid IP address ({ip}) for '--allow-ip'")
|
||||
+ if ip != "delete" and not is_valid_ip(ip):
|
||||
+ raise ValueError(f"Invalid IP address ({ip}) for '--allow-ip'")
|
||||
|
||||
if args.deny_ip is not None and args.deny_ip != "delete":
|
||||
for ip in args.deny_ip:
|
||||
- if ip != "delete":
|
||||
- try:
|
||||
- socket.inet_aton(ip)
|
||||
- except socket.error:
|
||||
- raise ValueError(f"Invalid IP address ({ip}) for '--deny-ip'")
|
||||
+ if ip != "delete" and not is_valid_ip(ip):
|
||||
+ raise ValueError(f"Invalid IP address ({ip}) for '--deny-ip'")
|
||||
|
||||
if args.allow_host is not None:
|
||||
for hostname in args.allow_host:
|
||||
diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
|
||||
index 660fb1f0e..2053c71b7 100644
|
||||
--- a/src/lib389/lib389/utils.py
|
||||
+++ b/src/lib389/lib389/utils.py
|
||||
@@ -33,6 +33,7 @@ import shutil
|
||||
import ldap
|
||||
import mmap
|
||||
import socket
|
||||
+import ipaddress
|
||||
import time
|
||||
import stat
|
||||
from datetime import (datetime, timedelta)
|
||||
@@ -1713,6 +1714,45 @@ def is_valid_hostname(hostname):
|
||||
allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
|
||||
return all(allowed.match(x) for x in hostname.split("."))
|
||||
|
||||
+def is_valid_ip(ip):
|
||||
+ """ Validate an IPv4 or IPv6 address, including asterisks for wildcards. """
|
||||
+ if '*' in ip and '.' in ip:
|
||||
+ ipv4_pattern = r'^(\d{1,3}|\*)\.(\d{1,3}|\*)\.(\d{1,3}|\*)\.(\d{1,3}|\*)$'
|
||||
+ if re.match(ipv4_pattern, ip):
|
||||
+ octets = ip.split('.')
|
||||
+ for octet in octets:
|
||||
+ if octet != '*':
|
||||
+ try:
|
||||
+ val = int(octet, 10)
|
||||
+ if not (0 <= val <= 255):
|
||||
+ return False
|
||||
+ except ValueError:
|
||||
+ return False
|
||||
+ return True
|
||||
+ else:
|
||||
+ return False
|
||||
+
|
||||
+ if '*' in ip and ':' in ip:
|
||||
+ ipv6_pattern = r'^([0-9a-fA-F]{1,4}|\*)(:([0-9a-fA-F]{1,4}|\*)){0,7}$'
|
||||
+ if re.match(ipv6_pattern, ip):
|
||||
+ octets = ip.split(':')
|
||||
+ for octet in octets:
|
||||
+ if octet != '*':
|
||||
+ try:
|
||||
+ val = int(octet, 16)
|
||||
+ if not (0 <= val <= 0xFFFF):
|
||||
+ return False
|
||||
+ except ValueError:
|
||||
+ return False
|
||||
+ return True
|
||||
+ else:
|
||||
+ return False
|
||||
+
|
||||
+ try:
|
||||
+ ipaddress.ip_address(ip)
|
||||
+ return True
|
||||
+ except ValueError:
|
||||
+ return False
|
||||
|
||||
def parse_size(size):
|
||||
"""
|
||||
--
|
||||
2.49.0
|
||||
|
||||
481
0044-Issue-6822-Backend-creation-cleanup-and-Database-UI-.patch
Normal file
@ -0,0 +1,481 @@
|
||||
From ce6dcab43433382ee86fd452ad037860bac44abb Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Fri, 27 Jun 2025 18:43:39 -0700
|
||||
Subject: [PATCH] Issue 6822 - Backend creation cleanup and Database UI tab
|
||||
error handling (#6823)
|
||||
|
||||
Description: Add rollback functionality when mapping tree creation fails
|
||||
during backend creation to prevent orphaned backends.
|
||||
Improve error handling in Database, Replication and Monitoring UI tabs
|
||||
to gracefully handle backend get-tree command failures.
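The backend.py side of the change reduces to a create-then-rollback pattern; a simplified sketch is below (the helper names are placeholders, not the real lib389 API):

```python
def create_backend_with_mapping_tree(create_backend, create_mapping_tree, delete_backend, log):
    """Sketch of the rollback: if the mapping tree cannot be created,
    delete the backend that was just added so it is not left orphaned."""
    backend = create_backend()
    try:
        create_mapping_tree(backend)
    except Exception as err:
        try:
            delete_backend(backend)
        except Exception as cleanup_error:
            log.error(f"Failed to clean up backend after mapping tree failure: {cleanup_error}")
        raise err
    return backend
```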
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6822
|
||||
|
||||
Reviewed by: @mreynolds389 (Thanks!)
|
||||
---
|
||||
src/cockpit/389-console/src/database.jsx | 119 ++++++++------
|
||||
src/cockpit/389-console/src/monitor.jsx | 165 +++++++++++---------
|
||||
src/cockpit/389-console/src/replication.jsx | 55 ++++---
|
||||
src/lib389/lib389/backend.py | 18 ++-
|
||||
4 files changed, 210 insertions(+), 147 deletions(-)
|
||||
|
||||
diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx
|
||||
index c0c4be414..276125dfc 100644
|
||||
--- a/src/cockpit/389-console/src/database.jsx
|
||||
+++ b/src/cockpit/389-console/src/database.jsx
|
||||
@@ -478,6 +478,59 @@ export class Database extends React.Component {
|
||||
}
|
||||
|
||||
loadSuffixTree(fullReset) {
|
||||
+ const treeData = [
|
||||
+ {
|
||||
+ name: _("Global Database Configuration"),
|
||||
+ icon: <CogIcon />,
|
||||
+ id: "dbconfig",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Chaining Configuration"),
|
||||
+ icon: <ExternalLinkAltIcon />,
|
||||
+ id: "chaining-config",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Backups & LDIFs"),
|
||||
+ icon: <CopyIcon />,
|
||||
+ id: "backups",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Password Policies"),
|
||||
+ id: "pwp",
|
||||
+ icon: <KeyIcon />,
|
||||
+ children: [
|
||||
+ {
|
||||
+ name: _("Global Policy"),
|
||||
+ icon: <HomeIcon />,
|
||||
+ id: "pwpolicy",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Local Policies"),
|
||||
+ icon: <UsersIcon />,
|
||||
+ id: "localpwpolicy",
|
||||
+ },
|
||||
+ ],
|
||||
+ defaultExpanded: true
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Suffixes"),
|
||||
+ icon: <CatalogIcon />,
|
||||
+ id: "suffixes-tree",
|
||||
+ children: [],
|
||||
+ defaultExpanded: true,
|
||||
+ action: (
|
||||
+ <Button
|
||||
+ onClick={this.handleShowSuffixModal}
|
||||
+ variant="plain"
|
||||
+ aria-label="Create new suffix"
|
||||
+ title={_("Create new suffix")}
|
||||
+ >
|
||||
+ <PlusIcon />
|
||||
+ </Button>
|
||||
+ ),
|
||||
+ }
|
||||
+ ];
|
||||
+
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
"backend", "get-tree",
|
||||
@@ -491,58 +544,20 @@ export class Database extends React.Component {
|
||||
suffixData = JSON.parse(content);
|
||||
this.processTree(suffixData);
|
||||
}
|
||||
- const treeData = [
|
||||
- {
|
||||
- name: _("Global Database Configuration"),
|
||||
- icon: <CogIcon />,
|
||||
- id: "dbconfig",
|
||||
- },
|
||||
- {
|
||||
- name: _("Chaining Configuration"),
|
||||
- icon: <ExternalLinkAltIcon />,
|
||||
- id: "chaining-config",
|
||||
- },
|
||||
- {
|
||||
- name: _("Backups & LDIFs"),
|
||||
- icon: <CopyIcon />,
|
||||
- id: "backups",
|
||||
- },
|
||||
- {
|
||||
- name: _("Password Policies"),
|
||||
- id: "pwp",
|
||||
- icon: <KeyIcon />,
|
||||
- children: [
|
||||
- {
|
||||
- name: _("Global Policy"),
|
||||
- icon: <HomeIcon />,
|
||||
- id: "pwpolicy",
|
||||
- },
|
||||
- {
|
||||
- name: _("Local Policies"),
|
||||
- icon: <UsersIcon />,
|
||||
- id: "localpwpolicy",
|
||||
- },
|
||||
- ],
|
||||
- defaultExpanded: true
|
||||
- },
|
||||
- {
|
||||
- name: _("Suffixes"),
|
||||
- icon: <CatalogIcon />,
|
||||
- id: "suffixes-tree",
|
||||
- children: suffixData,
|
||||
- defaultExpanded: true,
|
||||
- action: (
|
||||
- <Button
|
||||
- onClick={this.handleShowSuffixModal}
|
||||
- variant="plain"
|
||||
- aria-label="Create new suffix"
|
||||
- title={_("Create new suffix")}
|
||||
- >
|
||||
- <PlusIcon />
|
||||
- </Button>
|
||||
- ),
|
||||
- }
|
||||
- ];
|
||||
+
|
||||
+ let current_node = this.state.node_name;
|
||||
+ if (fullReset) {
|
||||
+ current_node = DB_CONFIG;
|
||||
+ }
|
||||
+
|
||||
+ treeData[4].children = suffixData; // suffixes node
|
||||
+ this.setState(() => ({
|
||||
+ nodes: treeData,
|
||||
+ node_name: current_node,
|
||||
+ }), this.loadAttrs);
|
||||
+ })
|
||||
+ .fail(err => {
|
||||
+ // Handle backend get-tree failure gracefully
|
||||
let current_node = this.state.node_name;
|
||||
if (fullReset) {
|
||||
current_node = DB_CONFIG;
|
||||
diff --git a/src/cockpit/389-console/src/monitor.jsx b/src/cockpit/389-console/src/monitor.jsx
|
||||
index 7e0e0c5d4..c89be9051 100644
|
||||
--- a/src/cockpit/389-console/src/monitor.jsx
|
||||
+++ b/src/cockpit/389-console/src/monitor.jsx
|
||||
@@ -199,6 +199,84 @@ export class Monitor extends React.Component {
|
||||
}
|
||||
|
||||
loadSuffixTree(fullReset) {
|
||||
+ const basicData = [
|
||||
+ {
|
||||
+ name: _("Server Statistics"),
|
||||
+ icon: <ClusterIcon />,
|
||||
+ id: "server-monitor",
|
||||
+ type: "server",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Replication"),
|
||||
+ icon: <TopologyIcon />,
|
||||
+ id: "replication-monitor",
|
||||
+ type: "replication",
|
||||
+ defaultExpanded: true,
|
||||
+ children: [
|
||||
+ {
|
||||
+ name: _("Synchronization Report"),
|
||||
+ icon: <MonitoringIcon />,
|
||||
+ id: "sync-report",
|
||||
+ item: "sync-report",
|
||||
+ type: "repl-mon",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Log Analysis"),
|
||||
+ icon: <MonitoringIcon />,
|
||||
+ id: "log-analysis",
|
||||
+ item: "log-analysis",
|
||||
+ type: "repl-mon",
|
||||
+ }
|
||||
+ ],
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Database"),
|
||||
+ icon: <DatabaseIcon />,
|
||||
+ id: "database-monitor",
|
||||
+ type: "database",
|
||||
+ children: [], // Will be populated with treeData on success
|
||||
+ defaultExpanded: true,
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Logging"),
|
||||
+ icon: <CatalogIcon />,
|
||||
+ id: "log-monitor",
|
||||
+ defaultExpanded: true,
|
||||
+ children: [
|
||||
+ {
|
||||
+ name: _("Access Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "access-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Audit Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "audit-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Audit Failure Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "auditfail-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Errors Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "error-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Security Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "security-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ ]
|
||||
+ },
|
||||
+ ];
|
||||
+
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
"backend", "get-tree",
|
||||
@@ -209,76 +287,7 @@ export class Monitor extends React.Component {
|
||||
.done(content => {
|
||||
const treeData = JSON.parse(content);
|
||||
this.processTree(treeData);
|
||||
- const basicData = [
|
||||
- {
|
||||
- name: _("Server Statistics"),
|
||||
- icon: <ClusterIcon />,
|
||||
- id: "server-monitor",
|
||||
- type: "server",
|
||||
- },
|
||||
- {
|
||||
- name: _("Replication"),
|
||||
- icon: <TopologyIcon />,
|
||||
- id: "replication-monitor",
|
||||
- type: "replication",
|
||||
- defaultExpanded: true,
|
||||
- children: [
|
||||
- {
|
||||
- name: _("Synchronization Report"),
|
||||
- icon: <MonitoringIcon />,
|
||||
- id: "sync-report",
|
||||
- item: "sync-report",
|
||||
- type: "repl-mon",
|
||||
- },
|
||||
- ],
|
||||
- },
|
||||
- {
|
||||
- name: _("Database"),
|
||||
- icon: <DatabaseIcon />,
|
||||
- id: "database-monitor",
|
||||
- type: "database",
|
||||
- children: [],
|
||||
- defaultExpanded: true,
|
||||
- },
|
||||
- {
|
||||
- name: _("Logging"),
|
||||
- icon: <CatalogIcon />,
|
||||
- id: "log-monitor",
|
||||
- defaultExpanded: true,
|
||||
- children: [
|
||||
- {
|
||||
- name: _("Access Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "access-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- {
|
||||
- name: _("Audit Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "audit-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- {
|
||||
- name: _("Audit Failure Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "auditfail-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- {
|
||||
- name: _("Errors Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "error-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- {
|
||||
- name: _("Security Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "security-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- ]
|
||||
- },
|
||||
- ];
|
||||
+
|
||||
let current_node = this.state.node_name;
|
||||
let type = this.state.node_type;
|
||||
if (fullReset) {
|
||||
@@ -288,6 +297,22 @@ export class Monitor extends React.Component {
|
||||
basicData[2].children = treeData; // database node
|
||||
this.processReplSuffixes(basicData[1].children);
|
||||
|
||||
+ this.setState(() => ({
|
||||
+ nodes: basicData,
|
||||
+ node_name: current_node,
|
||||
+ node_type: type,
|
||||
+ }), this.update_tree_nodes);
|
||||
+ })
|
||||
+ .fail(err => {
|
||||
+ // Handle backend get-tree failure gracefully
|
||||
+ let current_node = this.state.node_name;
|
||||
+ let type = this.state.node_type;
|
||||
+ if (fullReset) {
|
||||
+ current_node = "server-monitor";
|
||||
+ type = "server";
|
||||
+ }
|
||||
+ this.processReplSuffixes(basicData[1].children);
|
||||
+
|
||||
this.setState(() => ({
|
||||
nodes: basicData,
|
||||
node_name: current_node,
|
||||
diff --git a/src/cockpit/389-console/src/replication.jsx b/src/cockpit/389-console/src/replication.jsx
|
||||
index fa492fd2a..aa535bfc7 100644
|
||||
--- a/src/cockpit/389-console/src/replication.jsx
|
||||
+++ b/src/cockpit/389-console/src/replication.jsx
|
||||
@@ -177,6 +177,16 @@ export class Replication extends React.Component {
|
||||
loaded: false
|
||||
});
|
||||
|
||||
+ const basicData = [
|
||||
+ {
|
||||
+ name: _("Suffixes"),
|
||||
+ icon: <TopologyIcon />,
|
||||
+ id: "repl-suffixes",
|
||||
+ children: [],
|
||||
+ defaultExpanded: true
|
||||
+ }
|
||||
+ ];
|
||||
+
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
"backend", "get-tree",
|
||||
@@ -199,15 +209,7 @@ export class Replication extends React.Component {
|
||||
}
|
||||
}
|
||||
}
|
||||
- const basicData = [
|
||||
- {
|
||||
- name: _("Suffixes"),
|
||||
- icon: <TopologyIcon />,
|
||||
- id: "repl-suffixes",
|
||||
- children: [],
|
||||
- defaultExpanded: true
|
||||
- }
|
||||
- ];
|
||||
+
|
||||
let current_node = this.state.node_name;
|
||||
let current_type = this.state.node_type;
|
||||
let replicated = this.state.node_replicated;
|
||||
@@ -258,6 +260,19 @@ export class Replication extends React.Component {
|
||||
}
|
||||
|
||||
basicData[0].children = treeData;
|
||||
+ this.setState({
|
||||
+ nodes: basicData,
|
||||
+ node_name: current_node,
|
||||
+ node_type: current_type,
|
||||
+ node_replicated: replicated,
|
||||
+ }, () => { this.update_tree_nodes() });
|
||||
+ })
|
||||
+ .fail(err => {
|
||||
+ // Handle backend get-tree failure gracefully
|
||||
+ let current_node = this.state.node_name;
|
||||
+ let current_type = this.state.node_type;
|
||||
+ let replicated = this.state.node_replicated;
|
||||
+
|
||||
this.setState({
|
||||
nodes: basicData,
|
||||
node_name: current_node,
|
||||
@@ -905,18 +920,18 @@ export class Replication extends React.Component {
|
||||
disableTree: false
|
||||
});
|
||||
});
|
||||
- })
|
||||
- .fail(err => {
|
||||
- const errMsg = JSON.parse(err);
|
||||
- this.props.addNotification(
|
||||
- "error",
|
||||
- cockpit.format(_("Error loading replication agreements configuration - $0"), errMsg.desc)
|
||||
- );
|
||||
- this.setState({
|
||||
- suffixLoading: false,
|
||||
- disableTree: false
|
||||
+ })
|
||||
+ .fail(err => {
|
||||
+ const errMsg = JSON.parse(err);
|
||||
+ this.props.addNotification(
|
||||
+ "error",
|
||||
+ cockpit.format(_("Error loading replication agreements configuration - $0"), errMsg.desc)
|
||||
+ );
|
||||
+ this.setState({
|
||||
+ suffixLoading: false,
|
||||
+ disableTree: false
|
||||
+ });
|
||||
});
|
||||
- });
|
||||
})
|
||||
.fail(err => {
|
||||
// changelog failure
|
||||
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
|
||||
index ee1597126..02169384a 100644
|
||||
--- a/src/lib389/lib389/backend.py
|
||||
+++ b/src/lib389/lib389/backend.py
|
||||
@@ -694,24 +694,32 @@ class Backend(DSLdapObject):
|
||||
parent_suffix = properties.pop('parent', False)
|
||||
|
||||
# Okay, now try to make the backend.
|
||||
- super(Backend, self).create(dn, properties, basedn)
|
||||
+ backend_obj = super(Backend, self).create(dn, properties, basedn)
|
||||
|
||||
# We check if the mapping tree exists in create, so do this *after*
|
||||
if create_mapping_tree is True:
|
||||
- properties = {
|
||||
+ mapping_tree_properties = {
|
||||
'cn': self._nprops_stash['nsslapd-suffix'],
|
||||
'nsslapd-state': 'backend',
|
||||
'nsslapd-backend': self._nprops_stash['cn'],
|
||||
}
|
||||
if parent_suffix:
|
||||
# This is a subsuffix, set the parent suffix
|
||||
- properties['nsslapd-parent-suffix'] = parent_suffix
|
||||
- self._mts.create(properties=properties)
|
||||
+ mapping_tree_properties['nsslapd-parent-suffix'] = parent_suffix
|
||||
+
|
||||
+ try:
|
||||
+ self._mts.create(properties=mapping_tree_properties)
|
||||
+ except Exception as e:
|
||||
+ try:
|
||||
+ backend_obj.delete()
|
||||
+ except Exception as cleanup_error:
|
||||
+ self._instance.log.error(f"Failed to cleanup backend after mapping tree creation failure: {cleanup_error}")
|
||||
+ raise e
|
||||
|
||||
# We can't create the sample entries unless a mapping tree was installed.
|
||||
if sample_entries is not False and create_mapping_tree is True:
|
||||
self.create_sample_entries(sample_entries)
|
||||
- return self
|
||||
+ return backend_obj
|
||||
|
||||
def delete(self):
|
||||
"""Deletes the backend, it's mapping tree and all related indices.
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
From 2b73c3596e724f314b0e09cf6209e0151260f7e5 Mon Sep 17 00:00:00 2001
|
||||
From 25d4e86f5e1935ffc7b0900392bd6911e5c6c4a4 Mon Sep 17 00:00:00 2001
|
||||
From: Alexander Bokovoy <abokovoy@redhat.com>
|
||||
Date: Wed, 9 Jul 2025 12:08:09 +0300
|
||||
Subject: [PATCH] Issue 6857 - uiduniq: allow specifying match rules in the
|
||||
@ -22,10 +22,10 @@ Signed-off-by: Alexander Bokovoy <abokovoy@redhat.com>
|
||||
1 file changed, 7 insertions(+)
|
||||
|
||||
diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
|
||||
index 5b763b551..15cf88477 100644
|
||||
index 053af4f9d..887e79d78 100644
|
||||
--- a/ldap/servers/plugins/uiduniq/uid.c
|
||||
+++ b/ldap/servers/plugins/uiduniq/uid.c
|
||||
@@ -1031,7 +1031,14 @@ preop_add(Slapi_PBlock *pb)
|
||||
@@ -1030,7 +1030,14 @@ preop_add(Slapi_PBlock *pb)
|
||||
}
|
||||
|
||||
for (i = 0; attrNames && attrNames[i]; i++) {
|
||||
@ -1,4 +1,4 @@
|
||||
From 3ba73d2aa55f18ff73d4b3901ce101133745effc Mon Sep 17 00:00:00 2001
|
||||
From 08526a9c791aa6953ada8759dea1370f989da1c5 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 9 Jul 2025 14:18:50 -0400
|
||||
Subject: [PATCH] Issue 6859 - str2filter is not fully applying matching rules
|
||||
@ -338,10 +338,10 @@ index b190e0ec1..b338f405f 100644
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ user.replace('cn', 'common_name')
|
||||
diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
|
||||
index 15cf88477..5a0d61b86 100644
|
||||
index 887e79d78..fdb1404a0 100644
|
||||
--- a/ldap/servers/plugins/uiduniq/uid.c
|
||||
+++ b/ldap/servers/plugins/uiduniq/uid.c
|
||||
@@ -1179,6 +1179,10 @@ preop_modify(Slapi_PBlock *pb)
|
||||
@@ -1178,6 +1178,10 @@ preop_modify(Slapi_PBlock *pb)
|
||||
for (; mods && *mods; mods++) {
|
||||
mod = *mods;
|
||||
for (i = 0; attrNames && attrNames[i]; i++) {
|
||||
@ -352,7 +352,7 @@ index 15cf88477..5a0d61b86 100644
|
||||
if ((slapi_attr_type_cmp(mod->mod_type, attrNames[i], 1) == 0) && /* mod contains target attr */
|
||||
(mod->mod_op & LDAP_MOD_BVALUES) && /* mod is bval encoded (not string val) */
|
||||
(mod->mod_bvalues && mod->mod_bvalues[0]) && /* mod actually contains some values */
|
||||
@@ -1187,6 +1191,9 @@ preop_modify(Slapi_PBlock *pb)
|
||||
@@ -1186,6 +1190,9 @@ preop_modify(Slapi_PBlock *pb)
|
||||
{
|
||||
addMod(&checkmods, &checkmodsCapacity, &modcount, mod);
|
||||
}
|
||||
@ -363,10 +363,10 @@ index 15cf88477..5a0d61b86 100644
|
||||
}
|
||||
if (modcount == 0) {
|
||||
diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c
|
||||
index 6cf88b7de..f40c9a39b 100644
|
||||
index b262820c5..67051a5ff 100644
|
||||
--- a/ldap/servers/slapd/plugin_mr.c
|
||||
+++ b/ldap/servers/slapd/plugin_mr.c
|
||||
@@ -624,7 +624,7 @@ attempt_mr_filter_create(mr_filter_t *f, struct slapdplugin *mrp, Slapi_PBlock *
|
||||
@@ -626,7 +626,7 @@ attempt_mr_filter_create(mr_filter_t *f, struct slapdplugin *mrp, Slapi_PBlock *
|
||||
int rc;
|
||||
IFP mrf_create = NULL;
|
||||
f->mrf_match = NULL;
|
||||
163
0047-Issue-6872-compressed-log-rotation-creates-files-wit.patch
Normal file
@ -0,0 +1,163 @@
|
||||
From 86a717f09431897b475deebdfb4f0a09ff50fe8f Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Tue, 15 Jul 2025 17:56:18 -0400
|
||||
Subject: [PATCH] Issue 6872 - compressed log rotation creates files with world
|
||||
readable permission
|
||||
|
||||
Description:
|
||||
|
||||
When compressing a log file, first create the empty file using open()
so we can set the correct permissions right from the start. gzopen()
always uses permission 644, and that is not safe. So after creating the
file with open() and the correct permissions, we pass the FD to gzdopen()
and write the compressed content.
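The same create-first-then-compress idea, sketched here in Python rather than the server's C code (os.open() applies the restrictive mode at creation time, and the gzip stream is then written through that already-created descriptor):

```python
import gzip
import os

def compress_log_file(log_name, mode=0o600):
    """Sketch: create the .gz file with restrictive permissions first,
    then stream the source log through a gzip writer on that descriptor."""
    gz_path = log_name + ".gz"
    fd = os.open(gz_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode)
    with os.fdopen(fd, "wb") as raw, gzip.GzipFile(fileobj=raw, mode="wb") as gz:
        with open(log_name, "rb") as src:
            while chunk := src.read(64 * 1024):
                gz.write(chunk)
```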
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/6872
|
||||
|
||||
Reviewed by: progier(Thanks!)
|
||||
---
|
||||
.../logging/logging_compression_test.py | 15 ++++++++--
|
||||
ldap/servers/slapd/log.c | 28 +++++++++++++------
|
||||
ldap/servers/slapd/schema.c | 2 +-
|
||||
3 files changed, 33 insertions(+), 12 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/logging/logging_compression_test.py b/dirsrvtests/tests/suites/logging/logging_compression_test.py
|
||||
index e30874cc0..3a987d62c 100644
|
||||
--- a/dirsrvtests/tests/suites/logging/logging_compression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/logging/logging_compression_test.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2022 Red Hat, Inc.
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -22,12 +22,21 @@ log = logging.getLogger(__name__)
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
+
|
||||
def log_rotated_count(log_type, log_dir, check_compressed=False):
|
||||
- # Check if the log was rotated
|
||||
+ """
|
||||
+ Check if the log was rotated and has the correct permissions
|
||||
+ """
|
||||
log_file = f'{log_dir}/{log_type}.2*'
|
||||
if check_compressed:
|
||||
log_file += ".gz"
|
||||
- return len(glob.glob(log_file))
|
||||
+ log_files = glob.glob(log_file)
|
||||
+ for logf in log_files:
|
||||
+ # Check permissions
|
||||
+ st = os.stat(logf)
|
||||
+ assert oct(st.st_mode) == '0o100600' # 0600
|
||||
+
|
||||
+ return len(log_files)
|
||||
|
||||
|
||||
def update_and_sleep(inst, suffix, sleep=True):
|
||||
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
|
||||
index 7e2c980a4..3344894d1 100644
|
||||
--- a/ldap/servers/slapd/log.c
|
||||
+++ b/ldap/servers/slapd/log.c
|
||||
@@ -172,17 +172,28 @@ get_syslog_loglevel(int loglevel)
|
||||
}
|
||||
|
||||
static int
|
||||
-compress_log_file(char *log_name)
|
||||
+compress_log_file(char *log_name, int32_t mode)
|
||||
{
|
||||
char gzip_log[BUFSIZ] = {0};
|
||||
char buf[LOG_CHUNK] = {0};
|
||||
size_t bytes_read = 0;
|
||||
gzFile outfile = NULL;
|
||||
FILE *source = NULL;
|
||||
+ int fd = 0;
|
||||
|
||||
PR_snprintf(gzip_log, sizeof(gzip_log), "%s.gz", log_name);
|
||||
- if ((outfile = gzopen(gzip_log,"wb")) == NULL) {
|
||||
- /* Failed to open new gzip file */
|
||||
+
|
||||
+ /*
|
||||
+ * Try to open the file as we may have an incorrect path. We also need to
|
||||
+ * set the permissions using open() as gzopen() creates the file with
|
||||
+ * 644 permissions (world readable - bad). So we create an empty file with
|
||||
+ * the correct permissions, then we pass the FD to gzdopen() to write the
|
||||
+ * compressed content.
|
||||
+ */
|
||||
+ if ((fd = open(gzip_log, O_WRONLY|O_CREAT|O_TRUNC, mode)) >= 0) {
|
||||
+ /* FIle successfully created, now pass the FD to gzdopen() */
|
||||
+ outfile = gzdopen(fd, "ab");
|
||||
+ } else {
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -191,6 +202,7 @@ compress_log_file(char *log_name)
|
||||
gzclose(outfile);
|
||||
return -1;
|
||||
}
|
||||
+
|
||||
bytes_read = fread(buf, 1, LOG_CHUNK, source);
|
||||
while (bytes_read > 0) {
|
||||
int bytes_written = gzwrite(outfile, buf, bytes_read);
|
||||
@@ -3291,7 +3303,7 @@ log__open_accesslogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_access_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_access_mode) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
|
||||
"failed to compress rotated access log (%s)\n",
|
||||
newfile);
|
||||
@@ -3455,7 +3467,7 @@ log__open_securitylogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_security_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_security_mode) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "log__open_securitylogfile",
|
||||
"failed to compress rotated security audit log (%s)\n",
|
||||
newfile);
|
||||
@@ -6172,7 +6184,7 @@ log__open_errorlogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_error_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_error_mode) != 0) {
|
||||
PR_snprintf(buffer, sizeof(buffer), "Failed to compress errors log file (%s)\n", newfile);
|
||||
log__error_emergency(buffer, 1, 1);
|
||||
} else {
|
||||
@@ -6355,7 +6367,7 @@ log__open_auditlogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_audit_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_audit_mode) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
|
||||
"failed to compress rotated audit log (%s)\n",
|
||||
newfile);
|
||||
@@ -6514,7 +6526,7 @@ log__open_auditfaillogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_auditfail_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_auditfail_mode) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
|
||||
"failed to compress rotated auditfail log (%s)\n",
|
||||
newfile);
|
||||
diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
|
||||
index 401a3dce8..9dee642b9 100644
|
||||
--- a/ldap/servers/slapd/schema.c
|
||||
+++ b/ldap/servers/slapd/schema.c
|
||||
@@ -903,7 +903,7 @@ oc_check_allowed_sv(Slapi_PBlock *pb, Slapi_Entry *e, const char *type, struct o
|
||||
|
||||
if (pb) {
|
||||
PR_snprintf(errtext, sizeof(errtext),
|
||||
- "attribute \"%s\" not allowed\n",
|
||||
+ "attribute \"%s\" not allowed",
|
||||
escape_string(type, ebuf));
|
||||
slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, errtext);
|
||||
}
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
From a7231528b5ad7e887eeed4317de48d054cd046cd Mon Sep 17 00:00:00 2001
|
||||
From 1757b59d3cd0ab5714373f8e016c8e6aedcd7897 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 23 Jul 2025 19:35:32 -0400
|
||||
Subject: [PATCH] Issue 6895 - Crash if repl keep alive entry can not be
|
||||
@ -13,9 +13,6 @@ we try and get the dn from the entry and we get a use-after-free crash.
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6895
|
||||
|
||||
Reviewed by: spichugi(Thanks!)
|
||||
|
||||
(cherry picked from commit 43ab6b1d1de138d6be03b657f27cbb6ba19ddd14)
|
||||
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
---
|
||||
ldap/servers/plugins/chainingdb/cb_config.c | 3 +--
|
||||
ldap/servers/plugins/posix-winsync/posix-winsync.c | 1 -
|
||||
@ -38,10 +35,10 @@ index 40a7088d7..24fa1bcb3 100644
|
||||
rc = res;
|
||||
slapi_pblock_destroy(util_pb);
|
||||
diff --git a/ldap/servers/plugins/posix-winsync/posix-winsync.c b/ldap/servers/plugins/posix-winsync/posix-winsync.c
|
||||
index 56efb2330..ab37497cd 100644
|
||||
index 51a55b643..3a002bb70 100644
|
||||
--- a/ldap/servers/plugins/posix-winsync/posix-winsync.c
|
||||
+++ b/ldap/servers/plugins/posix-winsync/posix-winsync.c
|
||||
@@ -1625,7 +1625,6 @@ posix_winsync_end_update_cb(void *cbdata __attribute__((unused)),
|
||||
@@ -1626,7 +1626,6 @@ posix_winsync_end_update_cb(void *cbdata __attribute__((unused)),
|
||||
"posix_winsync_end_update_cb: "
|
||||
"add task entry\n");
|
||||
}
|
||||
@ -50,7 +47,7 @@ index 56efb2330..ab37497cd 100644
|
||||
pb = NULL;
|
||||
posix_winsync_config_reset_MOFTaskCreated();
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_init.c b/ldap/servers/plugins/replication/repl5_init.c
|
||||
index 5a748e35a..9b6523a2e 100644
|
||||
index 8bc0b5372..5047fb8dc 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_init.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_init.c
|
||||
@@ -682,7 +682,6 @@ create_repl_schema_policy(void)
|
||||
@ -78,10 +75,10 @@ index 5a748e35a..9b6523a2e 100644
|
||||
}
|
||||
slapi_pblock_destroy(pb);
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
|
||||
index d67f1bc71..cec140140 100644
|
||||
index 59062b46b..a97c807e9 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_replica.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_replica.c
|
||||
@@ -440,10 +440,10 @@ replica_subentry_create(const char *repl_root, ReplicaId rid)
|
||||
@@ -465,10 +465,10 @@ replica_subentry_create(const char *repl_root, ReplicaId rid)
|
||||
if (return_value != LDAP_SUCCESS &&
|
||||
return_value != LDAP_ALREADY_EXISTS &&
|
||||
return_value != LDAP_REFERRAL /* CONSUMER */) {
|
||||
@ -97,5 +94,5 @@ index d67f1bc71..cec140140 100644
|
||||
goto done;
|
||||
}
|
||||
--
|
||||
2.51.1
|
||||
2.49.0
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
From 18a807e0e23b1160ea61e05e721da9fbd0c560b1 Mon Sep 17 00:00:00 2001
|
||||
From 04d050b856845d12dbc5fa5ed15d96be204a4a8b Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Mon, 28 Jul 2025 15:41:29 -0700
|
||||
Subject: [PATCH] Issue 6884 - Mask password hashes in audit logs (#6885)
|
||||
@ -14,23 +14,20 @@ password masking works correctly across all log formats and operation types.
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6884
|
||||
|
||||
Reviewed by: @mreynolds389, @vashirov (Thanks!!)
|
||||
|
||||
(cherry picked from commit 24f9aea1ae7e29bd885212825dc52d2a5db08a03)
|
||||
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
---
|
||||
.../logging/audit_password_masking_test.py | 457 ++++++++++++++++++
|
||||
ldap/servers/slapd/auditlog.c | 144 +++++-
|
||||
.../logging/audit_password_masking_test.py | 501 ++++++++++++++++++
|
||||
ldap/servers/slapd/auditlog.c | 170 +++++-
|
||||
ldap/servers/slapd/slapi-private.h | 1 +
|
||||
src/lib389/lib389/chaining.py | 3 +-
|
||||
4 files changed, 586 insertions(+), 19 deletions(-)
|
||||
4 files changed, 652 insertions(+), 23 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
new file mode 100644
|
||||
index 000000000..ae379cbba
|
||||
index 000000000..3b6a54849
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
@@ -0,0 +1,457 @@
|
||||
@@ -0,0 +1,501 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
@ -48,6 +45,7 @@ index 000000000..ae379cbba
|
||||
+from lib389._constants import DEFAULT_SUFFIX, DN_DM, PW_DM
|
||||
+from lib389.topologies import topology_m2 as topo
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.dirsrv_log import DirsrvAuditJSONLog
|
||||
+from lib389.plugins import ChainingBackendPlugin
|
||||
+from lib389.chaining import ChainingLinks
|
||||
+from lib389.agreement import Agreements
|
||||
@ -64,7 +62,9 @@ index 000000000..ae379cbba
|
||||
+
|
||||
+def setup_audit_logging(inst, log_format='default', display_attrs=None):
|
||||
+ """Configure audit logging settings"""
|
||||
+ inst.config.replace('nsslapd-auditlog-logbuffering', 'off')
|
||||
+ inst.config.replace('nsslapd-auditlog-logging-enabled', 'on')
|
||||
+ inst.config.replace('nsslapd-auditlog-log-format', log_format)
|
||||
+
|
||||
+ if display_attrs is not None:
|
||||
+ inst.config.replace('nsslapd-auditlog-display-attrs', display_attrs)
|
||||
@ -75,7 +75,7 @@ index 000000000..ae379cbba
|
||||
+def check_password_masked(inst, log_format, expected_password, actual_password):
|
||||
+ """Helper function to check password masking in audit logs"""
|
||||
+
|
||||
+ inst.restart() # Flush the logs
|
||||
+ time.sleep(1) # Allow log to flush
|
||||
+
|
||||
+ # List of all password/credential attributes that should be masked
|
||||
+ password_attributes = [
|
||||
@ -90,25 +90,50 @@ index 000000000..ae379cbba
|
||||
+ user_password_scheme = inst.config.get_attr_val_utf8('passwordStorageScheme')
|
||||
+ root_password_scheme = inst.config.get_attr_val_utf8('nsslapd-rootpwstoragescheme')
|
||||
+
|
||||
+ # Check LDIF format logs
|
||||
+ found_masked = False
|
||||
+ found_actual = False
|
||||
+ found_hashed = False
|
||||
+ if log_format == 'json':
|
||||
+ # Check JSON format logs
|
||||
+ audit_log = DirsrvAuditJSONLog(inst)
|
||||
+ log_lines = audit_log.readlines()
|
||||
+
|
||||
+ # Check each password attribute for masked password
|
||||
+ for attr in password_attributes:
|
||||
+ if inst.ds_audit_log.match(f"{attr}: {re.escape(expected_password)}"):
|
||||
+ found_masked = True
|
||||
+ if inst.ds_audit_log.match(f"{attr}: {actual_password}"):
|
||||
+ found_actual = True
|
||||
+ found_masked = False
|
||||
+ found_actual = False
|
||||
+ found_hashed = False
|
||||
+
|
||||
+ # Check for hashed passwords in LDIF format
|
||||
+ if user_password_scheme:
|
||||
+ if inst.ds_audit_log.match(f"userPassword: {{{user_password_scheme}}}"):
|
||||
+ found_hashed = True
|
||||
+ if root_password_scheme:
|
||||
+ if inst.ds_audit_log.match(f"nsslapd-rootpw: {{{root_password_scheme}}}"):
|
||||
+ found_hashed = True
|
||||
+ for line in log_lines:
|
||||
+ # Check if any password attribute is present in the line
|
||||
+ for attr in password_attributes:
|
||||
+ if attr in line:
|
||||
+ if expected_password in line:
|
||||
+ found_masked = True
|
||||
+ if actual_password in line:
|
||||
+ found_actual = True
|
||||
+ # Check for password scheme indicators (hashed passwords)
|
||||
+ if user_password_scheme and f'{{{user_password_scheme}}}' in line:
|
||||
+ found_hashed = True
|
||||
+ if root_password_scheme and f'{{{root_password_scheme}}}' in line:
|
||||
+ found_hashed = True
|
||||
+ break # Found a password attribute, no need to check others for this line
|
||||
+
|
||||
+ else:
|
||||
+ # Check LDIF format logs
|
||||
+ found_masked = False
|
||||
+ found_actual = False
|
||||
+ found_hashed = False
|
||||
+
|
||||
+ # Check each password attribute for masked password
|
||||
+ for attr in password_attributes:
|
||||
+ if inst.ds_audit_log.match(f"{attr}: {re.escape(expected_password)}"):
|
||||
+ found_masked = True
|
||||
+ if inst.ds_audit_log.match(f"{attr}: {actual_password}"):
|
||||
+ found_actual = True
|
||||
+
|
||||
+ # Check for hashed passwords in LDIF format
|
||||
+ if user_password_scheme:
|
||||
+ if inst.ds_audit_log.match(f"userPassword: {{{user_password_scheme}}}"):
|
||||
+ found_hashed = True
|
||||
+ if root_password_scheme:
|
||||
+ if inst.ds_audit_log.match(f"nsslapd-rootpw: {{{root_password_scheme}}}"):
|
||||
+ found_hashed = True
|
||||
+
|
||||
+ # Delete audit logs to avoid interference with other tests
|
||||
+ # We need to reset the root password to default as deleteAuditLogs()
|
||||
@ -124,6 +149,9 @@ index 000000000..ae379cbba
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "userPassword"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "userPassword")
|
||||
+])
|
||||
+def test_password_masking_add_operation(topo, log_format, display_attrs):
|
||||
+ """Test password masking in ADD operations
|
||||
@ -177,6 +205,9 @@ index 000000000..ae379cbba
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "userPassword"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "userPassword")
|
||||
+])
|
||||
+def test_password_masking_modify_operation(topo, log_format, display_attrs):
|
||||
+ """Test password masking in MODIFY operations
|
||||
@ -243,6 +274,9 @@ index 000000000..ae379cbba
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "nsslapd-rootpw"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "nsslapd-rootpw")
|
||||
+])
|
||||
+def test_password_masking_rootpw_modify_operation(topo, log_format, display_attrs):
|
||||
+ """Test password masking for nsslapd-rootpw MODIFY operations
|
||||
@ -272,7 +306,6 @@ index 000000000..ae379cbba
|
||||
+ try:
|
||||
+ dm.change_password(TEST_PASSWORD)
|
||||
+ dm.rebind(TEST_PASSWORD)
|
||||
+ dm.change_password(PW_DM)
|
||||
+
|
||||
+ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
|
||||
+ assert found_masked, f"Masked root password not found in {log_format} MODIFY operation (first root password)"
|
||||
@ -281,7 +314,6 @@ index 000000000..ae379cbba
|
||||
+
|
||||
+ dm.change_password(TEST_PASSWORD_2)
|
||||
+ dm.rebind(TEST_PASSWORD_2)
|
||||
+ dm.change_password(PW_DM)
|
||||
+
|
||||
+ found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
|
||||
+ assert found_masked_2, f"Masked root password not found in {log_format} MODIFY operation (second root password)"
|
||||
@ -297,6 +329,9 @@ index 000000000..ae379cbba
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "nsmultiplexorcredentials"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "nsmultiplexorcredentials")
|
||||
+])
|
||||
+def test_password_masking_multiplexor_credentials(topo, log_format, display_attrs):
|
||||
+ """Test password masking for nsmultiplexorcredentials in chaining/multiplexor configurations
|
||||
@ -365,6 +400,9 @@ index 000000000..ae379cbba
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "nsDS5ReplicaCredentials"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "nsDS5ReplicaCredentials")
|
||||
+])
|
||||
+def test_password_masking_replica_credentials(topo, log_format, display_attrs):
|
||||
+ """Test password masking for nsDS5ReplicaCredentials in replication agreements
|
||||
@ -426,6 +464,9 @@ index 000000000..ae379cbba
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "nsDS5ReplicaBootstrapCredentials"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "nsDS5ReplicaBootstrapCredentials")
|
||||
+])
|
||||
+def test_password_masking_bootstrap_credentials(topo, log_format, display_attrs):
|
||||
+ """Test password masking for nsDS5ReplicaCredentials and nsDS5ReplicaBootstrapCredentials in replication agreements
|
||||
@ -490,10 +531,10 @@ index 000000000..ae379cbba
|
||||
+ pytest.main(["-s", CURRENT_FILE])
|
||||
\ No newline at end of file
|
||||
diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
|
||||
index 0597ecc6f..c41415725 100644
|
||||
index ff9a6fdde..ed42069d4 100644
|
||||
--- a/ldap/servers/slapd/auditlog.c
|
||||
+++ b/ldap/servers/slapd/auditlog.c
|
||||
@@ -37,6 +37,89 @@ static void write_audit_file(Slapi_Entry *entry, int logtype, int optype, const
|
||||
@@ -39,6 +39,89 @@ static void write_audit_file(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
|
||||
static const char *modrdn_changes[4];
|
||||
|
||||
@ -583,11 +624,12 @@ index 0597ecc6f..c41415725 100644
|
||||
void
|
||||
write_audit_log_entry(Slapi_PBlock *pb)
|
||||
{
|
||||
@@ -248,7 +331,21 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
|
||||
@@ -279,10 +362,31 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
|
||||
{
|
||||
slapi_entry_attr_find(entry, req_attr, &entry_attr);
|
||||
if (entry_attr) {
|
||||
- log_entry_attr(entry_attr, req_attr, l);
|
||||
- if (use_json) {
|
||||
- log_entry_attr_json(entry_attr, req_attr, id_list);
|
||||
+ if (strcmp(req_attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) {
|
||||
+ /* Do not write the unhashed clear-text password */
|
||||
+ continue;
|
||||
@ -596,17 +638,28 @@ index 0597ecc6f..c41415725 100644
|
||||
+ /* Check if this is a password attribute that needs masking */
|
||||
+ if (is_password_attribute(req_attr)) {
|
||||
+ /* userpassword/rootdn password - mask the value */
|
||||
+ addlenstr(l, "#");
|
||||
+ addlenstr(l, req_attr);
|
||||
+ addlenstr(l, ": **********************\n");
|
||||
+ } else {
|
||||
+ if (use_json) {
|
||||
+ json_object *secret_obj = json_object_new_object();
|
||||
+ json_object_object_add(secret_obj, req_attr,
|
||||
+ json_object_new_string("**********************"));
|
||||
+ json_object_array_add(id_list, secret_obj);
|
||||
+ } else {
|
||||
+ addlenstr(l, "#");
|
||||
+ addlenstr(l, req_attr);
|
||||
+ addlenstr(l, ": **********************\n");
|
||||
+ }
|
||||
} else {
|
||||
- log_entry_attr(entry_attr, req_attr, l);
|
||||
+ /* Regular attribute - log normally */
|
||||
+ log_entry_attr(entry_attr, req_attr, l);
|
||||
+ }
|
||||
+ if (use_json) {
|
||||
+ log_entry_attr_json(entry_attr, req_attr, id_list);
|
||||
+ } else {
|
||||
+ log_entry_attr(entry_attr, req_attr, l);
|
||||
+ }
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -262,13 +359,11 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
|
||||
@@ -297,9 +401,7 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -615,14 +668,55 @@ index 0597ecc6f..c41415725 100644
|
||||
- {
|
||||
+ if (is_password_attribute(attr)) {
|
||||
/* userpassword/rootdn password - mask the value */
|
||||
addlenstr(l, "#");
|
||||
addlenstr(l, attr);
|
||||
- addlenstr(l, ": ****************************\n");
|
||||
+ addlenstr(l, ": **********************\n");
|
||||
if (use_json) {
|
||||
json_object *secret_obj = json_object_new_object();
|
||||
@@ -309,7 +411,7 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
|
||||
} else {
|
||||
addlenstr(l, "#");
|
||||
addlenstr(l, attr);
|
||||
- addlenstr(l, ": ****************************\n");
|
||||
+ addlenstr(l, ": **********************\n");
|
||||
}
|
||||
continue;
|
||||
}
|
||||
log_entry_attr(entry_attr, attr, l);
|
||||
@@ -354,6 +449,10 @@ write_audit_file(
|
||||
@@ -478,6 +580,9 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
}
|
||||
}
|
||||
|
||||
+ /* Check if this is a password attribute that needs masking */
|
||||
+ int is_password_attr = is_password_attribute(mods[j]->mod_type);
|
||||
+
|
||||
mod = json_object_new_object();
|
||||
switch (operationtype) {
|
||||
case LDAP_MOD_ADD:
|
||||
@@ -502,7 +607,12 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
json_object *val_list = NULL;
|
||||
val_list = json_object_new_array();
|
||||
for (size_t i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) {
|
||||
- json_object_array_add(val_list, json_object_new_string(mods[j]->mod_bvalues[i]->bv_val));
|
||||
+ if (is_password_attr) {
|
||||
+ /* Mask password values */
|
||||
+ json_object_array_add(val_list, json_object_new_string("**********************"));
|
||||
+ } else {
|
||||
+ json_object_array_add(val_list, json_object_new_string(mods[j]->mod_bvalues[i]->bv_val));
|
||||
+ }
|
||||
}
|
||||
json_object_object_add(mod, "values", val_list);
|
||||
}
|
||||
@@ -514,8 +624,11 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
}
|
||||
case SLAPI_OPERATION_ADD: {
|
||||
int len;
|
||||
+
|
||||
e = change;
|
||||
- tmp = slapi_entry2str(e, &len);
|
||||
+
|
||||
+ /* Create a masked string representation for password attributes */
|
||||
+ tmp = create_masked_entry_string(e, &len);
|
||||
tmpsave = tmp;
|
||||
while ((tmp = strchr(tmp, '\n')) != NULL) {
|
||||
tmp++;
|
||||
@@ -668,6 +781,10 @@ write_audit_file(
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -633,7 +727,7 @@ index 0597ecc6f..c41415725 100644
|
||||
switch (operationtype) {
|
||||
case LDAP_MOD_ADD:
|
||||
addlenstr(l, "add: ");
|
||||
@@ -378,18 +477,27 @@ write_audit_file(
|
||||
@@ -692,18 +809,27 @@ write_audit_file(
|
||||
break;
|
||||
}
|
||||
if (operationtype != LDAP_MOD_IGNORE) {
|
||||
@ -673,7 +767,7 @@ index 0597ecc6f..c41415725 100644
|
||||
}
|
||||
}
|
||||
addlenstr(l, "-\n");
|
||||
@@ -400,7 +508,7 @@ write_audit_file(
|
||||
@@ -714,7 +840,7 @@ write_audit_file(
|
||||
e = change;
|
||||
addlenstr(l, attr_changetype);
|
||||
addlenstr(l, ": add\n");
|
||||
@ -683,10 +777,10 @@ index 0597ecc6f..c41415725 100644
|
||||
while ((tmp = strchr(tmp, '\n')) != NULL) {
|
||||
tmp++;
|
||||
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
|
||||
index dfb0e272a..af2180e55 100644
|
||||
index b119da0bf..ccc7b6928 100644
|
||||
--- a/ldap/servers/slapd/slapi-private.h
|
||||
+++ b/ldap/servers/slapd/slapi-private.h
|
||||
@@ -843,6 +843,7 @@ void task_cleanup(void);
|
||||
@@ -846,6 +846,7 @@ void task_cleanup(void);
|
||||
/* for reversible encyrption */
|
||||
#define SLAPI_MB_CREDENTIALS "nsmultiplexorcredentials"
|
||||
#define SLAPI_REP_CREDENTIALS "nsds5ReplicaCredentials"
|
||||
@ -716,5 +810,5 @@ index 533b83ebf..33ae78c8b 100644
|
||||
|
||||
class ChainingLinks(DSLdapObjects):
|
||||
--
|
||||
2.51.1
|
||||
2.49.0
|
||||
|
||||
@ -0,0 +1,76 @@
|
||||
From ef25a3d11026d1b12353aa8faff2c0d5827282d4 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Fri, 4 Oct 2024 08:55:11 -0700
|
||||
Subject: [PATCH] Issue 6339 - Address Coverity scan issues in memberof and
|
||||
bdb_layer (#6353)
|
||||
|
||||
Description: Add null check for memberof attribute in memberof.c
|
||||
Fix memory leak by freeing 'cookie' in memberof.c
|
||||
Add null check for database environment in bdb_layer.c
|
||||
Fix race condition by adding mutex lock/unlock in bdb_layer.c
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6339
|
||||
|
||||
Reviewed by: @progier389, @tbordaz (Thanks!)
|
||||
---
|
||||
ldap/servers/plugins/memberof/memberof.c | 1 +
|
||||
ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c | 17 ++++++++++++++---
|
||||
2 files changed, 15 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index 16dae2195..3775e52c9 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -931,6 +931,7 @@ perform_needed_fixup(void)
|
||||
}
|
||||
be = slapi_get_next_backend(cookie);
|
||||
}
|
||||
+ slapi_ch_free_string(&cookie);
|
||||
slapi_ch_free_string(&td.bind_dn);
|
||||
slapi_ch_free_string(&td.filter_str);
|
||||
memberof_free_config(&config);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
index 7c3a7dcc9..f2b380f9b 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
@@ -6925,6 +6925,7 @@ bdb_public_private_open(backend *be, const char *db_filename, int rw, dbi_env_t
|
||||
bdb_config *conf = (bdb_config *)li->li_dblayer_config;
|
||||
bdb_db_env **ppEnv = (bdb_db_env**)&priv->dblayer_env;
|
||||
char dbhome[MAXPATHLEN];
|
||||
+ bdb_db_env *pEnv = NULL;
|
||||
DB_ENV *bdb_env = NULL;
|
||||
DB *bdb_db = NULL;
|
||||
struct stat st = {0};
|
||||
@@ -6974,7 +6975,13 @@ bdb_public_private_open(backend *be, const char *db_filename, int rw, dbi_env_t
|
||||
conf->bdb_tx_max = 50;
|
||||
rc = bdb_start(li, DBLAYER_NORMAL_MODE);
|
||||
if (rc == 0) {
|
||||
- bdb_env = ((struct bdb_db_env*)(priv->dblayer_env))->bdb_DB_ENV;
|
||||
+ pEnv = (bdb_db_env *)priv->dblayer_env;
|
||||
+ if (pEnv == NULL) {
|
||||
+ fprintf(stderr, "bdb_public_private_open: dbenv is not available (0x%p) for database %s\n",
|
||||
+ (void *)pEnv, db_filename ? db_filename : "unknown");
|
||||
+ return EINVAL;
|
||||
+ }
|
||||
+ bdb_env = pEnv->bdb_DB_ENV;
|
||||
}
|
||||
} else {
|
||||
/* Setup minimal environment */
|
||||
@@ -7018,8 +7025,12 @@ bdb_public_private_close(struct ldbminfo *li, dbi_env_t **env, dbi_db_t **db)
|
||||
if (priv) {
|
||||
/* Detect if db is fully set up in read write mode */
|
||||
bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
|
||||
- if (pEnv && pEnv->bdb_thread_count>0) {
|
||||
- rw = 1;
|
||||
+ if (pEnv) {
|
||||
+ pthread_mutex_lock(&pEnv->bdb_thread_count_lock);
|
||||
+ if (pEnv->bdb_thread_count > 0) {
|
||||
+ rw = 1;
|
||||
+ }
|
||||
+ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock);
|
||||
}
|
||||
}
|
||||
if (rw == 0) {
|
||||
--
|
||||
2.49.0
|
||||
|
||||
31
0051-Issue-6468-CLI-Fix-default-error-log-level.patch
Normal file
@@ -0,0 +1,31 @@
From ea9f39790ab5160deaeba364b1d679bf971ea148 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Tue, 29 Jul 2025 08:00:00 +0200
Subject: [PATCH] Issue 6468 - CLI - Fix default error log level

Description:
Default error log level is 16384

Relates: https://github.com/389ds/389-ds-base/issues/6468

Reviewed by: @droideck (Thanks!)
---
src/lib389/lib389/cli_conf/logging.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib389/lib389/cli_conf/logging.py b/src/lib389/lib389/cli_conf/logging.py
index efa034f93..7dc4b9dee 100644
--- a/src/lib389/lib389/cli_conf/logging.py
+++ b/src/lib389/lib389/cli_conf/logging.py
@@ -44,7 +44,7 @@ ERROR_LEVELS = {
+ "methods used for a SASL bind"
},
"default": {
- "level": 6384,
+ "level": 16384,
"desc": "Default logging level"
},
"filter": {
--
2.49.0

@@ -0,0 +1,97 @@
|
||||
From f00b9fa472dae3e623cdec88a45df728f5a12613 Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Fri, 1 Aug 2025 13:27:02 +0100
|
||||
Subject: [PATCH] Issue 6768 - ns-slapd crashes when a referral is added
|
||||
(#6780)
|
||||
|
||||
Bug description: When a paged result search is successfully run on a referred
|
||||
suffix, we retrieve the search result set from the pblock and try to release
|
||||
it. In this case the search result set is NULL, which triggers a SEGV during
|
||||
the release.
|
||||
|
||||
Fix description: If the search result code is LDAP_REFERRAL, skip deletion of
|
||||
the search result set. Added test case.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6768
|
||||
|
||||
Reviewed by: @tbordaz, @progier389 (Thank you)
|
||||
---
|
||||
.../paged_results/paged_results_test.py | 46 +++++++++++++++++++
|
||||
ldap/servers/slapd/opshared.c | 4 +-
|
||||
2 files changed, 49 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
|
||||
index fca48db0f..1bb94b53a 100644
|
||||
--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py
|
||||
+++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
|
||||
@@ -1271,6 +1271,52 @@ def test_search_stress_abandon(create_40k_users, create_user):
|
||||
paged_search(conn, create_40k_users.suffix, [req_ctrl], search_flt, searchreq_attrlist, abandon_rate=abandon_rate)
|
||||
|
||||
|
||||
+def test_search_referral(topology_st):
|
||||
+ """Test a paged search on a referred suffix doesnt crash the server.
|
||||
+
|
||||
+ :id: c788bdbf-965b-4f12-ac24-d4d695e2cce2
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Configure a default referral.
|
||||
+ 2. Create a paged result search control.
|
||||
+ 3. Paged result search on referral suffix (doesnt exist on the instance, triggering a referral).
|
||||
+ 4. Check the server is still running.
|
||||
+ 5. Remove referral.
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Referral sucessfully set.
|
||||
+ 2. Control created.
|
||||
+ 3. Search returns ldap.REFERRAL (10).
|
||||
+ 4. Server still running.
|
||||
+ 5. Referral removed.
|
||||
+ """
|
||||
+
|
||||
+ page_size = 5
|
||||
+ SEARCH_SUFFIX = "dc=referme,dc=com"
|
||||
+ REFERRAL = "ldap://localhost.localdomain:389/o%3dnetscaperoot"
|
||||
+
|
||||
+ log.info('Configuring referral')
|
||||
+ topology_st.standalone.config.set('nsslapd-referral', REFERRAL)
|
||||
+ referral = topology_st.standalone.config.get_attr_val_utf8('nsslapd-referral')
|
||||
+ assert (referral == REFERRAL)
|
||||
+
|
||||
+ log.info('Create paged result search control')
|
||||
+ req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
|
||||
+
|
||||
+ log.info('Perform a paged result search on referred suffix, no chase')
|
||||
+ with pytest.raises(ldap.REFERRAL):
|
||||
+ topology_st.standalone.search_ext_s(SEARCH_SUFFIX, ldap.SCOPE_SUBTREE, serverctrls=[req_ctrl])
|
||||
+
|
||||
+ log.info('Confirm instance is still running')
|
||||
+ assert (topology_st.standalone.status())
|
||||
+
|
||||
+ log.info('Remove referral')
|
||||
+ topology_st.standalone.config.remove_all('nsslapd-referral')
|
||||
+ referral = topology_st.standalone.config.get_attr_val_utf8('nsslapd-referral')
|
||||
+ assert (referral == None)
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
|
||||
index a9b10660b..bd473e28c 100644
|
||||
--- a/ldap/servers/slapd/opshared.c
|
||||
+++ b/ldap/servers/slapd/opshared.c
|
||||
@@ -895,7 +895,9 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
/* Free the results if not "no_such_object" */
|
||||
void *sr = NULL;
|
||||
slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
|
||||
- be->be_search_results_release(&sr);
|
||||
+ if (be->be_search_results_release != NULL) {
|
||||
+ be->be_search_results_release(&sr);
|
||||
+ }
|
||||
}
|
||||
pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
|
||||
rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1);
|
||||
--
|
||||
2.49.0
|
||||
|
||||
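The test added in the patch above drives the crash path from pytest; outside that harness the same scenario can be reproduced with a few lines of python-ldap. This is a hedged sketch based on the test: the URI, bind credentials, suffix and referral target are placeholders, not values from the source.

# Paged search against a suffix the server only knows as a referral. With
# referral chasing disabled the client gets LDAP_REFERRAL back; before the
# fix above the server crashed while releasing the (NULL) search result set.
import ldap
from ldap.controls import SimplePagedResultsControl

conn = ldap.initialize("ldap://localhost:389")        # placeholder URI
conn.set_option(ldap.OPT_REFERRALS, 0)                # do not chase the referral
conn.simple_bind_s("cn=Directory Manager", "password")  # placeholder credentials

req_ctrl = SimplePagedResultsControl(True, size=5, cookie='')
try:
    conn.search_ext_s("dc=referme,dc=com", ldap.SCOPE_SUBTREE,
                      serverctrls=[req_ctrl])
except ldap.REFERRAL:
    print("search was referred; the server should still be running")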
@@ -0,0 +1,92 @@
|
||||
From 66a7526ec1c3cdbc37189b7a34cc5ddb3c9b88f7 Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Tue, 13 May 2025 13:53:05 +0200
|
||||
Subject: [PATCH] Issue 6778 - Memory leak in
|
||||
roles_cache_create_object_from_entry
|
||||
|
||||
Bug Description:
|
||||
`this_role` has internal allocations (`dn`, `rolescopedn`, etc.)
|
||||
that are not freed.
|
||||
|
||||
Fix Description:
|
||||
Use `roles_cache_role_object_free` to free `this_role` and all its
|
||||
internal structures.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6778
|
||||
|
||||
Reviewed by: @mreynolds389 (Thanks!)
|
||||
---
|
||||
ldap/servers/plugins/roles/roles_cache.c | 15 ++++++++-------
|
||||
1 file changed, 8 insertions(+), 7 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c
|
||||
index bbed11802..60d7182e2 100644
|
||||
--- a/ldap/servers/plugins/roles/roles_cache.c
|
||||
+++ b/ldap/servers/plugins/roles/roles_cache.c
|
||||
@@ -1098,7 +1098,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
/* We determine the role type by reading the objectclass */
|
||||
if (roles_cache_is_role_entry(role_entry) == 0) {
|
||||
/* Bad type */
|
||||
- slapi_ch_free((void **)&this_role);
|
||||
+ roles_cache_role_object_free(this_role);
|
||||
return SLAPI_ROLE_DEFINITION_ERROR;
|
||||
}
|
||||
|
||||
@@ -1108,7 +1108,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
this_role->type = type;
|
||||
} else {
|
||||
/* Bad type */
|
||||
- slapi_ch_free((void **)&this_role);
|
||||
+ roles_cache_role_object_free(this_role);
|
||||
return SLAPI_ROLE_DEFINITION_ERROR;
|
||||
}
|
||||
|
||||
@@ -1166,7 +1166,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
filter_attr_value = (char *)slapi_entry_attr_get_charptr(role_entry, ROLE_FILTER_ATTR_NAME);
|
||||
if (filter_attr_value == NULL) {
|
||||
/* Means probably no attribute or no value there */
|
||||
- slapi_ch_free((void **)&this_role);
|
||||
+ roles_cache_role_object_free(this_role);
|
||||
return SLAPI_ROLE_ERROR_NO_FILTER_SPECIFIED;
|
||||
}
|
||||
|
||||
@@ -1205,7 +1205,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
(char *)slapi_sdn_get_ndn(this_role->dn),
|
||||
ROLE_FILTER_ATTR_NAME, filter_attr_value,
|
||||
ROLE_FILTER_ATTR_NAME);
|
||||
- slapi_ch_free((void **)&this_role);
|
||||
+ roles_cache_role_object_free(this_role);
|
||||
slapi_ch_free_string(&filter_attr_value);
|
||||
return SLAPI_ROLE_ERROR_FILTER_BAD;
|
||||
}
|
||||
@@ -1217,7 +1217,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
filter = slapi_str2filter(filter_attr_value);
|
||||
if (filter == NULL) {
|
||||
/* An error has occured */
|
||||
- slapi_ch_free((void **)&this_role);
|
||||
+ roles_cache_role_object_free(this_role);
|
||||
slapi_ch_free_string(&filter_attr_value);
|
||||
return SLAPI_ROLE_ERROR_FILTER_BAD;
|
||||
}
|
||||
@@ -1228,7 +1228,8 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
(char *)slapi_sdn_get_ndn(this_role->dn),
|
||||
filter_attr_value,
|
||||
ROLE_FILTER_ATTR_NAME);
|
||||
- slapi_ch_free((void **)&this_role);
|
||||
+ roles_cache_role_object_free(this_role);
|
||||
+ slapi_filter_free(filter, 1);
|
||||
slapi_ch_free_string(&filter_attr_value);
|
||||
return SLAPI_ROLE_ERROR_FILTER_BAD;
|
||||
}
|
||||
@@ -1285,7 +1286,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
if (rc == 0) {
|
||||
*result = this_role;
|
||||
} else {
|
||||
- slapi_ch_free((void **)&this_role);
|
||||
+ roles_cache_role_object_free(this_role);
|
||||
}
|
||||
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM,
|
||||
--
|
||||
2.49.0
|
||||
|
||||
262
0054-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch
Normal file
@@ -0,0 +1,262 @@
|
||||
From 2ef79bc91e8e418bb9f0b26a684fb2e24c9a07ec Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Wed, 16 Jul 2025 11:22:30 +0200
|
||||
Subject: [PATCH] Issue 6778 - Memory leak in
|
||||
roles_cache_create_object_from_entry part 2
|
||||
|
||||
Bug Description:
|
||||
Every time a role with scope DN is processed, we leak rolescopeDN.
|
||||
|
||||
Fix Description:
|
||||
* Initialize all pointer variables to NULL
|
||||
* Add additional NULL checks
|
||||
* Free rolescopeDN
|
||||
* Move test_rewriter_with_invalid_filter before the DB contains 90k entries
|
||||
* Use task.wait() for import task completion instead of parsing logs,
|
||||
increase the timeout
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6778
|
||||
|
||||
Reviewed by: @progier389 (Thanks!)
|
||||
---
|
||||
dirsrvtests/tests/suites/roles/basic_test.py | 164 +++++++++----------
|
||||
ldap/servers/plugins/roles/roles_cache.c | 10 +-
|
||||
2 files changed, 82 insertions(+), 92 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
index 32b7657c0..570fca2d5 100644
|
||||
--- a/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
@@ -510,6 +510,76 @@ def test_vattr_on_managed_role(topo, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+def test_rewriter_with_invalid_filter(topo, request):
|
||||
+ """Test that server does not crash when having
|
||||
+ invalid filter in filtered role
|
||||
+
|
||||
+ :id: 5013b0b2-0af6-11f0-8684-482ae39447e5
|
||||
+ :setup: standalone server
|
||||
+ :steps:
|
||||
+ 1. Setup filtered role with good filter
|
||||
+ 2. Setup nsrole rewriter
|
||||
+ 3. Restart the server
|
||||
+ 4. Search for entries
|
||||
+ 5. Setup filtered role with bad filter
|
||||
+ 6. Search for entries
|
||||
+ :expectedresults:
|
||||
+ 1. Operation should succeed
|
||||
+ 2. Operation should succeed
|
||||
+ 3. Operation should succeed
|
||||
+ 4. Operation should succeed
|
||||
+ 5. Operation should succeed
|
||||
+ 6. Operation should succeed
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ entries = []
|
||||
+
|
||||
+ def fin():
|
||||
+ inst.start()
|
||||
+ for entry in entries:
|
||||
+ entry.delete()
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Setup filtered role
|
||||
+ roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
|
||||
+ filter_ko = '(&((objectClass=top)(objectClass=nsPerson))'
|
||||
+ filter_ok = '(&(objectClass=top)(objectClass=nsPerson))'
|
||||
+ role_properties = {
|
||||
+ 'cn': 'TestFilteredRole',
|
||||
+ 'nsRoleFilter': filter_ok,
|
||||
+ 'description': 'Test good filter',
|
||||
+ }
|
||||
+ role = roles.create(properties=role_properties)
|
||||
+ entries.append(role)
|
||||
+
|
||||
+ # Setup nsrole rewriter
|
||||
+ rewriters = Rewriters(inst)
|
||||
+ rewriter_properties = {
|
||||
+ "cn": "nsrole",
|
||||
+ "nsslapd-libpath": 'libroles-plugin',
|
||||
+ "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter',
|
||||
+ }
|
||||
+ rewriter = rewriters.ensure_state(properties=rewriter_properties)
|
||||
+ entries.append(rewriter)
|
||||
+
|
||||
+ # Restart thge instance
|
||||
+ inst.restart()
|
||||
+
|
||||
+ # Search for entries
|
||||
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
+
|
||||
+ # Set bad filter
|
||||
+ role_properties = {
|
||||
+ 'cn': 'TestFilteredRole',
|
||||
+ 'nsRoleFilter': filter_ko,
|
||||
+ 'description': 'Test bad filter',
|
||||
+ }
|
||||
+ role.ensure_state(properties=role_properties)
|
||||
+
|
||||
+ # Search for entries
|
||||
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
+
|
||||
+
|
||||
def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
"""Test that filter components containing 'nsrole=xxx'
|
||||
are reworked if xxx is either a filtered role or a managed
|
||||
@@ -581,17 +651,11 @@ def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
PARENT="ou=people,%s" % DEFAULT_SUFFIX
|
||||
dbgen_users(topo.standalone, 90000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT)
|
||||
|
||||
- # online import
|
||||
+ # Online import
|
||||
import_task = ImportTask(topo.standalone)
|
||||
import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
|
||||
- # Check for up to 200sec that the completion
|
||||
- for i in range(1, 20):
|
||||
- if len(topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9000.*')) > 0:
|
||||
- break
|
||||
- time.sleep(10)
|
||||
- import_complete = topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9000.*')
|
||||
- assert (len(import_complete) == 1)
|
||||
-
|
||||
+ import_task.wait(timeout=400)
|
||||
+ assert import_task.get_exit_code() == 0
|
||||
# Restart server
|
||||
topo.standalone.restart()
|
||||
|
||||
@@ -715,17 +779,11 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
PARENT="ou=people,%s" % DEFAULT_SUFFIX
|
||||
dbgen_users(topo.standalone, 91000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT)
|
||||
|
||||
- # online import
|
||||
+ # Online import
|
||||
import_task = ImportTask(topo.standalone)
|
||||
import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
|
||||
- # Check for up to 200sec that the completion
|
||||
- for i in range(1, 20):
|
||||
- if len(topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9100.*')) > 0:
|
||||
- break
|
||||
- time.sleep(10)
|
||||
- import_complete = topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9100.*')
|
||||
- assert (len(import_complete) == 1)
|
||||
-
|
||||
+ import_task.wait(timeout=400)
|
||||
+ assert import_task.get_exit_code() == 0
|
||||
# Restart server
|
||||
topo.standalone.restart()
|
||||
|
||||
@@ -769,76 +827,6 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
request.addfinalizer(fin)
|
||||
|
||||
|
||||
-def test_rewriter_with_invalid_filter(topo, request):
|
||||
- """Test that server does not crash when having
|
||||
- invalid filter in filtered role
|
||||
-
|
||||
- :id: 5013b0b2-0af6-11f0-8684-482ae39447e5
|
||||
- :setup: standalone server
|
||||
- :steps:
|
||||
- 1. Setup filtered role with good filter
|
||||
- 2. Setup nsrole rewriter
|
||||
- 3. Restart the server
|
||||
- 4. Search for entries
|
||||
- 5. Setup filtered role with bad filter
|
||||
- 6. Search for entries
|
||||
- :expectedresults:
|
||||
- 1. Operation should succeed
|
||||
- 2. Operation should succeed
|
||||
- 3. Operation should succeed
|
||||
- 4. Operation should succeed
|
||||
- 5. Operation should succeed
|
||||
- 6. Operation should succeed
|
||||
- """
|
||||
- inst = topo.standalone
|
||||
- entries = []
|
||||
-
|
||||
- def fin():
|
||||
- inst.start()
|
||||
- for entry in entries:
|
||||
- entry.delete()
|
||||
- request.addfinalizer(fin)
|
||||
-
|
||||
- # Setup filtered role
|
||||
- roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
|
||||
- filter_ko = '(&((objectClass=top)(objectClass=nsPerson))'
|
||||
- filter_ok = '(&(objectClass=top)(objectClass=nsPerson))'
|
||||
- role_properties = {
|
||||
- 'cn': 'TestFilteredRole',
|
||||
- 'nsRoleFilter': filter_ok,
|
||||
- 'description': 'Test good filter',
|
||||
- }
|
||||
- role = roles.create(properties=role_properties)
|
||||
- entries.append(role)
|
||||
-
|
||||
- # Setup nsrole rewriter
|
||||
- rewriters = Rewriters(inst)
|
||||
- rewriter_properties = {
|
||||
- "cn": "nsrole",
|
||||
- "nsslapd-libpath": 'libroles-plugin',
|
||||
- "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter',
|
||||
- }
|
||||
- rewriter = rewriters.ensure_state(properties=rewriter_properties)
|
||||
- entries.append(rewriter)
|
||||
-
|
||||
- # Restart thge instance
|
||||
- inst.restart()
|
||||
-
|
||||
- # Search for entries
|
||||
- entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
-
|
||||
- # Set bad filter
|
||||
- role_properties = {
|
||||
- 'cn': 'TestFilteredRole',
|
||||
- 'nsRoleFilter': filter_ko,
|
||||
- 'description': 'Test bad filter',
|
||||
- }
|
||||
- role.ensure_state(properties=role_properties)
|
||||
-
|
||||
- # Search for entries
|
||||
- entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
-
|
||||
-
|
||||
if __name__ == "__main__":
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main("-s -v %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c
|
||||
index 60d7182e2..60f5a919a 100644
|
||||
--- a/ldap/servers/plugins/roles/roles_cache.c
|
||||
+++ b/ldap/servers/plugins/roles/roles_cache.c
|
||||
@@ -1117,16 +1117,17 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
|
||||
rolescopeDN = slapi_entry_attr_get_charptr(role_entry, ROLE_SCOPE_DN);
|
||||
if (rolescopeDN) {
|
||||
- Slapi_DN *rolescopeSDN;
|
||||
- Slapi_DN *top_rolescopeSDN, *top_this_roleSDN;
|
||||
+ Slapi_DN *rolescopeSDN = NULL;
|
||||
+ Slapi_DN *top_rolescopeSDN = NULL;
|
||||
+ Slapi_DN *top_this_roleSDN = NULL;
|
||||
|
||||
/* Before accepting to use this scope, first check if it belongs to the same suffix */
|
||||
rolescopeSDN = slapi_sdn_new_dn_byref(rolescopeDN);
|
||||
- if ((strlen((char *)slapi_sdn_get_ndn(rolescopeSDN)) > 0) &&
|
||||
+ if (rolescopeSDN && (strlen((char *)slapi_sdn_get_ndn(rolescopeSDN)) > 0) &&
|
||||
(slapi_dn_syntax_check(NULL, (char *)slapi_sdn_get_ndn(rolescopeSDN), 1) == 0)) {
|
||||
top_rolescopeSDN = roles_cache_get_top_suffix(rolescopeSDN);
|
||||
top_this_roleSDN = roles_cache_get_top_suffix(this_role->dn);
|
||||
- if (slapi_sdn_compare(top_rolescopeSDN, top_this_roleSDN) == 0) {
|
||||
+ if (top_rolescopeSDN && top_this_roleSDN && slapi_sdn_compare(top_rolescopeSDN, top_this_roleSDN) == 0) {
|
||||
/* rolescopeDN belongs to the same suffix as the role, we can use this scope */
|
||||
this_role->rolescopedn = rolescopeSDN;
|
||||
} else {
|
||||
@@ -1148,6 +1149,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
rolescopeDN);
|
||||
slapi_sdn_free(&rolescopeSDN);
|
||||
}
|
||||
+ slapi_ch_free_string(&rolescopeDN);
|
||||
}
|
||||
|
||||
/* Depending upon role type, pull out the remaining information we need */
|
||||
--
|
||||
2.49.0
|
||||
|
||||
58
0055-Issue-6848-AddressSanitizer-leak-in-do_search.patch
Normal file
@@ -0,0 +1,58 @@
|
||||
From bf29c2f6a2413fe66983d2bb66c23003b59a8312 Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Mon, 7 Jul 2025 22:01:09 +0200
|
||||
Subject: [PATCH] Issue 6848 - AddressSanitizer: leak in do_search
|
||||
|
||||
Bug Description:
|
||||
When there's a BER decoding error and the function goes to
|
||||
`free_and_return`, the `attrs` variable is not being freed because it's
|
||||
only freed if `!psearch || rc != 0 || err != 0`, but `err` is still 0 at
|
||||
that point.
|
||||
|
||||
If we reach `free_and_return` from the `ber_scanf` error path, `attrs`
|
||||
was never set in the pblock with `slapi_pblock_set()`, so the
|
||||
`slapi_pblock_get()` call will not retrieve the potentially partially
|
||||
allocated `attrs` from the BER decoding.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6848
|
||||
|
||||
Reviewed by: @tbordaz, @droideck (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/search.c | 14 ++++++++++++--
|
||||
1 file changed, 12 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/search.c b/ldap/servers/slapd/search.c
|
||||
index e9b2c3670..f9d03c090 100644
|
||||
--- a/ldap/servers/slapd/search.c
|
||||
+++ b/ldap/servers/slapd/search.c
|
||||
@@ -235,6 +235,7 @@ do_search(Slapi_PBlock *pb)
|
||||
log_search_access(pb, base, scope, fstr, "decoding error");
|
||||
send_ldap_result(pb, LDAP_PROTOCOL_ERROR, NULL, NULL, 0,
|
||||
NULL);
|
||||
+ err = 1; /* Make sure we free everything */
|
||||
goto free_and_return;
|
||||
}
|
||||
|
||||
@@ -420,8 +421,17 @@ free_and_return:
|
||||
if (!psearch || rc != 0 || err != 0) {
|
||||
slapi_ch_free_string(&fstr);
|
||||
slapi_filter_free(filter, 1);
|
||||
- slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &attrs);
|
||||
- charray_free(attrs); /* passing NULL is fine */
|
||||
+
|
||||
+ /* Get attrs from pblock if it was set there, otherwise use local attrs */
|
||||
+ char **pblock_attrs = NULL;
|
||||
+ slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &pblock_attrs);
|
||||
+ if (pblock_attrs != NULL) {
|
||||
+ charray_free(pblock_attrs); /* Free attrs from pblock */
|
||||
+ slapi_pblock_set(pb, SLAPI_SEARCH_ATTRS, NULL);
|
||||
+ } else if (attrs != NULL) {
|
||||
+ /* Free attrs that were allocated but never put in pblock */
|
||||
+ charray_free(attrs);
|
||||
+ }
|
||||
charray_free(gerattrs); /* passing NULL is fine */
|
||||
/*
|
||||
* Fix for defect 526719 / 553356 : Persistent search op failed.
|
||||
--
|
||||
2.49.0
|
||||
|
||||
268
0056-Issue-6940-dsconf-monitor-server-fails-with-ldapi-du.patch
Normal file
@@ -0,0 +1,268 @@
|
||||
From 125fe587e974b5ebcf159f9d9b6e016becc603c5 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 19 Aug 2025 16:10:09 -0700
|
||||
Subject: [PATCH] Issue 6940 - dsconf monitor server fails with ldapi:// due to
|
||||
absent server ID (#6941)
|
||||
|
||||
Description: The dsconf monitor server command fails when using ldapi://
|
||||
protocol because the server ID is not set, preventing PID retrieval from
|
||||
defaults.inf. This causes the Web console to fail displaying the "Server
|
||||
Version" field and potentially other CLI/WebUI issues.
|
||||
|
||||
The fix attempts to derive the server ID from the LDAPI socket path when
|
||||
not explicitly provided. This covers the common case where the socket name
|
||||
contains the instance name (e.g., slapd-instance.socket).
|
||||
If that's not possible, it also attempts to derive the server ID from the
|
||||
nsslapd-instancedir configuration attribute. The derived server ID
|
||||
is validated against actual system instances to ensure it exists.
|
||||
Note that socket names can vary and nsslapd-instancedir can be changed.
|
||||
This is a best-effort approach for the common naming pattern.
|
||||
|
||||
Also fixes the LDAPI socket path extraction which was incorrectly using
|
||||
offset 9 instead of 8 for ldapi:// URIs.
|
||||
|
||||
The monitor command now handles missing PIDs gracefully, returning zero
|
||||
values for process-specific stats instead of failing completely.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6940
|
||||
|
||||
Reviewed by: @vashirov, @mreynolds389 (Thanks!!)
|
||||
---
|
||||
src/lib389/lib389/__init__.py | 93 +++++++++++++++++++++++++++---
|
||||
src/lib389/lib389/cli_base/dsrc.py | 4 +-
|
||||
src/lib389/lib389/monitor.py | 50 ++++++++++++----
|
||||
3 files changed, 124 insertions(+), 23 deletions(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
|
||||
index 1ac9770b0..246da1820 100644
|
||||
--- a/src/lib389/lib389/__init__.py
|
||||
+++ b/src/lib389/lib389/__init__.py
|
||||
@@ -17,7 +17,7 @@
|
||||
|
||||
import sys
|
||||
import os
|
||||
-from urllib.parse import urlparse
|
||||
+from urllib.parse import urlparse, unquote
|
||||
import stat
|
||||
import pwd
|
||||
import grp
|
||||
@@ -67,7 +67,8 @@ from lib389.utils import (
|
||||
get_default_db_lib,
|
||||
selinux_present,
|
||||
selinux_label_port,
|
||||
- get_user_is_root)
|
||||
+ get_user_is_root,
|
||||
+ get_instance_list)
|
||||
from lib389.paths import Paths
|
||||
from lib389.nss_ssl import NssSsl
|
||||
from lib389.tasks import BackupTask, RestoreTask, Task
|
||||
@@ -304,6 +305,57 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
self.dbdir = self.ds_paths.db_dir
|
||||
self.changelogdir = os.path.join(os.path.dirname(self.dbdir), DEFAULT_CHANGELOG_DB)
|
||||
|
||||
+ def _extract_serverid_from_string(self, text):
|
||||
+ """Extract serverid from a string containing 'slapd-<serverid>' pattern.
|
||||
+ Returns the serverid or None if not found or validation fails.
|
||||
+ Only attempts derivation if serverid is currently None.
|
||||
+ """
|
||||
+ if getattr(self, 'serverid', None) is not None:
|
||||
+ return None
|
||||
+ if not text:
|
||||
+ return None
|
||||
+
|
||||
+ # Use regex to extract serverid from "slapd-<serverid>" or "slapd-<serverid>.socket"
|
||||
+ match = re.search(r'slapd-([A-Za-z0-9._-]+?)(?:\.socket)?(?:$|/)', text)
|
||||
+ if not match:
|
||||
+ return None
|
||||
+ candidate = match.group(1)
|
||||
+
|
||||
+ self.serverid = candidate
|
||||
+ try:
|
||||
+ insts = get_instance_list()
|
||||
+ except Exception:
|
||||
+ self.serverid = None
|
||||
+ return None
|
||||
+ if f'slapd-{candidate}' in insts or candidate in insts:
|
||||
+ return candidate
|
||||
+ # restore original and report failure
|
||||
+ self.serverid = None
|
||||
+ return None
|
||||
+
|
||||
+ def _derive_serverid_from_ldapi(self):
|
||||
+ """Attempt to derive serverid from an LDAPI socket path or URI and
|
||||
+ verify it exists on the system. Returns the serverid or None.
|
||||
+ """
|
||||
+ socket_path = None
|
||||
+ if hasattr(self, 'ldapi_socket') and self.ldapi_socket:
|
||||
+ socket_path = unquote(self.ldapi_socket)
|
||||
+ elif hasattr(self, 'ldapuri') and isinstance(self.ldapuri, str) and self.ldapuri.startswith('ldapi://'):
|
||||
+ socket_path = unquote(self.ldapuri[len('ldapi://'):])
|
||||
+
|
||||
+ return self._extract_serverid_from_string(socket_path)
|
||||
+
|
||||
+ def _derive_serverid_from_instancedir(self):
|
||||
+ """Extract serverid from nsslapd-instancedir path like '/usr/lib64/dirsrv/slapd-<serverid>'"""
|
||||
+ try:
|
||||
+ from lib389.config import Config
|
||||
+ config = Config(self)
|
||||
+ instancedir = config.get_attr_val_utf8_l("nsslapd-instancedir")
|
||||
+ except Exception:
|
||||
+ return None
|
||||
+
|
||||
+ return self._extract_serverid_from_string(instancedir)
|
||||
+
|
||||
def rebind(self):
|
||||
"""Reconnect to the DS
|
||||
|
||||
@@ -576,6 +628,15 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
self.ldapi_autobind = args.get(SER_LDAPI_AUTOBIND, 'off')
|
||||
self.isLocal = True
|
||||
self.log.debug("Allocate %s with %s", self.__class__, self.ldapi_socket)
|
||||
+ elif self.ldapuri is not None and isinstance(self.ldapuri, str) and self.ldapuri.startswith('ldapi://'):
|
||||
+ # Try to learn serverid from ldapi uri
|
||||
+ try:
|
||||
+ self.ldapi_enabled = 'on'
|
||||
+ self.ldapi_socket = unquote(self.ldapuri[len('ldapi://'):])
|
||||
+ self.ldapi_autobind = args.get(SER_LDAPI_AUTOBIND, 'off')
|
||||
+ self.isLocal = True
|
||||
+ except Exception:
|
||||
+ pass
|
||||
# Settings from args of server attributes
|
||||
self.strict_hostname = args.get(SER_STRICT_HOSTNAME_CHECKING, False)
|
||||
if self.strict_hostname is True:
|
||||
@@ -596,9 +657,16 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
|
||||
self.log.debug("Allocate %s with %s:%s", self.__class__, self.host, (self.sslport or self.port))
|
||||
|
||||
- if SER_SERVERID_PROP in args:
|
||||
- self.ds_paths = Paths(serverid=args[SER_SERVERID_PROP], instance=self, local=self.isLocal)
|
||||
+ # Try to determine serverid if not provided
|
||||
+ if SER_SERVERID_PROP in args and args.get(SER_SERVERID_PROP) is not None:
|
||||
self.serverid = args.get(SER_SERVERID_PROP, None)
|
||||
+ elif getattr(self, 'serverid', None) is None and self.isLocal:
|
||||
+ sid = self._derive_serverid_from_ldapi()
|
||||
+ if sid:
|
||||
+ self.serverid = sid
|
||||
+
|
||||
+ if getattr(self, 'serverid', None):
|
||||
+ self.ds_paths = Paths(serverid=self.serverid, instance=self, local=self.isLocal)
|
||||
else:
|
||||
self.ds_paths = Paths(instance=self, local=self.isLocal)
|
||||
|
||||
@@ -1032,6 +1100,17 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
self.__initPart2()
|
||||
self.state = DIRSRV_STATE_ONLINE
|
||||
# Now that we're online, some of our methods may try to query the version online.
|
||||
+
|
||||
+ # After transitioning online, attempt to derive serverid if still unknown.
|
||||
+ # If we find it, refresh ds_paths and rerun __initPart2
|
||||
+ if getattr(self, 'serverid', None) is None and self.isLocal:
|
||||
+ sid = self._derive_serverid_from_instancedir()
|
||||
+ if sid:
|
||||
+ self.serverid = sid
|
||||
+ # Reinitialize paths with the new serverid
|
||||
+ self.ds_paths = Paths(serverid=self.serverid, instance=self, local=self.isLocal)
|
||||
+ if not connOnly:
|
||||
+ self.__initPart2()
|
||||
self.__add_brookers__()
|
||||
|
||||
def close(self):
|
||||
@@ -3569,8 +3648,4 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
"""
|
||||
Get the pid of the running server
|
||||
"""
|
||||
- pid = pid_from_file(self.pid_file())
|
||||
- if pid == 0 or pid is None:
|
||||
- return 0
|
||||
- else:
|
||||
- return pid
|
||||
+ return pid_from_file(self.pid_file())
|
||||
diff --git a/src/lib389/lib389/cli_base/dsrc.py b/src/lib389/lib389/cli_base/dsrc.py
|
||||
index 84567b990..498228ce0 100644
|
||||
--- a/src/lib389/lib389/cli_base/dsrc.py
|
||||
+++ b/src/lib389/lib389/cli_base/dsrc.py
|
||||
@@ -56,7 +56,7 @@ def dsrc_arg_concat(args, dsrc_inst):
|
||||
new_dsrc_inst['args'][SER_ROOT_DN] = new_dsrc_inst['binddn']
|
||||
if new_dsrc_inst['uri'][0:8] == 'ldapi://':
|
||||
new_dsrc_inst['args'][SER_LDAPI_ENABLED] = "on"
|
||||
- new_dsrc_inst['args'][SER_LDAPI_SOCKET] = new_dsrc_inst['uri'][9:]
|
||||
+ new_dsrc_inst['args'][SER_LDAPI_SOCKET] = new_dsrc_inst['uri'][8:]
|
||||
new_dsrc_inst['args'][SER_LDAPI_AUTOBIND] = "on"
|
||||
|
||||
# Make new
|
||||
@@ -170,7 +170,7 @@ def dsrc_to_ldap(path, instance_name, log):
|
||||
dsrc_inst['args'][SER_ROOT_DN] = dsrc_inst['binddn']
|
||||
if dsrc_inst['uri'][0:8] == 'ldapi://':
|
||||
dsrc_inst['args'][SER_LDAPI_ENABLED] = "on"
|
||||
- dsrc_inst['args'][SER_LDAPI_SOCKET] = dsrc_inst['uri'][9:]
|
||||
+ dsrc_inst['args'][SER_LDAPI_SOCKET] = dsrc_inst['uri'][8:]
|
||||
dsrc_inst['args'][SER_LDAPI_AUTOBIND] = "on"
|
||||
|
||||
# Return the dict.
|
||||
diff --git a/src/lib389/lib389/monitor.py b/src/lib389/lib389/monitor.py
|
||||
index ec82b0346..9bf8aa993 100644
|
||||
--- a/src/lib389/lib389/monitor.py
|
||||
+++ b/src/lib389/lib389/monitor.py
|
||||
@@ -92,21 +92,47 @@ class Monitor(DSLdapObject):
|
||||
Get CPU and memory stats
|
||||
"""
|
||||
stats = {}
|
||||
- pid = self._instance.get_pid()
|
||||
+ try:
|
||||
+ pid = self._instance.get_pid()
|
||||
+ except Exception:
|
||||
+ pid = None
|
||||
total_mem = psutil.virtual_memory()[0]
|
||||
- p = psutil.Process(pid)
|
||||
- memory_stats = p.memory_full_info()
|
||||
|
||||
- # Get memory & CPU stats
|
||||
+ # Always include total system memory
|
||||
stats['total_mem'] = [str(total_mem)]
|
||||
- stats['rss'] = [str(memory_stats[0])]
|
||||
- stats['vms'] = [str(memory_stats[1])]
|
||||
- stats['swap'] = [str(memory_stats[9])]
|
||||
- stats['mem_rss_percent'] = [str(round(p.memory_percent("rss")))]
|
||||
- stats['mem_vms_percent'] = [str(round(p.memory_percent("vms")))]
|
||||
- stats['mem_swap_percent'] = [str(round(p.memory_percent("swap")))]
|
||||
- stats['total_threads'] = [str(p.num_threads())]
|
||||
- stats['cpu_usage'] = [str(round(p.cpu_percent(interval=0.1)))]
|
||||
+
|
||||
+ # Process-specific stats - only if process is running (pid is not None)
|
||||
+ if pid is not None:
|
||||
+ try:
|
||||
+ p = psutil.Process(pid)
|
||||
+ memory_stats = p.memory_full_info()
|
||||
+
|
||||
+ # Get memory & CPU stats
|
||||
+ stats['rss'] = [str(memory_stats[0])]
|
||||
+ stats['vms'] = [str(memory_stats[1])]
|
||||
+ stats['swap'] = [str(memory_stats[9])]
|
||||
+ stats['mem_rss_percent'] = [str(round(p.memory_percent("rss")))]
|
||||
+ stats['mem_vms_percent'] = [str(round(p.memory_percent("vms")))]
|
||||
+ stats['mem_swap_percent'] = [str(round(p.memory_percent("swap")))]
|
||||
+ stats['total_threads'] = [str(p.num_threads())]
|
||||
+ stats['cpu_usage'] = [str(round(p.cpu_percent(interval=0.1)))]
|
||||
+ except (psutil.NoSuchProcess, psutil.AccessDenied):
|
||||
+ # Process exists in PID file but is not accessible or doesn't exist
|
||||
+ pid = None
|
||||
+
|
||||
+ # If no valid PID, provide zero values for process stats
|
||||
+ if pid is None:
|
||||
+ stats['rss'] = ['0']
|
||||
+ stats['vms'] = ['0']
|
||||
+ stats['swap'] = ['0']
|
||||
+ stats['mem_rss_percent'] = ['0']
|
||||
+ stats['mem_vms_percent'] = ['0']
|
||||
+ stats['mem_swap_percent'] = ['0']
|
||||
+ stats['total_threads'] = ['0']
|
||||
+ stats['cpu_usage'] = ['0']
|
||||
+ stats['server_status'] = ['PID unavailable']
|
||||
+ else:
|
||||
+ stats['server_status'] = ['Server running']
|
||||
|
||||
# Connections to DS
|
||||
if self._instance.port == "0":
|
||||
--
|
||||
2.49.0
|
||||
|
||||
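The server-ID derivation described in the patch above comes down to two details: "ldapi://" is eight characters long (hence the offset fix from 9 to 8), and the instance name can usually be recovered from a socket path of the form slapd-<name>.socket. Below is a small standalone sketch of that logic using the same regular expression the patch adds; the example URI is a placeholder, and the real lib389 code additionally validates the candidate against get_instance_list() before trusting it.

# Standalone sketch only, not lib389 itself.
import re
from urllib.parse import unquote

def serverid_from_ldapi_uri(uri):
    prefix = "ldapi://"          # len(prefix) == 8, so the socket path starts at uri[8:]
    if not uri.startswith(prefix):
        return None
    socket_path = unquote(uri[len(prefix):])
    match = re.search(r'slapd-([A-Za-z0-9._-]+?)(?:\.socket)?(?:$|/)', socket_path)
    return match.group(1) if match else None

# Example with a placeholder, percent-encoded socket path:
print(serverid_from_ldapi_uri("ldapi://%2Frun%2Fslapd-localhost.socket"))  # -> localhost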
@@ -0,0 +1,81 @@
|
||||
From 3f9ecd8bc1557c7e34c28ebe9fc91941036ae87d Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Sun, 6 Apr 2025 21:10:46 +0000
|
||||
Subject: [PATCH] Issue 6720 - Remove BDB attribute from MDB DB Monitor (#6721)
|
||||
|
||||
Bug description: Reference to a BDB attribute exists in DatabaseMonitorMDB
|
||||
class.
|
||||
|
||||
Fix description: Remove it
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6720
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6614
|
||||
|
||||
Reviewed by: @progier389 (Thank you)
|
||||
---
|
||||
.../389-console/src/lib/monitor/dbMonitor.jsx | 18 ------------------
|
||||
1 file changed, 18 deletions(-)
|
||||
|
||||
diff --git a/src/cockpit/389-console/src/lib/monitor/dbMonitor.jsx b/src/cockpit/389-console/src/lib/monitor/dbMonitor.jsx
|
||||
index 08aa1aaea..bb9950ccd 100644
|
||||
--- a/src/cockpit/389-console/src/lib/monitor/dbMonitor.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/monitor/dbMonitor.jsx
|
||||
@@ -605,12 +605,6 @@ export class DatabaseMonitorMDB extends React.Component {
|
||||
count = 1;
|
||||
}
|
||||
|
||||
- // Build up the DB Cache chart data
|
||||
- const dbratio = config.attrs.dbcachehitratio[0];
|
||||
- const chart_data = this.state.dbCacheList;
|
||||
- chart_data.shift();
|
||||
- chart_data.push({ name: _("Cache Hit Ratio"), x: count.toString(), y: parseInt(dbratio) });
|
||||
-
|
||||
// Build up the NDN Cache chart data
|
||||
const ndnratio = config.attrs.normalizeddncachehitratio[0];
|
||||
const ndn_chart_data = this.state.ndnCacheList;
|
||||
@@ -628,7 +622,6 @@ export class DatabaseMonitorMDB extends React.Component {
|
||||
this.setState({
|
||||
data: config.attrs,
|
||||
loading: false,
|
||||
- dbCacheList: chart_data,
|
||||
ndnCacheList: ndn_chart_data,
|
||||
ndnCacheUtilList: ndn_util_chart_data,
|
||||
count,
|
||||
@@ -651,10 +644,8 @@ export class DatabaseMonitorMDB extends React.Component {
|
||||
}
|
||||
|
||||
render() {
|
||||
- let chartColor = ChartThemeColor.green;
|
||||
let ndnChartColor = ChartThemeColor.green;
|
||||
let ndnUtilColor = ChartThemeColor.green;
|
||||
- let dbcachehit = 0;
|
||||
let ndncachehit = 0;
|
||||
let ndncachemax = 0;
|
||||
let ndncachecurr = 0;
|
||||
@@ -671,7 +662,6 @@ export class DatabaseMonitorMDB extends React.Component {
|
||||
);
|
||||
|
||||
if (!this.state.loading) {
|
||||
- dbcachehit = parseInt(this.state.data.dbcachehitratio[0]);
|
||||
ndncachehit = parseInt(this.state.data.normalizeddncachehitratio[0]);
|
||||
ndncachemax = parseInt(this.state.data.maxnormalizeddncachesize[0]);
|
||||
ndncachecurr = parseInt(this.state.data.currentnormalizeddncachesize[0]);
|
||||
@@ -681,14 +671,6 @@ export class DatabaseMonitorMDB extends React.Component {
|
||||
utilratio = 1;
|
||||
}
|
||||
|
||||
- // Database cache
|
||||
- if (dbcachehit > 89) {
|
||||
- chartColor = ChartThemeColor.green;
|
||||
- } else if (dbcachehit > 74) {
|
||||
- chartColor = ChartThemeColor.orange;
|
||||
- } else {
|
||||
- chartColor = ChartThemeColor.purple;
|
||||
- }
|
||||
// NDN cache ratio
|
||||
if (ndncachehit > 89) {
|
||||
ndnChartColor = ChartThemeColor.green;
|
||||
--
|
||||
2.49.0
|
||||
|
||||
375
0058-Issue-6614-CLI-Error-when-trying-to-display-global-D.patch
Normal file
@@ -0,0 +1,375 @@
|
||||
From 119971e87d2f2f8a47209cd7fe3c6cc807934880 Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Thu, 24 Apr 2025 20:16:38 +0000
|
||||
Subject: [PATCH] Issue 6614 - CLI - Error when trying to display global DB
|
||||
stats with LMDB (#6622)
|
||||
|
||||
Bug description:
|
||||
Displaying global monitor stats fails with key error. Caused by BDB
|
||||
backend keys being used when MDB is the configured DB implementation.
|
||||
|
||||
Fix description:
|
||||
Ensure backend and monitor keys match the configured DB implementation.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6614
|
||||
|
||||
Reviewed by: @droideck, @progier389 (Thank you)
|
||||
---
|
||||
.../tests/suites/monitor/monitor_test.py | 3 +-
|
||||
src/lib389/lib389/_constants.py | 4 +
|
||||
src/lib389/lib389/cli_conf/monitor.py | 130 ++++++++++--------
|
||||
src/lib389/lib389/monitor.py | 110 +++++++--------
|
||||
4 files changed, 128 insertions(+), 119 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/monitor/monitor_test.py b/dirsrvtests/tests/suites/monitor/monitor_test.py
|
||||
index 04b2f35fe..ada465890 100644
|
||||
--- a/dirsrvtests/tests/suites/monitor/monitor_test.py
|
||||
+++ b/dirsrvtests/tests/suites/monitor/monitor_test.py
|
||||
@@ -101,13 +101,12 @@ def test_monitor_ldbm(topo):
|
||||
|
||||
# Check that known attributes exist (only NDN cache stats)
|
||||
assert 'normalizeddncachehits' in monitor
|
||||
-
|
||||
# Check for library specific attributes
|
||||
if db_lib == 'bdb':
|
||||
assert 'dbcachehits' in monitor
|
||||
assert 'nsslapd-db-configured-locks' in monitor
|
||||
elif db_lib == 'mdb':
|
||||
- pass
|
||||
+ assert 'dbcachehits' not in monitor
|
||||
else:
|
||||
# Unknown - the server would probably fail to start but check it anyway
|
||||
log.fatal(f'Unknown backend library: {db_lib}')
|
||||
diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py
|
||||
index a3b8a4bcb..80c0b2773 100644
|
||||
--- a/src/lib389/lib389/_constants.py
|
||||
+++ b/src/lib389/lib389/_constants.py
|
||||
@@ -383,3 +383,7 @@ BDB_IMPL_STATUS = Enum('BDB_IMPL_STATUS', [
|
||||
'BUNDLED', # lib389 bundled rpm is installed
|
||||
'READ_ONLY', # Read-only version is available
|
||||
'NONE' ]) # bdb is not usasable
|
||||
+
|
||||
+# DB implementation
|
||||
+DB_IMPL_BDB = "bdb"
|
||||
+DB_IMPL_MDB = "mdb"
|
||||
diff --git a/src/lib389/lib389/cli_conf/monitor.py b/src/lib389/lib389/cli_conf/monitor.py
|
||||
index 1f55fd8f8..b01796549 100644
|
||||
--- a/src/lib389/lib389/cli_conf/monitor.py
|
||||
+++ b/src/lib389/lib389/cli_conf/monitor.py
|
||||
@@ -10,8 +10,8 @@
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
-from lib389.monitor import (Monitor, MonitorLDBM, MonitorSNMP,
|
||||
- MonitorDiskSpace)
|
||||
+from lib389._constants import (DB_IMPL_BDB, DB_IMPL_MDB)
|
||||
+from lib389.monitor import (Monitor, MonitorLDBM, MonitorSNMP, MonitorDiskSpace)
|
||||
from lib389.chaining import (ChainingLinks)
|
||||
from lib389.backend import Backends
|
||||
from lib389.utils import convert_bytes
|
||||
@@ -129,27 +129,30 @@ def db_monitor(inst, basedn, log, args):
|
||||
# Gather the global DB stats
|
||||
report_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
||||
ldbm_mon = ldbm_monitor.get_status()
|
||||
- dbcachesize = int(ldbm_mon['nsslapd-db-cache-size-bytes'][0])
|
||||
- # Warning: there are two different page sizes associated with bdb:
|
||||
- # - nsslapd-db-mp-pagesize the db mempool (i.e the db cache) page size which is usually 4K
|
||||
- # - nsslapd-db-pagesize the db instances (i.e id2entry, indexes, changelog) page size which
|
||||
- # is usually 8K
|
||||
- # To compute the db cache statistics we must use the nsslapd-db-mp-pagesize
|
||||
- if 'nsslapd-db-mp-pagesize' in ldbm_mon:
|
||||
- pagesize = int(ldbm_mon['nsslapd-db-mp-pagesize'][0])
|
||||
- else:
|
||||
- # targeting a remote instance that does not have github issue 5550 fix.
|
||||
- # So lets use the usual default file system preferred block size
|
||||
- # db cache free statistics may be wrong but we gave no way to
|
||||
- # compute it rightly.
|
||||
- pagesize = 4096
|
||||
- dbhitratio = ldbm_mon['dbcachehitratio'][0]
|
||||
- dbcachepagein = ldbm_mon['dbcachepagein'][0]
|
||||
- dbcachepageout = ldbm_mon['dbcachepageout'][0]
|
||||
- dbroevict = ldbm_mon['nsslapd-db-page-ro-evict-rate'][0]
|
||||
- dbpages = int(ldbm_mon['nsslapd-db-pages-in-use'][0])
|
||||
- dbcachefree = max(int(dbcachesize - (pagesize * dbpages)), 0)
|
||||
- dbcachefreeratio = dbcachefree/dbcachesize
|
||||
+ if ldbm_monitor.inst_db_impl == DB_IMPL_BDB:
|
||||
+ dbcachesize = int(ldbm_mon['nsslapd-db-cache-size-bytes'][0])
|
||||
+ # Warning: there are two different page sizes associated with bdb:
|
||||
+ # - nsslapd-db-mp-pagesize the db mempool (i.e the db cache) page size which is usually 4K
|
||||
+ # - nsslapd-db-pagesize the db instances (i.e id2entry, indexes, changelog) page size which
|
||||
+ # is usually 8K
|
||||
+ # To compute the db cache statistics we must use the nsslapd-db-mp-pagesize
|
||||
+ if 'nsslapd-db-mp-pagesize' in ldbm_mon:
|
||||
+ pagesize = int(ldbm_mon['nsslapd-db-mp-pagesize'][0])
|
||||
+ else:
|
||||
+ # targeting a remote instance that does not have github issue 5550 fix.
|
||||
+ # So lets use the usual default file system preferred block size
|
||||
+ # db cache free statistics may be wrong but we gave no way to
|
||||
+ # compute it rightly.
|
||||
+ pagesize = 4096
|
||||
+
|
||||
+ dbhitratio = ldbm_mon['dbcachehitratio'][0]
|
||||
+ dbcachepagein = ldbm_mon['dbcachepagein'][0]
|
||||
+ dbcachepageout = ldbm_mon['dbcachepageout'][0]
|
||||
+ dbroevict = ldbm_mon['nsslapd-db-page-ro-evict-rate'][0]
|
||||
+ dbpages = int(ldbm_mon['nsslapd-db-pages-in-use'][0])
|
||||
+ dbcachefree = max(int(dbcachesize - (pagesize * dbpages)), 0)
|
||||
+ dbcachefreeratio = dbcachefree/dbcachesize
|
||||
+
|
||||
ndnratio = ldbm_mon['normalizeddncachehitratio'][0]
|
||||
ndncursize = int(ldbm_mon['currentnormalizeddncachesize'][0])
|
||||
ndnmaxsize = int(ldbm_mon['maxnormalizeddncachesize'][0])
|
||||
@@ -165,14 +168,6 @@ def db_monitor(inst, basedn, log, args):
|
||||
# Build global cache stats
|
||||
result = {
|
||||
'date': report_time,
|
||||
- 'dbcache': {
|
||||
- 'hit_ratio': dbhitratio,
|
||||
- 'free': convert_bytes(str(dbcachefree)),
|
||||
- 'free_percentage': "{:.1f}".format(dbcachefreeratio * 100),
|
||||
- 'roevicts': dbroevict,
|
||||
- 'pagein': dbcachepagein,
|
||||
- 'pageout': dbcachepageout
|
||||
- },
|
||||
'ndncache': {
|
||||
'hit_ratio': ndnratio,
|
||||
'free': convert_bytes(str(ndnfree)),
|
||||
@@ -183,6 +178,16 @@ def db_monitor(inst, basedn, log, args):
|
||||
'backends': {},
|
||||
}
|
||||
|
||||
+ if ldbm_monitor.inst_db_impl == DB_IMPL_BDB:
|
||||
+ result['dbcache'] = {
|
||||
+ 'hit_ratio': dbhitratio,
|
||||
+ 'free': convert_bytes(str(dbcachefree)),
|
||||
+ 'free_percentage': "{:.1f}".format(dbcachefreeratio * 100),
|
||||
+ 'roevicts': dbroevict,
|
||||
+ 'pagein': dbcachepagein,
|
||||
+ 'pageout': dbcachepageout
|
||||
+ }
|
||||
+
|
||||
# Build the backend results
|
||||
for be in backend_objs:
|
||||
be_name = be.rdn
|
||||
@@ -202,17 +207,18 @@ def db_monitor(inst, basedn, log, args):
|
||||
else:
|
||||
entsize = int(entcur / entcnt)
|
||||
|
||||
- # Process DN cache stats
|
||||
- dncur = int(all_attrs['currentdncachesize'][0])
|
||||
- dnmax = int(all_attrs['maxdncachesize'][0])
|
||||
- dncnt = int(all_attrs['currentdncachecount'][0])
|
||||
- dnratio = all_attrs['dncachehitratio'][0]
|
||||
- dnfree = dnmax - dncur
|
||||
- dnfreep = "{:.1f}".format(dnfree / dnmax * 100)
|
||||
- if dncnt == 0:
|
||||
- dnsize = 0
|
||||
- else:
|
||||
- dnsize = int(dncur / dncnt)
|
||||
+ if ldbm_monitor.inst_db_impl == DB_IMPL_BDB:
|
||||
+ # Process DN cache stats
|
||||
+ dncur = int(all_attrs['currentdncachesize'][0])
|
||||
+ dnmax = int(all_attrs['maxdncachesize'][0])
|
||||
+ dncnt = int(all_attrs['currentdncachecount'][0])
|
||||
+ dnratio = all_attrs['dncachehitratio'][0]
|
||||
+ dnfree = dnmax - dncur
|
||||
+ dnfreep = "{:.1f}".format(dnfree / dnmax * 100)
|
||||
+ if dncnt == 0:
|
||||
+ dnsize = 0
|
||||
+ else:
|
||||
+ dnsize = int(dncur / dncnt)
|
||||
|
||||
# Build the backend result
|
||||
result['backends'][be_name] = {
|
||||
@@ -222,13 +228,15 @@ def db_monitor(inst, basedn, log, args):
|
||||
'entry_cache_free_percentage': entfreep,
|
||||
'entry_cache_size': convert_bytes(str(entsize)),
|
||||
'entry_cache_hit_ratio': entratio,
|
||||
- 'dn_cache_count': all_attrs['currentdncachecount'][0],
|
||||
- 'dn_cache_free': convert_bytes(str(dnfree)),
|
||||
- 'dn_cache_free_percentage': dnfreep,
|
||||
- 'dn_cache_size': convert_bytes(str(dnsize)),
|
||||
- 'dn_cache_hit_ratio': dnratio,
|
||||
'indexes': []
|
||||
}
|
||||
+ if ldbm_monitor.inst_db_impl == DB_IMPL_BDB:
|
||||
+ backend = result['backends'][be_name]
|
||||
+ backend['dn_cache_count'] = all_attrs['currentdncachecount'][0]
|
||||
+ backend['dn_cache_free'] = convert_bytes(str(dnfree))
|
||||
+ backend['dn_cache_free_percentage'] = dnfreep
|
||||
+ backend['dn_cache_size'] = convert_bytes(str(dnsize))
|
||||
+ backend['dn_cache_hit_ratio'] = dnratio
|
||||
|
||||
# Process indexes if requested
|
||||
if args.indexes:
|
||||
@@ -260,14 +268,15 @@ def db_monitor(inst, basedn, log, args):
|
||||
else:
|
||||
log.info("DB Monitor Report: " + result['date'])
|
||||
log.info("--------------------------------------------------------")
|
||||
- log.info("Database Cache:")
|
||||
- log.info(" - Cache Hit Ratio: {}%".format(result['dbcache']['hit_ratio']))
|
||||
- log.info(" - Free Space: {}".format(result['dbcache']['free']))
|
||||
- log.info(" - Free Percentage: {}%".format(result['dbcache']['free_percentage']))
|
||||
- log.info(" - RO Page Drops: {}".format(result['dbcache']['roevicts']))
|
||||
- log.info(" - Pages In: {}".format(result['dbcache']['pagein']))
|
||||
- log.info(" - Pages Out: {}".format(result['dbcache']['pageout']))
|
||||
- log.info("")
|
||||
+ if ldbm_monitor.inst_db_impl == DB_IMPL_BDB:
|
||||
+ log.info("Database Cache:")
|
||||
+ log.info(" - Cache Hit Ratio: {}%".format(result['dbcache']['hit_ratio']))
|
||||
+ log.info(" - Free Space: {}".format(result['dbcache']['free']))
|
||||
+ log.info(" - Free Percentage: {}%".format(result['dbcache']['free_percentage']))
|
||||
+ log.info(" - RO Page Drops: {}".format(result['dbcache']['roevicts']))
|
||||
+ log.info(" - Pages In: {}".format(result['dbcache']['pagein']))
|
||||
+ log.info(" - Pages Out: {}".format(result['dbcache']['pageout']))
|
||||
+ log.info("")
|
||||
log.info("Normalized DN Cache:")
|
||||
log.info(" - Cache Hit Ratio: {}%".format(result['ndncache']['hit_ratio']))
|
||||
log.info(" - Free Space: {}".format(result['ndncache']['free']))
|
||||
@@ -283,11 +292,12 @@ def db_monitor(inst, basedn, log, args):
|
||||
log.info(" - Entry Cache Free Space: {}".format(attr_dict['entry_cache_free']))
|
||||
log.info(" - Entry Cache Free Percentage: {}%".format(attr_dict['entry_cache_free_percentage']))
|
||||
log.info(" - Entry Cache Average Size: {}".format(attr_dict['entry_cache_size']))
|
||||
- log.info(" - DN Cache Hit Ratio: {}%".format(attr_dict['dn_cache_hit_ratio']))
|
||||
- log.info(" - DN Cache Count: {}".format(attr_dict['dn_cache_count']))
|
||||
- log.info(" - DN Cache Free Space: {}".format(attr_dict['dn_cache_free']))
|
||||
- log.info(" - DN Cache Free Percentage: {}%".format(attr_dict['dn_cache_free_percentage']))
|
||||
- log.info(" - DN Cache Average Size: {}".format(attr_dict['dn_cache_size']))
|
||||
+ if ldbm_monitor.inst_db_impl == DB_IMPL_BDB:
|
||||
+ log.info(" - DN Cache Hit Ratio: {}%".format(attr_dict['dn_cache_hit_ratio']))
|
||||
+ log.info(" - DN Cache Count: {}".format(attr_dict['dn_cache_count']))
|
||||
+ log.info(" - DN Cache Free Space: {}".format(attr_dict['dn_cache_free']))
|
||||
+ log.info(" - DN Cache Free Percentage: {}%".format(attr_dict['dn_cache_free_percentage']))
|
||||
+ log.info(" - DN Cache Average Size: {}".format(attr_dict['dn_cache_size']))
|
||||
if len(result['backends'][be_name]['indexes']) > 0:
|
||||
log.info(" - Indexes:")
|
||||
for index in result['backends'][be_name]['indexes']:
|
||||
diff --git a/src/lib389/lib389/monitor.py b/src/lib389/lib389/monitor.py
|
||||
index 9bf8aa993..bf3e1df76 100644
|
||||
--- a/src/lib389/lib389/monitor.py
|
||||
+++ b/src/lib389/lib389/monitor.py
|
||||
@@ -202,68 +202,64 @@ class MonitorLDBM(DSLdapObject):
|
||||
:type instance: lib389.DirSrv
|
||||
:param dn: not used
|
||||
"""
|
||||
+ DB_KEYS = {
|
||||
+ DB_IMPL_BDB: [
|
||||
+ 'dbcachehits', 'dbcachetries', 'dbcachehitratio',
|
||||
+ 'dbcachepagein', 'dbcachepageout', 'dbcacheroevict',
|
||||
+ 'dbcacherwevict'
|
||||
+ ],
|
||||
+ DB_IMPL_MDB: [
|
||||
+ 'normalizeddncachetries', 'normalizeddncachehits',
|
||||
+ 'normalizeddncachemisses', 'normalizeddncachehitratio',
|
||||
+ 'normalizeddncacheevictions', 'currentnormalizeddncachesize',
|
||||
+ 'maxnormalizeddncachesize', 'currentnormalizeddncachecount',
|
||||
+ 'normalizeddncachethreadsize', 'normalizeddncachethreadslots'
|
||||
+ ]
|
||||
+ }
|
||||
+ DB_MONITOR_KEYS = {
|
||||
+ DB_IMPL_BDB: [
|
||||
+ 'nsslapd-db-abort-rate', 'nsslapd-db-active-txns', 'nsslapd-db-cache-hit',
|
||||
+ 'nsslapd-db-cache-try', 'nsslapd-db-cache-region-wait-rate',
|
||||
+ 'nsslapd-db-cache-size-bytes', 'nsslapd-db-clean-pages', 'nsslapd-db-commit-rate',
|
||||
+ 'nsslapd-db-deadlock-rate', 'nsslapd-db-dirty-pages', 'nsslapd-db-hash-buckets',
|
||||
+ 'nsslapd-db-hash-elements-examine-rate', 'nsslapd-db-hash-search-rate',
|
||||
+ 'nsslapd-db-lock-conflicts', 'nsslapd-db-lock-region-wait-rate',
|
||||
+ 'nsslapd-db-lock-request-rate', 'nsslapd-db-lockers', 'nsslapd-db-configured-locks',
|
||||
+ 'nsslapd-db-current-locks', 'nsslapd-db-max-locks', 'nsslapd-db-current-lock-objects',
|
||||
+ 'nsslapd-db-max-lock-objects', 'nsslapd-db-log-bytes-since-checkpoint',
|
||||
+ 'nsslapd-db-log-region-wait-rate', 'nsslapd-db-log-write-rate',
|
||||
+ 'nsslapd-db-longest-chain-length', 'nsslapd-db-page-create-rate',
|
||||
+ 'nsslapd-db-page-read-rate', 'nsslapd-db-page-ro-evict-rate',
|
||||
+ 'nsslapd-db-page-rw-evict-rate', 'nsslapd-db-page-trickle-rate',
|
||||
+ 'nsslapd-db-page-write-rate', 'nsslapd-db-pages-in-use',
|
||||
+ 'nsslapd-db-txn-region-wait-rate', 'nsslapd-db-mp-pagesize'
|
||||
+ ],
|
||||
+ DB_IMPL_MDB: [
|
||||
+ 'dbenvmapmaxsize', 'dbenvmapsize', 'dbenvlastpageno',
|
||||
+ 'dbenvlasttxnid', 'dbenvmaxreaders', 'dbenvnumreaders',
|
||||
+ 'dbenvnumdbis', 'waitingrwtxn', 'activerwtxn',
|
||||
+ 'abortrwtxn', 'commitrwtxn', 'granttimerwtxn',
|
||||
+ 'lifetimerwtxn', 'waitingrotxn', 'activerotxn',
|
||||
+ 'abortrotxn', 'commitrotxn', 'granttimerotxn',
|
||||
+ 'lifetimerotxn'
|
||||
+ ]
|
||||
+ }
|
||||
+
|
||||
def __init__(self, instance, dn=None):
|
||||
super(MonitorLDBM, self).__init__(instance=instance)
|
||||
self._dn = DN_MONITOR_LDBM
|
||||
self._db_mon = MonitorDatabase(instance)
|
||||
- self._backend_keys = [
|
||||
- 'dbcachehits',
|
||||
- 'dbcachetries',
|
||||
- 'dbcachehitratio',
|
||||
- 'dbcachepagein',
|
||||
- 'dbcachepageout',
|
||||
- 'dbcacheroevict',
|
||||
- 'dbcacherwevict',
|
||||
- ]
|
||||
- self._db_mon_keys = [
|
||||
- 'nsslapd-db-abort-rate',
|
||||
- 'nsslapd-db-active-txns',
|
||||
- 'nsslapd-db-cache-hit',
|
||||
- 'nsslapd-db-cache-try',
|
||||
- 'nsslapd-db-cache-region-wait-rate',
|
||||
- 'nsslapd-db-cache-size-bytes',
|
||||
- 'nsslapd-db-clean-pages',
|
||||
- 'nsslapd-db-commit-rate',
|
||||
- 'nsslapd-db-deadlock-rate',
|
||||
- 'nsslapd-db-dirty-pages',
|
||||
- 'nsslapd-db-hash-buckets',
|
||||
- 'nsslapd-db-hash-elements-examine-rate',
|
||||
- 'nsslapd-db-hash-search-rate',
|
||||
- 'nsslapd-db-lock-conflicts',
|
||||
- 'nsslapd-db-lock-region-wait-rate',
|
||||
- 'nsslapd-db-lock-request-rate',
|
||||
- 'nsslapd-db-lockers',
|
||||
- 'nsslapd-db-configured-locks',
|
||||
- 'nsslapd-db-current-locks',
|
||||
- 'nsslapd-db-max-locks',
|
||||
- 'nsslapd-db-current-lock-objects',
|
||||
- 'nsslapd-db-max-lock-objects',
|
||||
- 'nsslapd-db-log-bytes-since-checkpoint',
|
||||
- 'nsslapd-db-log-region-wait-rate',
|
||||
- 'nsslapd-db-log-write-rate',
|
||||
- 'nsslapd-db-longest-chain-length',
|
||||
- 'nsslapd-db-page-create-rate',
|
||||
- 'nsslapd-db-page-read-rate',
|
||||
- 'nsslapd-db-page-ro-evict-rate',
|
||||
- 'nsslapd-db-page-rw-evict-rate',
|
||||
- 'nsslapd-db-page-trickle-rate',
|
||||
- 'nsslapd-db-page-write-rate',
|
||||
- 'nsslapd-db-pages-in-use',
|
||||
- 'nsslapd-db-txn-region-wait-rate',
|
||||
- 'nsslapd-db-mp-pagesize',
|
||||
- ]
|
||||
- if not ds_is_older("1.4.0", instance=instance):
|
||||
+ self.inst_db_impl = self._instance.get_db_lib()
|
||||
+ self._backend_keys = list(self.DB_KEYS.get(self.inst_db_impl, []))
|
||||
+ self._db_mon_keys = list(self.DB_MONITOR_KEYS.get(self.inst_db_impl, []))
|
||||
+
|
||||
+ if self.inst_db_impl == DB_IMPL_BDB and not ds_is_older("1.4.0", instance=instance):
|
||||
self._backend_keys.extend([
|
||||
- 'normalizeddncachetries',
|
||||
- 'normalizeddncachehits',
|
||||
- 'normalizeddncachemisses',
|
||||
- 'normalizeddncachehitratio',
|
||||
- 'normalizeddncacheevictions',
|
||||
- 'currentnormalizeddncachesize',
|
||||
- 'maxnormalizeddncachesize',
|
||||
- 'currentnormalizeddncachecount',
|
||||
- 'normalizeddncachethreadsize',
|
||||
- 'normalizeddncachethreadslots'
|
||||
+ 'normalizeddncachetries', 'normalizeddncachehits',
|
||||
+ 'normalizeddncachemisses', 'normalizeddncachehitratio',
|
||||
+ 'normalizeddncacheevictions', 'currentnormalizeddncachesize',
|
||||
+ 'maxnormalizeddncachesize', 'currentnormalizeddncachecount',
|
||||
+ 'normalizeddncachethreadsize', 'normalizeddncachethreadslots'
|
||||
])
|
||||
|
||||
def get_status(self, use_json=False):
|
||||
--
|
||||
2.49.0
|
||||
|
||||
1201
0059-Issue-6756-CLI-UI-Properly-handle-disabled-NDN-cache.patch
Normal file
File diff suppressed because it is too large
533
0060-Issue-6693-Fix-error-messages-inconsistencies-6694.patch
Normal file
@ -0,0 +1,533 @@
|
||||
From 2a8dc5efe710b34e6c515f157e49b7e15d631b73 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Thu, 27 Mar 2025 12:02:19 +0100
|
||||
Subject: [PATCH] Issue 6693 - Fix error messages inconsistencies (#6694)
|
||||
|
||||
Fix missing function names and missing newlines in error messages.
|
||||
Sometimes the newline was moved back from the message arguments into the message
|
||||
itself, both for consistency and to keep the tool from reporting a false positive.
|
||||
|
||||
Issue: #6693
|
||||
|
||||
Reviewed by: @droideck (Thanks!)
|
||||
|
||||
(cherry picked from commit b550579365b2dfd2ea0b057dea980111973aff12)
|
||||
---
|
||||
.../plugins/replication/repl5_inc_protocol.c | 2 +-
|
||||
.../slapd/back-ldbm/db-bdb/bdb_config.c | 4 +-
|
||||
.../back-ldbm/db-bdb/bdb_import_threads.c | 4 +-
|
||||
.../servers/slapd/back-ldbm/db-bdb/bdb_misc.c | 8 ++--
|
||||
ldap/servers/slapd/back-ldbm/idl.c | 2 +-
|
||||
ldap/servers/slapd/back-ldbm/ldbm_add.c | 2 +-
|
||||
ldap/servers/slapd/back-ldbm/ldbm_usn.c | 2 +-
|
||||
ldap/servers/slapd/back-ldbm/vlv.c | 2 +-
|
||||
ldap/servers/slapd/daemon.c | 2 +-
|
||||
ldap/servers/slapd/entry.c | 4 +-
|
||||
ldap/servers/slapd/filterentry.c | 2 +-
|
||||
ldap/servers/slapd/ldaputil.c | 2 +-
|
||||
ldap/servers/slapd/libglobs.c | 4 +-
|
||||
ldap/servers/slapd/log.c | 2 +-
|
||||
ldap/servers/slapd/modrdn.c | 4 +-
|
||||
ldap/servers/slapd/passwd_extop.c | 48 +++++++++----------
|
||||
ldap/servers/slapd/pw.c | 4 +-
|
||||
ldap/servers/slapd/schema.c | 2 +-
|
||||
ldap/servers/slapd/util.c | 2 +-
|
||||
19 files changed, 51 insertions(+), 51 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
|
||||
index 68d93e1b0..41a20c370 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
|
||||
@@ -1724,7 +1724,7 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
|
||||
} else {
|
||||
agmt_inc_last_update_changecount(prp->agmt, csn_get_replicaid(entry.op->csn), 1 /*skipped*/);
|
||||
}
|
||||
- slapi_log_err(finished ? SLAPI_LOG_WARNING : slapi_log_urp,
|
||||
+ slapi_log_err(finished ? SLAPI_LOG_WARNING : slapi_log_urp, repl_plugin_name,
|
||||
"send_updates - %s: Failed to send update operation to receiver (uniqueid %s, CSN %s): %s. %s.\n",
|
||||
(char *)agmt_get_long_name(prp->agmt),
|
||||
entry.op->target_address.uniqueid, csn_str,
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
|
||||
index 8f847f418..a1d6c6af1 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
|
||||
@@ -284,7 +284,7 @@ bdb_config_db_lock_pause_set(void *arg, void *value, char *errorbuf, int phase _
|
||||
|
||||
if (val == 0) {
|
||||
slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_pause_set",
|
||||
- "%s was set to '0'. The default value will be used (%s)",
|
||||
+ "%s was set to '0'. The default value will be used (%s)\n",
|
||||
CONFIG_DB_LOCKS_PAUSE, DEFAULT_DBLOCK_PAUSE_STR);
|
||||
val = DEFAULT_DBLOCK_PAUSE;
|
||||
}
|
||||
@@ -315,7 +315,7 @@ bdb_config_db_lock_threshold_set(void *arg, void *value, char *errorbuf, int pha
|
||||
"%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95",
|
||||
CONFIG_DB_LOCKS_THRESHOLD, val);
|
||||
slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_lock_threshold_set",
|
||||
- "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95",
|
||||
+ "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95\n",
|
||||
CONFIG_DB_LOCKS_THRESHOLD, val);
|
||||
retval = LDAP_OPERATIONS_ERROR;
|
||||
return retval;
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
|
||||
index dec2bac9e..b0c60581a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
|
||||
@@ -1768,7 +1768,7 @@ bdb_upgradedn_producer(void *param)
|
||||
inst->inst_name, dn_id);
|
||||
}
|
||||
slapi_log_err(SLAPI_LOG_ERR, "bdb_upgradedn_producer",
|
||||
- "%s: Error: failed to write a line \"%s\"",
|
||||
+ "%s: Error: failed to write a line \"%s\"\n",
|
||||
inst->inst_name, dn_id);
|
||||
slapi_ch_free_string(&dn_id);
|
||||
goto error;
|
||||
@@ -3725,7 +3725,7 @@ bdb_dse_conf_verify_core(struct ldbminfo *li, char *src_dir, char *file_name, ch
|
||||
slapi_ch_free_string(&estr);
|
||||
if (!e) {
|
||||
slapi_log_err(SLAPI_LOG_WARNING, "bdb_dse_conf_verify_core",
|
||||
- "Skipping bad LDIF entry ending line %d of file \"%s\"",
|
||||
+ "Skipping bad LDIF entry ending line %d of file \"%s\"\n",
|
||||
curr_lineno, filename);
|
||||
continue;
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c
|
||||
index eeafbf995..4cea7b879 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c
|
||||
@@ -192,13 +192,13 @@ bdb_start_autotune(struct ldbminfo *li)
|
||||
/* First, set our message. In the case autosize is 0, we calculate some
|
||||
* sane defaults and populate these values, but it's only on first run.
|
||||
*/
|
||||
- msg = "This can be corrected by altering the values of nsslapd-dbcachesize, nsslapd-cachememsize and nsslapd-dncachememsize\n";
|
||||
+ msg = "This can be corrected by altering the values of nsslapd-dbcachesize, nsslapd-cachememsize and nsslapd-dncachememsize";
|
||||
autosize_percentage = 25;
|
||||
} else {
|
||||
/* In this case we really are setting the values each start up, so
|
||||
* change the msg.
|
||||
*/
|
||||
- msg = "This can be corrected by altering the values of nsslapd-cache-autosize, nsslapd-cache-autosize-split and nsslapd-dncachememsize\n";
|
||||
+ msg = "This can be corrected by altering the values of nsslapd-cache-autosize, nsslapd-cache-autosize-split and nsslapd-dncachememsize";
|
||||
autosize_percentage = li->li_cache_autosize;
|
||||
}
|
||||
/* Has to be less than 0, 0 means to disable I think */
|
||||
@@ -240,7 +240,7 @@ bdb_start_autotune(struct ldbminfo *li)
|
||||
issane = util_is_cachesize_sane(mi, &zone_size);
|
||||
if (issane == UTIL_CACHESIZE_REDUCED) {
|
||||
slapi_log_err(SLAPI_LOG_WARNING, "bdb_start_autotune", "Your autosized cache values have been reduced. Likely your nsslapd-cache-autosize percentage is too high.\n");
|
||||
- slapi_log_err(SLAPI_LOG_WARNING, "bdb_start_autotune", "%s", msg);
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "bdb_start_autotune", "%s\n", msg);
|
||||
}
|
||||
/* It's valid, lets divide it up and set according to user prefs */
|
||||
db_size = (autosize_db_percentage_split * zone_size) / 100;
|
||||
@@ -382,7 +382,7 @@ bdb_start_autotune(struct ldbminfo *li)
|
||||
slapi_log_err(SLAPI_LOG_WARNING, "bdb_start_autotune", "In a future release this WILL prevent server start up. You MUST alter your configuration.\n");
|
||||
slapi_log_err(SLAPI_LOG_WARNING, "bdb_start_autotune", "Total entry cache size: %" PRIu64 " B; dbcache size: %" PRIu64 " B; available memory size: %" PRIu64 " B; \n",
|
||||
total_cache_size, (uint64_t)li->li_dbcachesize, mi->system_available_bytes);
|
||||
- slapi_log_err(SLAPI_LOG_WARNING, "bdb_start_autotune", "%s", msg);
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "bdb_start_autotune", "%s\n", msg);
|
||||
/* WB 2016 - This should be UNCOMMENTED in a future release */
|
||||
/* return SLAPI_FAIL_GENERAL; */
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/idl.c b/ldap/servers/slapd/back-ldbm/idl.c
|
||||
index f690827b5..5574f840a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/idl.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/idl.c
|
||||
@@ -1370,7 +1370,7 @@ idl_old_delete_key(
|
||||
if ((idl = idl_fetch_one(be, db, key, txn, &rc)) == NULL) {
|
||||
idl_unlock_list(a->ai_idl, key);
|
||||
if (rc != 0 && rc != DBI_RC_NOTFOUND && rc != DBI_RC_RETRY) {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, "idl_old_delete_key - (%s) 0 BAD %d %s\n",
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "idl_old_delete_key", "(%s) 0 BAD %d %s\n",
|
||||
(char *)key->dptr, rc, (msg = dblayer_strerror(rc)) ? msg : "");
|
||||
}
|
||||
if (0 == rc || DBI_RC_NOTFOUND == rc)
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
index dec3a0c6d..42454c890 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
@@ -637,7 +637,7 @@ ldbm_back_add(Slapi_PBlock *pb)
|
||||
if ((addingentry->ep_id = next_id(be)) >= MAXID) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_add ",
|
||||
"Maximum ID reached, cannot add entry to "
|
||||
- "backend '%s'",
|
||||
+ "backend '%s'\n",
|
||||
be->be_name);
|
||||
ldap_result_code = LDAP_OPERATIONS_ERROR;
|
||||
goto error_return;
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_usn.c b/ldap/servers/slapd/back-ldbm/ldbm_usn.c
|
||||
index d002e3e4f..fd4e264fe 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_usn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_usn.c
|
||||
@@ -123,7 +123,7 @@ usn_get_last_usn(Slapi_Backend *be, PRUint64 *last_usn)
|
||||
rc = dblayer_new_cursor(be, db, NULL, &dbc);
|
||||
if (0 != rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "usn_get_last_usn",
|
||||
- "Failed to create a cursor: %d", rc);
|
||||
+ "Failed to create a cursor: %d\n", rc);
|
||||
goto bail;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c
|
||||
index c504b4bd2..fd592b8a9 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/vlv.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/vlv.c
|
||||
@@ -1670,7 +1670,7 @@ vlv_trim_candidates_byvalue(backend *be, const IDList *candidates, const sort_sp
|
||||
slapi_attr_values2keys(&sort_control->sattr, invalue, &typedown_value, LDAP_FILTER_EQUALITY); /* JCM SLOW FUNCTION */
|
||||
if (compare_fn == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_WARNING, "vlv_trim_candidates_byvalue",
|
||||
- "Attempt to compare an unordered attribute");
|
||||
+ "Attempt to compare an unordered attribute\n");
|
||||
compare_fn = slapi_berval_cmp;
|
||||
}
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index a9922958a..a43fc9285 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -582,7 +582,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
|
||||
{
|
||||
if (be_list_count == BE_LIST_SIZE) { /* error - too many backends */
|
||||
slapi_log_err(SLAPI_LOG_ERR, "disk_monitoring_thread",
|
||||
- "Too many backends match search request - cannot proceed");
|
||||
+ "Too many backends match search request - cannot proceed\n");
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_ALERT, "disk_monitoring_thread",
|
||||
"Putting the backend '%s' to read-only mode\n", be->be_name);
|
||||
diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c
|
||||
index 658b9b279..235410e45 100644
|
||||
--- a/ldap/servers/slapd/entry.c
|
||||
+++ b/ldap/servers/slapd/entry.c
|
||||
@@ -887,8 +887,8 @@ str2entry_dupcheck(const char *rawdn, const char *s, int flags, int read_statein
|
||||
if (strcasecmp(type, "dn") == 0) {
|
||||
if (slapi_entry_get_dn_const(e) != NULL) {
|
||||
char ebuf[BUFSIZ];
|
||||
- slapi_log_err(SLAPI_LOG_TRACE, "str2entry_dupcheck"
|
||||
- "Entry has multiple dns \"%s\" and \"%s\" (second ignored)\n",
|
||||
+ slapi_log_err(SLAPI_LOG_TRACE, "str2entry_dupcheck",
|
||||
+ "Entry has multiple dns \"%s\" and \"%s\" (second ignored)\n",
|
||||
(char *)slapi_entry_get_dn_const(e),
|
||||
escape_string(valuecharptr, ebuf));
|
||||
/* the memory below was not allocated by the slapi_ch_ functions */
|
||||
diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c
|
||||
index f5604161d..c3737eb9e 100644
|
||||
--- a/ldap/servers/slapd/filterentry.c
|
||||
+++ b/ldap/servers/slapd/filterentry.c
|
||||
@@ -828,7 +828,7 @@ slapi_vattr_filter_test_ext(
|
||||
|
||||
if (only_check_access != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "slapi_vattr_filter_test_ext",
|
||||
- "⚠️ DANGER ⚠️ - only_check_access mode is BROKEN!!! YOU MUST CHECK ACCESS WITH FILTER MATCHING");
|
||||
+ "⚠️ DANGER ⚠️ - only_check_access mode is BROKEN!!! YOU MUST CHECK ACCESS WITH FILTER MATCHING\n");
|
||||
}
|
||||
PR_ASSERT(only_check_access == 0);
|
||||
|
||||
diff --git a/ldap/servers/slapd/ldaputil.c b/ldap/servers/slapd/ldaputil.c
|
||||
index 3910bdf7f..e59d22893 100644
|
||||
--- a/ldap/servers/slapd/ldaputil.c
|
||||
+++ b/ldap/servers/slapd/ldaputil.c
|
||||
@@ -1305,7 +1305,7 @@ slapi_add_auth_response_control(Slapi_PBlock *pb, const char *binddn)
|
||||
|
||||
if (slapi_pblock_set(pb, SLAPI_ADD_RESCONTROL, &arctrl) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "slapi_add_auth_response_control",
|
||||
- "Unable to add authentication response control");
|
||||
+ "Unable to add authentication response control\n");
|
||||
}
|
||||
|
||||
if (NULL != dnbuf_dynamic) {
|
||||
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
|
||||
index d08267426..a57d704d3 100644
|
||||
--- a/ldap/servers/slapd/libglobs.c
|
||||
+++ b/ldap/servers/slapd/libglobs.c
|
||||
@@ -1758,13 +1758,13 @@ FrontendConfig_init(void)
|
||||
/* initialize the read/write configuration lock */
|
||||
if ((cfg->cfg_rwlock = slapi_new_rwlock()) == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_EMERG, "FrontendConfig_init",
|
||||
- "Failed to initialize cfg_rwlock. Exiting now.");
|
||||
+ "Failed to initialize cfg_rwlock. Exiting now.\n");
|
||||
exit(-1);
|
||||
}
|
||||
#else
|
||||
if ((cfg->cfg_lock = PR_NewLock()) == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_EMERG, "FrontendConfig_init",
|
||||
- "Failed to initialize cfg_lock. Exiting now.");
|
||||
+ "Failed to initialize cfg_lock. Exiting now.\n");
|
||||
exit(-1);
|
||||
}
|
||||
#endif
|
||||
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
|
||||
index 3344894d1..c4aa99332 100644
|
||||
--- a/ldap/servers/slapd/log.c
|
||||
+++ b/ldap/servers/slapd/log.c
|
||||
@@ -1245,7 +1245,7 @@ log_set_numlogsperdir(const char *attrname, char *numlogs_str, int logtype, char
|
||||
default:
|
||||
rv = LDAP_OPERATIONS_ERROR;
|
||||
slapi_log_err(SLAPI_LOG_ERR, "log_set_numlogsperdir",
|
||||
- "Invalid log type %d", logtype);
|
||||
+ "Invalid log type %d\n", logtype);
|
||||
}
|
||||
}
|
||||
return rv;
|
||||
diff --git a/ldap/servers/slapd/modrdn.c b/ldap/servers/slapd/modrdn.c
|
||||
index e1ba086d2..784060284 100644
|
||||
--- a/ldap/servers/slapd/modrdn.c
|
||||
+++ b/ldap/servers/slapd/modrdn.c
|
||||
@@ -566,13 +566,13 @@ op_shared_rename(Slapi_PBlock *pb, int passin_args)
|
||||
"Syntax check of newSuperior failed\n");
|
||||
if (!internal_op) {
|
||||
slapi_log_err(SLAPI_LOG_ARGS, "op_shared_rename",
|
||||
- "conn=%" PRIu64 " op=%d MODRDN invalid new superior (\"%s\")",
|
||||
+ "conn=%" PRIu64 " op=%d MODRDN invalid new superior (\"%s\")\n",
|
||||
pb_conn->c_connid,
|
||||
operation->o_opid,
|
||||
newsuperior ? newsuperior : "(null)");
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_ARGS, "op_shared_rename",
|
||||
- "conn=%s op=%d MODRDN invalid new superior (\"%s\")",
|
||||
+ "conn=%s op=%d MODRDN invalid new superior (\"%s\")\n",
|
||||
LOG_INTERNAL_OP_CON_ID,
|
||||
LOG_INTERNAL_OP_OP_ID,
|
||||
newsuperior ? newsuperior : "(null)");
|
||||
diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c
|
||||
index 8fad2ee96..f758ac018 100644
|
||||
--- a/ldap/servers/slapd/passwd_extop.c
|
||||
+++ b/ldap/servers/slapd/passwd_extop.c
|
||||
@@ -470,10 +470,10 @@ passwd_modify_extop(Slapi_PBlock *pb)
|
||||
* match this very plugin's OID: EXTOP_PASSWD_OID. */
|
||||
slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_OID, &oid);
|
||||
if (oid == NULL) {
|
||||
- errMesg = "Could not get OID value from request.\n";
|
||||
+ errMesg = "Could not get OID value from request.";
|
||||
rc = LDAP_OPERATIONS_ERROR;
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop",
|
||||
- "%s", errMesg);
|
||||
+ "%s\n", errMesg);
|
||||
goto free_and_return;
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop",
|
||||
@@ -481,7 +481,7 @@ passwd_modify_extop(Slapi_PBlock *pb)
|
||||
}
|
||||
|
||||
if (strcasecmp(oid, EXTOP_PASSWD_OID) != 0) {
|
||||
- errMesg = "Request OID does not match Passwd OID.\n";
|
||||
+ errMesg = "Request OID does not match Passwd OID.";
|
||||
rc = LDAP_OPERATIONS_ERROR;
|
||||
goto free_and_return;
|
||||
} else {
|
||||
@@ -500,24 +500,24 @@ passwd_modify_extop(Slapi_PBlock *pb)
|
||||
goto free_and_return;
|
||||
}
|
||||
if (slapi_pblock_get(pb, SLAPI_CONN_SASL_SSF, &sasl_ssf) != 0) {
|
||||
- errMesg = "Could not get SASL SSF from connection\n";
|
||||
+ errMesg = "Could not get SASL SSF from connection";
|
||||
rc = LDAP_OPERATIONS_ERROR;
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop",
|
||||
- "%s", errMesg);
|
||||
+ "%s\n", errMesg);
|
||||
goto free_and_return;
|
||||
}
|
||||
|
||||
if (slapi_pblock_get(pb, SLAPI_CONN_LOCAL_SSF, &local_ssf) != 0) {
|
||||
- errMesg = "Could not get local SSF from connection\n";
|
||||
+ errMesg = "Could not get local SSF from connection";
|
||||
rc = LDAP_OPERATIONS_ERROR;
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop",
|
||||
- "%s", errMesg);
|
||||
+ "%s\n", errMesg);
|
||||
goto free_and_return;
|
||||
}
|
||||
|
||||
if (((conn->c_flags & CONN_FLAG_SSL) != CONN_FLAG_SSL) &&
|
||||
(sasl_ssf <= 1) && (local_ssf <= 1)) {
|
||||
- errMesg = "Operation requires a secure connection.\n";
|
||||
+ errMesg = "Operation requires a secure connection.";
|
||||
rc = LDAP_CONFIDENTIALITY_REQUIRED;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -536,7 +536,7 @@ passwd_modify_extop(Slapi_PBlock *pb)
|
||||
}
|
||||
|
||||
if ((ber = ber_init(extop_value)) == NULL) {
|
||||
- errMesg = "PasswdModify Request decode failed.\n";
|
||||
+ errMesg = "PasswdModify Request decode failed.";
|
||||
rc = LDAP_PROTOCOL_ERROR;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -571,7 +571,7 @@ passwd_modify_extop(Slapi_PBlock *pb)
|
||||
if (ber_scanf(ber, "a", &rawdn) == LBER_ERROR) {
|
||||
slapi_ch_free_string(&rawdn);
|
||||
slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "ber_scanf failed :{\n");
|
||||
- errMesg = "ber_scanf failed at userID parse.\n";
|
||||
+ errMesg = "ber_scanf failed at userID parse.";
|
||||
rc = LDAP_PROTOCOL_ERROR;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -583,7 +583,7 @@ passwd_modify_extop(Slapi_PBlock *pb)
|
||||
if (rc) { /* syntax check failed */
|
||||
op_shared_log_error_access(pb, "EXT", rawdn ? rawdn : "",
|
||||
"strict: invalid target dn");
|
||||
- errMesg = "invalid target dn.\n";
|
||||
+ errMesg = "invalid target dn.";
|
||||
slapi_ch_free_string(&rawdn);
|
||||
rc = LDAP_INVALID_SYNTAX;
|
||||
goto free_and_return;
|
||||
@@ -597,7 +597,7 @@ passwd_modify_extop(Slapi_PBlock *pb)
|
||||
if (ber_scanf(ber, "a", &oldPasswd) == LBER_ERROR) {
|
||||
slapi_ch_free_string(&oldPasswd);
|
||||
slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "ber_scanf failed :{\n");
|
||||
- errMesg = "ber_scanf failed at oldPasswd parse.\n";
|
||||
+ errMesg = "ber_scanf failed at oldPasswd parse.";
|
||||
rc = LDAP_PROTOCOL_ERROR;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -609,7 +609,7 @@ passwd_modify_extop(Slapi_PBlock *pb)
|
||||
if (ber_scanf(ber, "a", &newPasswd) == LBER_ERROR) {
|
||||
slapi_ch_free_string(&newPasswd);
|
||||
slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "ber_scanf failed :{\n");
|
||||
- errMesg = "ber_scanf failed at newPasswd parse.\n";
|
||||
+ errMesg = "ber_scanf failed at newPasswd parse.";
|
||||
rc = LDAP_PROTOCOL_ERROR;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -626,7 +626,7 @@ parse_req_done:
|
||||
/* If the connection is bound anonymously, we must refuse to process this operation. */
|
||||
if (bindDN == NULL || *bindDN == '\0') {
|
||||
/* Refuse the operation because they're bound anonymously */
|
||||
- errMesg = "Anonymous Binds are not allowed.\n";
|
||||
+ errMesg = "Anonymous Binds are not allowed.";
|
||||
rc = LDAP_INSUFFICIENT_ACCESS;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -640,7 +640,7 @@ parse_req_done:
|
||||
dn = slapi_sdn_get_ndn(target_sdn);
|
||||
if (dn == NULL || *dn == '\0') {
|
||||
/* Refuse the operation because they're bound anonymously */
|
||||
- errMesg = "Invalid dn.\n";
|
||||
+ errMesg = "Invalid dn.";
|
||||
rc = LDAP_INVALID_DN_SYNTAX;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -657,7 +657,7 @@ parse_req_done:
|
||||
* the bind operation (or used sasl or client cert auth or OS creds) */
|
||||
slapi_pblock_get(pb, SLAPI_CONN_AUTHMETHOD, &authmethod);
|
||||
if (!authmethod || !strcmp(authmethod, SLAPD_AUTH_NONE)) {
|
||||
- errMesg = "User must be authenticated to the directory server.\n";
|
||||
+ errMesg = "User must be authenticated to the directory server.";
|
||||
rc = LDAP_INSUFFICIENT_ACCESS;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -680,14 +680,14 @@ parse_req_done:
|
||||
|
||||
if (rval != LDAP_SUCCESS) {
|
||||
if (!errMesg)
|
||||
- errMesg = "Error generating new password.\n";
|
||||
+ errMesg = "Error generating new password.";
|
||||
rc = LDAP_OPERATIONS_ERROR;
|
||||
goto free_and_return;
|
||||
}
|
||||
|
||||
/* Make sure a passwd was actually generated */
|
||||
if (newPasswd == NULL || *newPasswd == '\0') {
|
||||
- errMesg = "Error generating new password.\n";
|
||||
+ errMesg = "Error generating new password.";
|
||||
rc = LDAP_OPERATIONS_ERROR;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -723,7 +723,7 @@ parse_req_done:
|
||||
/* If we can't find the entry, then that's an error */
|
||||
if (ret) {
|
||||
/* Couldn't find the entry, fail */
|
||||
- errMesg = "No such Entry exists.\n";
|
||||
+ errMesg = "No such Entry exists.";
|
||||
rc = LDAP_NO_SUCH_OBJECT;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -767,7 +767,7 @@ parse_req_done:
|
||||
if (need_pwpolicy_ctrl) {
|
||||
slapi_pwpolicy_make_response_control(pb, -1, -1, LDAP_PWPOLICY_PWDMODNOTALLOWED);
|
||||
}
|
||||
- errMesg = "Insufficient access rights\n";
|
||||
+ errMesg = "Insufficient access rights";
|
||||
rc = LDAP_INSUFFICIENT_ACCESS;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -780,7 +780,7 @@ parse_req_done:
|
||||
ret = passwd_check_pwd(targetEntry, oldPasswd);
|
||||
if (ret) {
|
||||
/* No, then we fail this operation */
|
||||
- errMesg = "Invalid oldPasswd value.\n";
|
||||
+ errMesg = "Invalid oldPasswd value.";
|
||||
rc = ret;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -801,7 +801,7 @@ parse_req_done:
|
||||
if (need_pwpolicy_ctrl) {
|
||||
slapi_pwpolicy_make_response_control(pb, -1, -1, LDAP_PWPOLICY_PWDMODNOTALLOWED);
|
||||
}
|
||||
- errMesg = "User is not allowed to change password\n";
|
||||
+ errMesg = "User is not allowed to change password";
|
||||
rc = LDAP_UNWILLING_TO_PERFORM;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -823,7 +823,7 @@ parse_req_done:
|
||||
|
||||
if (ret != LDAP_SUCCESS) {
|
||||
/* Failed to modify the password, e.g. because password policy, etc. */
|
||||
- errMesg = "Failed to update password\n";
|
||||
+ errMesg = "Failed to update password";
|
||||
rc = ret;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -838,7 +838,7 @@ parse_req_done:
|
||||
/* Free anything that we allocated above */
|
||||
free_and_return:
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop",
|
||||
- "%s", errMesg ? errMesg : "success");
|
||||
+ "%s\n", errMesg ? errMesg : "success");
|
||||
|
||||
if ((rc == LDAP_REFERRAL) && (referrals)) {
|
||||
send_referrals_from_entry(pb, referrals);
|
||||
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
|
||||
index 493907e78..65e491590 100644
|
||||
--- a/ldap/servers/slapd/pw.c
|
||||
+++ b/ldap/servers/slapd/pw.c
|
||||
@@ -243,8 +243,8 @@ slapi_encode_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, char *value, char *alg)
|
||||
slapi_ch_free((void **)&scheme_list);
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "slapi_encode_ext",
|
||||
- "Invalid scheme - %s\n"
|
||||
- "no pwdstorage scheme plugin loaded",
|
||||
+ "Invalid scheme: %s ==> "
|
||||
+ "no pwdstorage scheme plugin loaded\n",
|
||||
alg);
|
||||
}
|
||||
return NULL;
|
||||
diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
|
||||
index 9dee642b9..9ef4ee4bf 100644
|
||||
--- a/ldap/servers/slapd/schema.c
|
||||
+++ b/ldap/servers/slapd/schema.c
|
||||
@@ -6563,7 +6563,7 @@ supplier_get_new_definitions(struct berval **objectclasses, struct berval **attr
|
||||
* it and look for objectclasses
|
||||
*/
|
||||
slapi_log_err(SLAPI_LOG_ERR, "supplier_get_new_definitions",
|
||||
- "Not able to build an attributes list from the consumer schema");
|
||||
+ "Not able to build an attributes list from the consumer schema\n");
|
||||
}
|
||||
schema_dse_unlock();
|
||||
*new_oc = oc2learn_list;
|
||||
diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c
|
||||
index bf065261f..cc89d3226 100644
|
||||
--- a/ldap/servers/slapd/util.c
|
||||
+++ b/ldap/servers/slapd/util.c
|
||||
@@ -1544,7 +1544,7 @@ util_is_cachesize_sane(slapi_pal_meminfo *mi, uint64_t *cachesize)
|
||||
*/
|
||||
uint64_t adjust_cachesize = (mi->system_available_bytes * 0.5);
|
||||
if (adjust_cachesize > *cachesize) {
|
||||
- slapi_log_err(SLAPI_LOG_CRIT, "util_is_cachesize_sane", "Invalid adjusted cachesize is greater than request %" PRIu64, adjust_cachesize);
|
||||
+ slapi_log_err(SLAPI_LOG_CRIT, "util_is_cachesize_sane", "Invalid adjusted cachesize is greater than request %" PRIu64 "\n", adjust_cachesize);
|
||||
return UTIL_CACHESIZE_ERROR;
|
||||
}
|
||||
if (adjust_cachesize < (16 * mi->pagesize_bytes)) {
|
||||
--
|
||||
2.49.0
|
||||
|
||||
143
0061-Issue-6893-Log-user-that-is-updated-during-password-.patch
Normal file
@ -0,0 +1,143 @@
|
||||
From 5b21b1f3636647a02e633eaeb064f9e2906b3a02 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 21 Jul 2025 18:07:21 -0400
|
||||
Subject: [PATCH] Issue 6893 - Log user that is updated during password modify
|
||||
extended operation
|
||||
|
||||
Description:
|
||||
|
||||
When a user's password is updated via an extended operation (password modify
|
||||
plugin) we only log the bind DN and not what user was updated. While "internal
|
||||
operation" logging will display the the user it should be logged by the default
|
||||
logging level.
|
||||
|
||||
Add access logging using "EXT_INFO" where we display the bind dn, target
|
||||
dn, and message.
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6893
|
||||
|
||||
Reviewed by: spichugi & tbordaz(Thanks!!)
|
||||
---
|
||||
ldap/servers/slapd/passwd_extop.c | 56 +++++++++++++++----------------
|
||||
1 file changed, 28 insertions(+), 28 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c
|
||||
index f758ac018..4d185f8dd 100644
|
||||
--- a/ldap/servers/slapd/passwd_extop.c
|
||||
+++ b/ldap/servers/slapd/passwd_extop.c
|
||||
@@ -456,12 +456,13 @@ passwd_modify_extop(Slapi_PBlock *pb)
|
||||
BerElement *response_ber = NULL;
|
||||
Slapi_Entry *targetEntry = NULL;
|
||||
Connection *conn = NULL;
|
||||
+ Operation *pb_op = NULL;
|
||||
LDAPControl **req_controls = NULL;
|
||||
LDAPControl **resp_controls = NULL;
|
||||
passwdPolicy *pwpolicy = NULL;
|
||||
Slapi_DN *target_sdn = NULL;
|
||||
Slapi_Entry *referrals = NULL;
|
||||
- /* Slapi_DN sdn; */
|
||||
+ Slapi_Backend *be = NULL;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "passwd_modify_extop", "=>\n");
|
||||
|
||||
@@ -639,7 +640,7 @@ parse_req_done:
|
||||
}
|
||||
dn = slapi_sdn_get_ndn(target_sdn);
|
||||
if (dn == NULL || *dn == '\0') {
|
||||
- /* Refuse the operation because they're bound anonymously */
|
||||
+ /* Invalid DN - refuse the operation */
|
||||
errMesg = "Invalid dn.";
|
||||
rc = LDAP_INVALID_DN_SYNTAX;
|
||||
goto free_and_return;
|
||||
@@ -716,14 +717,19 @@ parse_req_done:
|
||||
ber_free(response_ber, 1);
|
||||
}
|
||||
|
||||
- slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET, (void *)dn);
|
||||
+ slapi_pblock_get(pb, SLAPI_OPERATION, &pb_op);
|
||||
+ if (pb_op == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "pb_op is NULL\n");
|
||||
+ goto free_and_return;
|
||||
+ }
|
||||
|
||||
+ slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET, (void *)dn);
|
||||
/* Now we have the DN, look for the entry */
|
||||
ret = passwd_modify_getEntry(dn, &targetEntry);
|
||||
/* If we can't find the entry, then that's an error */
|
||||
if (ret) {
|
||||
/* Couldn't find the entry, fail */
|
||||
- errMesg = "No such Entry exists.";
|
||||
+ errMesg = "No such entry exists.";
|
||||
rc = LDAP_NO_SUCH_OBJECT;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -734,30 +740,18 @@ parse_req_done:
|
||||
leak any useful information to the client such as current password
|
||||
wrong, etc.
|
||||
*/
|
||||
- Operation *pb_op = NULL;
|
||||
- slapi_pblock_get(pb, SLAPI_OPERATION, &pb_op);
|
||||
- if (pb_op == NULL) {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "pb_op is NULL\n");
|
||||
- goto free_and_return;
|
||||
- }
|
||||
-
|
||||
operation_set_target_spec(pb_op, slapi_entry_get_sdn(targetEntry));
|
||||
slapi_pblock_set(pb, SLAPI_REQUESTOR_ISROOT, &pb_op->o_isroot);
|
||||
|
||||
- /* In order to perform the access control check , we need to select a backend (even though
|
||||
- * we don't actually need it otherwise).
|
||||
- */
|
||||
- {
|
||||
- Slapi_Backend *be = NULL;
|
||||
-
|
||||
- be = slapi_mapping_tree_find_backend_for_sdn(slapi_entry_get_sdn(targetEntry));
|
||||
- if (NULL == be) {
|
||||
- errMesg = "Failed to find backend for target entry";
|
||||
- rc = LDAP_OPERATIONS_ERROR;
|
||||
- goto free_and_return;
|
||||
- }
|
||||
- slapi_pblock_set(pb, SLAPI_BACKEND, be);
|
||||
+ /* In order to perform the access control check, we need to select a backend (even though
|
||||
+ * we don't actually need it otherwise). */
|
||||
+ be = slapi_mapping_tree_find_backend_for_sdn(slapi_entry_get_sdn(targetEntry));
|
||||
+ if (NULL == be) {
|
||||
+ errMesg = "Failed to find backend for target entry";
|
||||
+ rc = LDAP_NO_SUCH_OBJECT;
|
||||
+ goto free_and_return;
|
||||
}
|
||||
+ slapi_pblock_set(pb, SLAPI_BACKEND, be);
|
||||
|
||||
/* Check if the pwpolicy control is present */
|
||||
slapi_pblock_get(pb, SLAPI_PWPOLICY, &need_pwpolicy_ctrl);
|
||||
@@ -789,10 +783,7 @@ parse_req_done:
|
||||
/* Check if password policy allows users to change their passwords. We need to do
|
||||
* this here since the normal modify code doesn't perform this check for
|
||||
* internal operations. */
|
||||
-
|
||||
- Connection *pb_conn;
|
||||
- slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn);
|
||||
- if (!pb_op->o_isroot && !pb_conn->c_needpw && !pwpolicy->pw_change) {
|
||||
+ if (!pb_op->o_isroot && !conn->c_needpw && !pwpolicy->pw_change) {
|
||||
if (NULL == bindSDN) {
|
||||
bindSDN = slapi_sdn_new_normdn_byref(bindDN);
|
||||
}
|
||||
@@ -840,6 +831,15 @@ free_and_return:
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop",
|
||||
"%s\n", errMesg ? errMesg : "success");
|
||||
|
||||
+ if (dn) {
|
||||
+ /* Log the target ndn (if we have a target ndn) */
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " op=%d EXT_INFO name=\"passwd_modify_plugin\" bind_dn=\"%s\" target_dn=\"%s\" msg=\"%s\" rc=%d\n",
|
||||
+ conn ? conn->c_connid : -1, pb_op ? pb_op->o_opid : -1,
|
||||
+ bindDN ? bindDN : "", dn,
|
||||
+ errMesg ? errMesg : "success", rc);
|
||||
+ }
|
||||
+
|
||||
if ((rc == LDAP_REFERRAL) && (referrals)) {
|
||||
send_referrals_from_entry(pb, referrals);
|
||||
} else {
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
From 23e56fd01eaa24a2fa945430f91600dd9c726d34 Mon Sep 17 00:00:00 2001
|
||||
From e4a3a17f97d40e8c69811c99ca29d058daba3fc8 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 19 Aug 2025 14:30:15 -0700
|
||||
Subject: [PATCH] Issue 6936 - Make user/subtree policy creation idempotent
|
||||
@ -17,9 +17,6 @@ and preventing regressions.
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6936
|
||||
|
||||
Reviewed by: @mreynolds389 (Thanks!)
|
||||
|
||||
(cherry picked from commit da4eea126cc9019f540b57c1db9dec7988cade10)
|
||||
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
---
|
||||
.../pwp_history_local_override_test.py | 351 ++++++++++++++++++
|
||||
src/lib389/lib389/cli_conf/pwpolicy.py | 4 +-
|
||||
@ -568,5 +565,5 @@ index 6a47a44fe..539c230a9 100644
|
||||
# Starting deleting the policy, ignore the parts that might already have been removed
|
||||
pwp_container = nsContainer(self._instance, 'cn=nsPwPolicyContainer,%s' % parentdn)
|
||||
--
|
||||
2.51.1
|
||||
2.49.0
|
||||
|
||||
@ -0,0 +1,58 @@
|
||||
From 7f8a606a79cf26815adc894c4b9a6f65518e832e Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Fri, 11 Jul 2025 12:32:38 +0200
|
||||
Subject: [PATCH] Issue 6865 - AddressSanitizer: leak in
|
||||
agmt_update_init_status
|
||||
|
||||
Bug Description:
|
||||
We allocate an array of `LDAPMod *` pointers, but never free it:
|
||||
|
||||
```
|
||||
=================================================================
|
||||
==2748356==ERROR: LeakSanitizer: detected memory leaks
|
||||
|
||||
Direct leak of 24 byte(s) in 1 object(s) allocated from:
|
||||
#0 0x7f05e8cb4a07 in __interceptor_malloc (/lib64/libasan.so.6+0xb4a07)
|
||||
#1 0x7f05e85c0138 in slapi_ch_malloc (/usr/lib64/dirsrv/libslapd.so.0+0x1c0138)
|
||||
#2 0x7f05e109e481 in agmt_update_init_status ldap/servers/plugins/replication/repl5_agmt.c:2583
|
||||
#3 0x7f05e10a0aa5 in agmtlist_shutdown ldap/servers/plugins/replication/repl5_agmtlist.c:789
|
||||
#4 0x7f05e10ab6bc in multisupplier_stop ldap/servers/plugins/replication/repl5_init.c:844
|
||||
#5 0x7f05e10ab6bc in multisupplier_stop ldap/servers/plugins/replication/repl5_init.c:837
|
||||
#6 0x7f05e862507d in plugin_call_func ldap/servers/slapd/plugin.c:2001
|
||||
#7 0x7f05e8625be1 in plugin_call_one ldap/servers/slapd/plugin.c:1950
|
||||
#8 0x7f05e8625be1 in plugin_dependency_closeall ldap/servers/slapd/plugin.c:1844
|
||||
#9 0x55e1a7ff9815 in slapd_daemon ldap/servers/slapd/daemon.c:1275
|
||||
#10 0x55e1a7fd36ef in main (/usr/sbin/ns-slapd+0x3e6ef)
|
||||
#11 0x7f05e80295cf in __libc_start_call_main (/lib64/libc.so.6+0x295cf)
|
||||
#12 0x7f05e802967f in __libc_start_main_alias_2 (/lib64/libc.so.6+0x2967f)
|
||||
#13 0x55e1a7fd74a4 in _start (/usr/sbin/ns-slapd+0x424a4)
|
||||
|
||||
SUMMARY: AddressSanitizer: 24 byte(s) leaked in 1 allocation(s).
|
||||
```
|
||||
|
||||
Fix Description:
|
||||
Ensure `mods` is freed in the cleanup code.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6865
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6470
|
||||
|
||||
Reviewed by: @mreynolds389 (Thanks!)
|
||||
---
|
||||
ldap/servers/plugins/replication/repl5_agmt.c | 1 +
|
||||
1 file changed, 1 insertion(+)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
index 6ffb074d4..c6cfcda07 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
@@ -2653,6 +2653,7 @@ agmt_update_init_status(Repl_Agmt *ra)
|
||||
} else {
|
||||
PR_Unlock(ra->lock);
|
||||
}
|
||||
+ slapi_ch_free((void **)&mods);
|
||||
slapi_mod_done(&smod_start_time);
|
||||
slapi_mod_done(&smod_end_time);
|
||||
slapi_mod_done(&smod_status);
|
||||
--
|
||||
2.49.0
|
||||
|
||||
172
0064-Issue-6594-Add-test-for-numSubordinates-replication-.patch
Normal file
@ -0,0 +1,172 @@
|
||||
From 2c43201666a5da1ca0a0777ddeb3d5ed0d19ed10 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Mon, 28 Jul 2025 15:35:50 -0700
|
||||
Subject: [PATCH] Issue 6594 - Add test for numSubordinates replication
|
||||
consistency with tombstones (#6862)
|
||||
|
||||
Description: Add a comprehensive test to verify that numSubordinates and
|
||||
tombstoneNumSubordinates attributes are correctly replicated between
|
||||
instances when tombstone entries are present.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6594
|
||||
|
||||
Reviewed by: @progier389 (Thanks!)
|
||||
---
|
||||
.../numsubordinates_replication_test.py | 144 ++++++++++++++++++
|
||||
1 file changed, 144 insertions(+)
|
||||
create mode 100644 dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py b/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py
|
||||
new file mode 100644
|
||||
index 000000000..9ba10657d
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py
|
||||
@@ -0,0 +1,144 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+
|
||||
+import os
|
||||
+import logging
|
||||
+import pytest
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
+from lib389.replica import ReplicationManager
|
||||
+from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.topologies import topology_i2 as topo_i2
|
||||
+
|
||||
+
|
||||
+pytestmark = pytest.mark.tier1
|
||||
+
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
+if DEBUGGING:
|
||||
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+else:
|
||||
+ logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+
|
||||
+def test_numsubordinates_tombstone_replication_mismatch(topo_i2):
|
||||
+ """Test that numSubordinates values match between replicas after tombstone creation
|
||||
+
|
||||
+ :id: c43ecc7a-d706-42e8-9179-1ff7d0e7163a
|
||||
+ :setup: Two standalone instances
|
||||
+ :steps:
|
||||
+ 1. Create a container (organizational unit) on the first instance
|
||||
+ 2. Create a user object in that container
|
||||
+ 3. Delete the user object (this creates a tombstone)
|
||||
+ 4. Set up replication between the two instances
|
||||
+ 5. Wait for replication to complete
|
||||
+ 6. Check numSubordinates on both instances
|
||||
+ 7. Check tombstoneNumSubordinates on both instances
|
||||
+ 8. Verify that numSubordinates values match on both instances
|
||||
+ :expectedresults:
|
||||
+ 1. Container should be created successfully
|
||||
+ 2. User object should be created successfully
|
||||
+ 3. User object should be deleted successfully
|
||||
+ 4. Replication should be set up successfully
|
||||
+ 5. Replication should complete successfully
|
||||
+ 6. numSubordinates should be accessible on both instances
|
||||
+ 7. tombstoneNumSubordinates should be accessible on both instances
|
||||
+ 8. numSubordinates values should match on both instances
|
||||
+ """
|
||||
+
|
||||
+ instance1 = topo_i2.ins["standalone1"]
|
||||
+ instance2 = topo_i2.ins["standalone2"]
|
||||
+
|
||||
+ log.info("Create a container (organizational unit) on the first instance")
|
||||
+ ous1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX)
|
||||
+ container = ous1.create(properties={
|
||||
+ 'ou': 'test_container',
|
||||
+ 'description': 'Test container for numSubordinates replication test'
|
||||
+ })
|
||||
+ container_rdn = container.rdn
|
||||
+ log.info(f"Created container: {container_rdn}")
|
||||
+
|
||||
+ log.info("Create a user object in that container")
|
||||
+ users1 = UserAccounts(instance1, DEFAULT_SUFFIX, rdn=f"ou={container_rdn}")
|
||||
+ test_user = users1.create_test_user(uid=1001)
|
||||
+ log.info(f"Created user: {test_user.dn}")
|
||||
+
|
||||
+ log.info("Checking initial numSubordinates on container")
|
||||
+ container_obj1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX).get(container_rdn)
|
||||
+ initial_numsubordinates = container_obj1.get_attr_val_int('numSubordinates')
|
||||
+ log.info(f"Initial numSubordinates: {initial_numsubordinates}")
|
||||
+ assert initial_numsubordinates == 1
|
||||
+
|
||||
+ log.info("Delete the user object (this creates a tombstone)")
|
||||
+ test_user.delete()
|
||||
+
|
||||
+ log.info("Checking numSubordinates after deletion")
|
||||
+ after_delete_numsubordinates = container_obj1.get_attr_val_int('numSubordinates')
|
||||
+ log.info(f"numSubordinates after deletion: {after_delete_numsubordinates}")
|
||||
+
|
||||
+ log.info("Checking tombstoneNumSubordinates after deletion")
|
||||
+ try:
|
||||
+ tombstone_numsubordinates = container_obj1.get_attr_val_int('tombstoneNumSubordinates')
|
||||
+ log.info(f"tombstoneNumSubordinates: {tombstone_numsubordinates}")
|
||||
+ except Exception as e:
|
||||
+ log.info(f"tombstoneNumSubordinates not found or error: {e}")
|
||||
+ tombstone_numsubordinates = 0
|
||||
+
|
||||
+ log.info("Set up replication between the two instances")
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+ repl.create_first_supplier(instance1)
|
||||
+ repl.join_supplier(instance1, instance2)
|
||||
+
|
||||
+ log.info("Wait for replication to complete")
|
||||
+ repl.wait_for_replication(instance1, instance2)
|
||||
+
|
||||
+ log.info("Check numSubordinates on both instances")
|
||||
+ container_obj1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX).get(container_rdn)
|
||||
+ numsubordinates_instance1 = container_obj1.get_attr_val_int('numSubordinates')
|
||||
+ log.info(f"numSubordinates on instance1: {numsubordinates_instance1}")
|
||||
+
|
||||
+ container_obj2 = OrganizationalUnits(instance2, DEFAULT_SUFFIX).get(container_rdn)
|
||||
+ numsubordinates_instance2 = container_obj2.get_attr_val_int('numSubordinates')
|
||||
+ log.info(f"numSubordinates on instance2: {numsubordinates_instance2}")
|
||||
+
|
||||
+ log.info("Check tombstoneNumSubordinates on both instances")
|
||||
+ try:
|
||||
+ tombstone_numsubordinates_instance1 = container_obj1.get_attr_val_int('tombstoneNumSubordinates')
|
||||
+ log.info(f"tombstoneNumSubordinates on instance1: {tombstone_numsubordinates_instance1}")
|
||||
+ except Exception as e:
|
||||
+ log.info(f"tombstoneNumSubordinates not found on instance1: {e}")
|
||||
+ tombstone_numsubordinates_instance1 = 0
|
||||
+
|
||||
+ try:
|
||||
+ tombstone_numsubordinates_instance2 = container_obj2.get_attr_val_int('tombstoneNumSubordinates')
|
||||
+ log.info(f"tombstoneNumSubordinates on instance2: {tombstone_numsubordinates_instance2}")
|
||||
+ except Exception as e:
|
||||
+ log.info(f"tombstoneNumSubordinates not found on instance2: {e}")
|
||||
+ tombstone_numsubordinates_instance2 = 0
|
||||
+
|
||||
+ log.info("Verify that numSubordinates values match on both instances")
|
||||
+ log.info(f"Comparison: instance1 numSubordinates={numsubordinates_instance1}, "
|
||||
+ f"instance2 numSubordinates={numsubordinates_instance2}")
|
||||
+ log.info(f"Comparison: instance1 tombstoneNumSubordinates={tombstone_numsubordinates_instance1}, "
|
||||
+ f"instance2 tombstoneNumSubordinates={tombstone_numsubordinates_instance2}")
|
||||
+
|
||||
+ assert numsubordinates_instance1 == numsubordinates_instance2, (
|
||||
+ f"numSubordinates mismatch: instance1 has {numsubordinates_instance1}, "
|
||||
+ f"instance2 has {numsubordinates_instance2}. "
|
||||
+ )
|
||||
+ assert tombstone_numsubordinates_instance1 == tombstone_numsubordinates_instance2, (
|
||||
+ f"tombstoneNumSubordinates mismatch: instance1 has {tombstone_numsubordinates_instance1}, "
|
||||
+ f"instance2 has {tombstone_numsubordinates_instance2}. "
|
||||
+ )
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main("-s %s" % CURRENT_FILE)
|
||||
\ No newline at end of file
|
||||
--
|
||||
2.49.0
|
||||
|
||||
1448
0065-Issue-6919-numSubordinates-tombstoneNumSubordinates-.patch
Normal file
File diff suppressed because it is too large
@ -1,4 +1,4 @@
|
||||
From 4667e657fe4d3eab1e900cc1f278bc9a9e2fcf0a Mon Sep 17 00:00:00 2001
|
||||
From bedd7703de7717e95b8800c0a44d9f55446bf3d5 Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Mon, 18 Aug 2025 09:13:12 +0200
|
||||
Subject: [PATCH] Issue 6928 - The parentId attribute is indexed with improper
|
||||
@ -22,9 +22,6 @@ Fixes: https://github.com/389ds/389-ds-base/issues/6928
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6915
|
||||
|
||||
Reviewed by: @progier389, @tbordaz (Thanks!)
|
||||
|
||||
(cherry picked from commit fd45579f8111c371852686dafe761fe535a5bef3)
|
||||
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
---
|
||||
dirsrvtests/tests/suites/basic/basic_test.py | 2 +-
|
||||
.../healthcheck/health_system_indexes_test.py | 456 ++++++++++++++++++
|
||||
@ -36,17 +33,17 @@ Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
create mode 100644 dirsrvtests/tests/suites/healthcheck/health_system_indexes_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
|
||||
index 8bf89cb33..4a45f9dbe 100644
|
||||
index 8f5de91aa..13eb9266f 100644
|
||||
--- a/dirsrvtests/tests/suites/basic/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
|
||||
@@ -461,7 +461,7 @@ def test_basic_db2index(topology_st):
|
||||
@@ -734,7 +734,7 @@ def test_basic_db2index(topology_st):
|
||||
topology_st.standalone.db2index(bename=DEFAULT_BENAME, attrs=indexes)
|
||||
log.info('Checking the server logs for %d backend indexes INFO' % numIndexes)
|
||||
for indexNum, index in enumerate(indexes):
|
||||
- if index in "entryrdn":
|
||||
+ if index in ["entryrdn", "ancestorid"]:
|
||||
assert topology_st.standalone.searchErrorsLog(
|
||||
'INFO - bdb_db2index - ' + DEFAULT_BENAME + ':' + ' Indexing ' + index)
|
||||
f'INFO - {dbprefix}_db2index - {DEFAULT_BENAME}: Indexing {index}')
|
||||
else:
|
||||
diff --git a/dirsrvtests/tests/suites/healthcheck/health_system_indexes_test.py b/dirsrvtests/tests/suites/healthcheck/health_system_indexes_test.py
|
||||
new file mode 100644
|
||||
@ -511,10 +508,10 @@ index 000000000..61972d60c
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
|
||||
index 2ddaf5fb3..c2754adf8 100644
|
||||
index 1def62aed..75cacaa09 100644
|
||||
--- a/ldap/ldif/template-dse.ldif.in
|
||||
+++ b/ldap/ldif/template-dse.ldif.in
|
||||
@@ -973,6 +973,14 @@ cn: aci
|
||||
@@ -990,6 +990,14 @@ cn: aci
|
||||
nssystemindex: true
|
||||
nsindextype: pres
|
||||
|
||||
@ -530,7 +527,7 @@ index 2ddaf5fb3..c2754adf8 100644
|
||||
objectclass: top
|
||||
objectclass: nsIndex
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/instance.c b/ldap/servers/slapd/back-ldbm/instance.c
|
||||
index e82cd17cc..f6a9817a7 100644
|
||||
index e57962248..f9a546661 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/instance.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/instance.c
|
||||
@@ -16,7 +16,7 @@
|
||||
@ -564,7 +561,7 @@ index e82cd17cc..f6a9817a7 100644
|
||||
return e;
|
||||
}
|
||||
|
||||
@@ -184,24 +190,24 @@ ldbm_instance_create_default_indexes(backend *be)
|
||||
@@ -184,47 +190,47 @@ ldbm_instance_create_default_indexes(backend *be)
|
||||
* ACL routines.
|
||||
*/
|
||||
if (entryrdn_get_switch()) { /* subtree-rename: on */
|
||||
@ -594,10 +591,6 @@ index e82cd17cc..f6a9817a7 100644
|
||||
ldbm_instance_config_add_index_entry(inst, e, flags);
|
||||
slapi_entry_free(e);
|
||||
|
||||
@@ -211,26 +217,26 @@ ldbm_instance_create_default_indexes(backend *be)
|
||||
slapi_entry_free(e);
|
||||
#endif
|
||||
|
||||
- e = ldbm_instance_init_config_entry(LDBM_NUMSUBORDINATES_STR, "pres", 0, 0, 0);
|
||||
+ e = ldbm_instance_init_config_entry(LDBM_NUMSUBORDINATES_STR, "pres", 0, 0, 0, 0);
|
||||
ldbm_instance_config_add_index_entry(inst, e, flags);
|
||||
@ -626,7 +619,7 @@ index e82cd17cc..f6a9817a7 100644
|
||||
attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL);
|
||||
slapi_entry_free(e);
|
||||
|
||||
@@ -239,7 +245,7 @@ ldbm_instance_create_default_indexes(backend *be)
|
||||
@@ -233,7 +239,7 @@ ldbm_instance_create_default_indexes(backend *be)
|
||||
* ancestorid is special, there is actually no such attr type
|
||||
* but we still want to use the attr index file APIs.
|
||||
*/
|
||||
@ -636,20 +629,20 @@ index e82cd17cc..f6a9817a7 100644
|
||||
slapi_entry_free(e);
|
||||
}
|
||||
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
|
||||
index cee073ea7..a97def17e 100644
|
||||
index 02169384a..793293019 100644
|
||||
--- a/src/lib389/lib389/backend.py
|
||||
+++ b/src/lib389/lib389/backend.py
|
||||
@@ -34,7 +34,8 @@ from lib389.encrypted_attributes import EncryptedAttr, EncryptedAttrs
|
||||
# This is for sample entry creation.
|
||||
from lib389.configurations import get_sample_entries
|
||||
|
||||
-from lib389.lint import DSBLE0001, DSBLE0002, DSBLE0003, DSVIRTLE0001, DSCLLE0001
|
||||
+from lib389.lint import DSBLE0001, DSBLE0002, DSBLE0003, DSBLE0007, DSVIRTLE0001, DSCLLE0001
|
||||
-from lib389.lint import DSBLE0001, DSBLE0002, DSBLE0003, DSBLE0004, DSBLE0005, DSBLE0006, DSVIRTLE0001, DSCLLE0001
|
||||
+from lib389.lint import DSBLE0001, DSBLE0002, DSBLE0003, DSBLE0004, DSBLE0005, DSBLE0006, DSBLE0007, DSVIRTLE0001, DSCLLE0001
|
||||
+from lib389.plugins import USNPlugin
|
||||
|
||||
|
||||
class BackendLegacy(object):
|
||||
@@ -531,6 +532,136 @@ class Backend(DSLdapObject):
|
||||
@@ -607,6 +608,136 @@ class Backend(DSLdapObject):
|
||||
self._log.debug(f"_lint_cl_trimming - backend ({suffix}) is not replicated")
|
||||
pass
|
||||
|
||||
@ -787,11 +780,11 @@ index cee073ea7..a97def17e 100644
|
||||
"""Creates sample entries under nsslapd-suffix value
|
||||
|
||||
diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
|
||||
index 3d3c79ea3..1e48c790d 100644
|
||||
index 460bf64fc..bbfa0ca5b 100644
|
||||
--- a/src/lib389/lib389/lint.py
|
||||
+++ b/src/lib389/lib389/lint.py
|
||||
@@ -57,6 +57,35 @@ DSBLE0003 = {
|
||||
'fix': """You need to import an LDIF file, or create the suffix entry, in order to initialize the database."""
|
||||
@@ -86,6 +86,35 @@ DSBLE0006 = {
|
||||
'fix': 'Migrate the backend to MDB.'
|
||||
}
|
||||
|
||||
+DSBLE0007 = {
|
||||
@ -824,8 +817,8 @@ index 3d3c79ea3..1e48c790d 100644
|
||||
+}
|
||||
+
|
||||
# Config checks
|
||||
DSCLE0001 = {
|
||||
'dsle': 'DSCLE0001',
|
||||
DSCLE0002 = {
|
||||
'dsle': 'DSCLE0002',
|
||||
--
|
||||
2.51.1
|
||||
2.49.0
|
||||
|
||||
597
0067-Issue-6910-Fix-latest-coverity-issues.patch
Normal file
@ -0,0 +1,597 @@
|
||||
From d2f068809ad1aa996e12aa351a71f6b2692c2c4f Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 28 Jul 2025 17:12:33 -0400
|
||||
Subject: [PATCH] Issue 6910 - Fix latest coverity issues
|
||||
|
||||
Description:
|
||||
|
||||
Fix various coverity/ASAN warnings:
|
||||
|
||||
- CID 1618831: Resource leak (RESOURCE_LEAK) - bdb_layer.c
|
||||
- CID 1612606: Resource leak (RESOURCE_LEAK) - log.c
|
||||
- CID 1611461: Uninitialized pointer read (UNINIT) - repl5_agmt.c
|
||||
- CID 1568589: Dereference before null check (REVERSE_INULL) - repl5_agmt.c
|
||||
- CID 1590353: Logically dead code (DEADCODE) - repl5_agmt.c
|
||||
- CID 1611460: Logically dead code (DEADCODE) - control.c
|
||||
- CID 1610568: Dereference after null check (FORWARD_NULL) - modify.c
|
||||
- CID 1591259: Out-of-bounds read (OVERRUN) - memberof.c
|
||||
- CID 1550231: Unsigned compared against 0 (NO_EFFECT) - memberof_config.c
|
||||
- CID 1548904: Overflowed constant (INTEGER_OVERFLOW) - ch_malloc.c
|
||||
- CID 1548902: Overflowed constant (INTEGER_OVERFLOW) - dse.c
|
||||
- CID 1548900: Overflowed return value (INTEGER_OVERFLOW) - acct_util.c
|
||||
- CID 1548898: Overflowed constant (INTEGER_OVERFLOW) - parents.c
|
||||
- CID 1546849: Resource leak (RESOURCE_LEAK) - referint.c
|
||||
- ASAN - Use after free - automember.c
|
||||
|
||||
Relates: http://github.com/389ds/389-ds-base/issues/6910
|
||||
|
||||
Reviewed by: progier & spichugi(Thanks!)
|
||||
---
|
||||
ldap/servers/plugins/acctpolicy/acct_util.c | 6 ++-
|
||||
ldap/servers/plugins/automember/automember.c | 9 +++--
|
||||
ldap/servers/plugins/memberof/memberof.c | 15 +++++--
|
||||
.../plugins/memberof/memberof_config.c | 26 ++++++++++---
|
||||
ldap/servers/plugins/referint/referint.c | 11 +++++-
|
||||
ldap/servers/plugins/replication/repl5_agmt.c | 39 ++++++++-----------
|
||||
.../slapd/back-ldbm/db-bdb/bdb_import.c | 5 ++-
|
||||
.../back-ldbm/db-bdb/bdb_instance_config.c | 3 +-
|
||||
.../slapd/back-ldbm/db-bdb/bdb_layer.c | 13 +++++--
|
||||
ldap/servers/slapd/back-ldbm/parents.c | 4 +-
|
||||
ldap/servers/slapd/ch_malloc.c | 4 +-
|
||||
ldap/servers/slapd/control.c | 28 +++++++++++++
|
||||
ldap/servers/slapd/dse.c | 4 +-
|
||||
ldap/servers/slapd/log.c | 5 ++-
|
||||
ldap/servers/slapd/modify.c | 6 +--
|
||||
ldap/servers/slapd/passwd_extop.c | 2 +-
|
||||
ldap/servers/slapd/unbind.c | 12 ++++--
|
||||
17 files changed, 134 insertions(+), 58 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/acctpolicy/acct_util.c b/ldap/servers/plugins/acctpolicy/acct_util.c
|
||||
index b27eeaff1..7735d10e6 100644
|
||||
--- a/ldap/servers/plugins/acctpolicy/acct_util.c
|
||||
+++ b/ldap/servers/plugins/acctpolicy/acct_util.c
|
||||
@@ -17,7 +17,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
Contributors:
|
||||
Hewlett-Packard Development Company, L.P.
|
||||
|
||||
-Copyright (C) 2021 Red Hat, Inc.
|
||||
+Copyright (C) 2025 Red Hat, Inc.
|
||||
******************************************************************************/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -248,6 +248,10 @@ gentimeToEpochtime(char *gentimestr)
|
||||
|
||||
/* Turn tm object into local epoch time */
|
||||
epochtime = mktime(&t);
|
||||
+ if (epochtime == (time_t) -1) {
|
||||
+ /* mktime failed */
|
||||
+ return 0;
|
||||
+ }
|
||||
|
||||
/* Turn local epoch time into GMT epoch time */
|
||||
epochtime -= zone_offset;
|
||||
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
|
||||
index f900db7f2..9eade495e 100644
|
||||
--- a/ldap/servers/plugins/automember/automember.c
|
||||
+++ b/ldap/servers/plugins/automember/automember.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2022 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -1756,9 +1756,10 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
|
||||
mod_pb = slapi_pblock_new();
|
||||
/* Do a single mod with error overrides for DEL/ADD */
|
||||
- result = slapi_single_modify_internal_override(mod_pb, slapi_sdn_new_dn_byval(group_dn), mods,
|
||||
- automember_get_plugin_id(), 0);
|
||||
-
|
||||
+ Slapi_DN *sdn = slapi_sdn_new_normdn_byref(group_dn);
|
||||
+ result = slapi_single_modify_internal_override(mod_pb, sdn, mods,
|
||||
+ automember_get_plugin_id(), 0);
|
||||
+ slapi_sdn_free(&sdn);
|
||||
if(add){
|
||||
if (result != LDAP_SUCCESS) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index 3775e52c9..82cb60c96 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2021 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -1657,6 +1657,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
|
||||
/* We already did the search for this backend, don't
|
||||
* do it again when we fall through */
|
||||
do_suffix_search = PR_FALSE;
|
||||
+ slapi_pblock_init(search_pb);
|
||||
}
|
||||
}
|
||||
} else if (!all_backends) {
|
||||
@@ -3755,6 +3756,10 @@ memberof_replace_list(Slapi_PBlock *pb, MemberOfConfig *config, Slapi_DN *group_
|
||||
|
||||
pre_index++;
|
||||
} else {
|
||||
+ if (pre_index >= pre_total || post_index >= post_total) {
|
||||
+ /* Don't overrun pre_array/post_array */
|
||||
+ break;
|
||||
+ }
|
||||
/* decide what to do */
|
||||
int cmp = memberof_compare(
|
||||
config,
|
||||
@@ -4445,10 +4450,12 @@ memberof_add_memberof_attr(LDAPMod **mods, const char *dn, char *add_oc)
|
||||
|
||||
while (1) {
|
||||
slapi_pblock_init(mod_pb);
|
||||
-
|
||||
+ Slapi_DN *sdn = slapi_sdn_new_normdn_byref(dn);
|
||||
/* Internal mod with error overrides for DEL/ADD */
|
||||
- rc = slapi_single_modify_internal_override(mod_pb, slapi_sdn_new_normdn_byref(dn), single_mod,
|
||||
- memberof_get_plugin_id(), SLAPI_OP_FLAG_BYPASS_REFERRALS);
|
||||
+ rc = slapi_single_modify_internal_override(mod_pb, sdn, single_mod,
|
||||
+ memberof_get_plugin_id(),
|
||||
+ SLAPI_OP_FLAG_BYPASS_REFERRALS);
|
||||
+ slapi_sdn_free(&sdn);
|
||||
if (rc == LDAP_OBJECT_CLASS_VIOLATION) {
|
||||
if (!add_oc || added_oc) {
|
||||
/*
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof_config.c b/ldap/servers/plugins/memberof/memberof_config.c
|
||||
index fca5251d0..bd7d25140 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof_config.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof_config.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2021 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -568,18 +568,32 @@ memberof_apply_config(Slapi_PBlock *pb __attribute__((unused)),
|
||||
slapi_filter_free(theConfig.group_filter, 1);
|
||||
|
||||
if (num_groupattrs > 1) {
|
||||
- int bytes_out = 0;
|
||||
- int filter_str_len = groupattr_name_len + (num_groupattrs * 4) + 4;
|
||||
+ size_t bytes_out = 0;
|
||||
+ size_t filter_str_len = groupattr_name_len + (num_groupattrs * 4) + 4;
|
||||
+ int32_t rc = 0;
|
||||
|
||||
/* Allocate enough space for the filter */
|
||||
filter_str = slapi_ch_malloc(filter_str_len);
|
||||
|
||||
/* Add beginning of filter. */
|
||||
- bytes_out = snprintf(filter_str, filter_str_len - bytes_out, "(|");
|
||||
+ rc = snprintf(filter_str, filter_str_len - bytes_out, "(|");
|
||||
+ if (rc < 0) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM, "snprintf unexpectly failed in memberof_apply_config.\n");
|
||||
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
|
||||
+ goto done;
|
||||
+ } else {
|
||||
+ bytes_out = rc;
|
||||
+ }
|
||||
|
||||
/* Add filter section for each groupattr. */
|
||||
- for (i = 0; theConfig.groupattrs && theConfig.groupattrs[i]; i++) {
|
||||
- bytes_out += snprintf(filter_str + bytes_out, filter_str_len - bytes_out, "(%s=*)", theConfig.groupattrs[i]);
|
||||
+ for (size_t i=0; theConfig.groupattrs && theConfig.groupattrs[i]; i++) {
|
||||
+ int32_t bytes_read = snprintf(filter_str + bytes_out, filter_str_len - bytes_out, "(%s=*)", theConfig.groupattrs[i]);
|
||||
+ if (bytes_read<0) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM, "snprintf unexpectly failed in memberof_apply_config.\n");
|
||||
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
|
||||
+ goto done;
|
||||
+ }
|
||||
+ bytes_out += bytes_read;
|
||||
}
|
||||
|
||||
/* Add end of filter. */
|
||||
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
|
||||
index a2f2e4706..cf79f973e 100644
|
||||
--- a/ldap/servers/plugins/referint/referint.c
|
||||
+++ b/ldap/servers/plugins/referint/referint.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2021 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -1492,6 +1492,15 @@ referint_thread_func(void *arg __attribute__((unused)))
|
||||
}
|
||||
|
||||
ptoken = ldap_utf8strtok_r(NULL, delimiter, &iter);
|
||||
+ if (ptoken == NULL) {
|
||||
+ /* Invalid line in referint log, skip it */
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, REFERINT_PLUGIN_SUBSYSTEM,
|
||||
+ "Skipping invalid referint log line: (%s)\n", thisline);
|
||||
+ slapi_sdn_free(&sdn);
|
||||
+ continue;
|
||||
+ }
|
||||
+
|
||||
+ slapi_sdn_free(&tmpsuperior);
|
||||
if (!strcasecmp(ptoken, "NULL")) {
|
||||
tmpsuperior = NULL;
|
||||
} else {
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
index c6cfcda07..9b2d82547 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2021 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -2628,31 +2628,26 @@ agmt_update_init_status(Repl_Agmt *ra)
|
||||
mod_idx++;
|
||||
}
|
||||
|
||||
- if (nb_mods) {
|
||||
- /* it is ok to release the lock here because we are done with the agreement data.
|
||||
- we have to do it before issuing the modify operation because it causes
|
||||
- agmtlist_notify_all to be called which uses the same lock - hence the deadlock */
|
||||
- PR_Unlock(ra->lock);
|
||||
-
|
||||
- pb = slapi_pblock_new();
|
||||
- mods[nb_mods] = NULL;
|
||||
+ /* it is ok to release the lock here because we are done with the agreement data.
|
||||
+ we have to do it before issuing the modify operation because it causes
|
||||
+ agmtlist_notify_all to be called which uses the same lock - hence the deadlock */
|
||||
+ PR_Unlock(ra->lock);
|
||||
|
||||
- slapi_modify_internal_set_pb_ext(pb, ra->dn, mods, NULL, NULL,
|
||||
- repl_get_plugin_identity(PLUGIN_MULTISUPPLIER_REPLICATION), 0);
|
||||
- slapi_modify_internal_pb(pb);
|
||||
+ pb = slapi_pblock_new();
|
||||
+ mods[nb_mods] = NULL;
|
||||
|
||||
- slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
|
||||
- if (rc != LDAP_SUCCESS && rc != LDAP_NO_SUCH_ATTRIBUTE) {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "agmt_update_consumer_ruv - "
|
||||
- "%s: agmt_update_consumer_ruv: "
|
||||
- "failed to update consumer's RUV; LDAP error - %d\n",
|
||||
- ra->long_name, rc);
|
||||
- }
|
||||
+ slapi_modify_internal_set_pb_ext(pb, ra->dn, mods, NULL, NULL,
|
||||
+ repl_get_plugin_identity(PLUGIN_MULTISUPPLIER_REPLICATION), 0);
|
||||
+ slapi_modify_internal_pb(pb);
|
||||
|
||||
- slapi_pblock_destroy(pb);
|
||||
- } else {
|
||||
- PR_Unlock(ra->lock);
|
||||
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
|
||||
+ if (rc != LDAP_SUCCESS && rc != LDAP_NO_SUCH_ATTRIBUTE) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "agmt_update_consumer_ruv - "
|
||||
+ "%s: agmt_update_consumer_ruv: failed to update consumer's RUV; LDAP error - %d\n",
|
||||
+ ra->long_name, rc);
|
||||
}
|
||||
+
|
||||
+ slapi_pblock_destroy(pb);
|
||||
slapi_ch_free((void **)&mods);
|
||||
slapi_mod_done(&smod_start_time);
|
||||
slapi_mod_done(&smod_end_time);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
|
||||
index 39edb7d0e..2bb6b0267 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2020 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -947,6 +947,7 @@ bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job)
|
||||
EQ_PREFIX, (u_long)id);
|
||||
key.size++; /* include the null terminator */
|
||||
ret = NEW_IDL_NO_ALLID;
|
||||
+ idl_free(&children);
|
||||
children = idl_fetch(be, db_pid, &key, txn, ai_pid, &ret);
|
||||
if (ret != 0) {
|
||||
ldbm_nasty("bdb_ancestorid_new_idl_create_index", sourcefile, 13070, ret);
|
||||
@@ -957,6 +958,7 @@ bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job)
|
||||
if (job->flags & FLAG_ABORT) {
|
||||
import_log_notice(job, SLAPI_LOG_ERR, "bdb_ancestorid_new_idl_create_index",
|
||||
"ancestorid creation aborted.");
|
||||
+ idl_free(&children);
|
||||
ret = -1;
|
||||
break;
|
||||
}
|
||||
@@ -1290,6 +1292,7 @@ bdb_update_subordinatecounts(backend *be, ImportJob *job, DB_TXN *txn)
|
||||
}
|
||||
bdb_close_subcount_cursor(&c_entryrdn);
|
||||
bdb_close_subcount_cursor(&c_objectclass);
|
||||
+
|
||||
return ret;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c
|
||||
index bb515a23f..44a624fde 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2020 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -261,6 +261,7 @@ bdb_instance_cleanup(struct ldbm_instance *inst)
|
||||
if (inst_dirp && *inst_dir) {
|
||||
return_value = env->remove(env, inst_dirp, 0);
|
||||
} else {
|
||||
+ slapi_ch_free((void **)&env);
|
||||
return_value = -1;
|
||||
}
|
||||
if (return_value == EBUSY) {
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
index f2b380f9b..ccd7c163e 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2023 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -2034,9 +2034,13 @@ bdb_pre_close(struct ldbminfo *li)
|
||||
conf = (bdb_config *)li->li_dblayer_config;
|
||||
bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
|
||||
|
||||
+ if (pEnv == NULL) {
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
pthread_mutex_lock(&pEnv->bdb_thread_count_lock);
|
||||
|
||||
- if (conf->bdb_stop_threads || !pEnv) {
|
||||
+ if (conf->bdb_stop_threads) {
|
||||
/* already stopped. do nothing... */
|
||||
goto timeout_escape;
|
||||
}
|
||||
@@ -2210,6 +2214,7 @@ bdb_remove_env(struct ldbminfo *li)
|
||||
}
|
||||
if (NULL == li) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "bdb_remove_env", "No ldbm info is given\n");
|
||||
+ slapi_ch_free((void **)&env);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -2219,10 +2224,11 @@ bdb_remove_env(struct ldbminfo *li)
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR,
|
||||
"bdb_remove_env", "Failed to remove DB environment files. "
|
||||
- "Please remove %s/__db.00# (# is 1 through 6)\n",
|
||||
+ "Please remove %s/__db.00# (# is 1 through 6)\n",
|
||||
home_dir);
|
||||
}
|
||||
}
|
||||
+ slapi_ch_free((void **)&env);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@@ -6297,6 +6303,7 @@ bdb_back_ctrl(Slapi_Backend *be, int cmd, void *info)
|
||||
db->close(db, 0);
|
||||
rc = bdb_db_remove_ex((bdb_db_env *)priv->dblayer_env, path, NULL, PR_TRUE);
|
||||
inst->inst_changelog = NULL;
|
||||
+ slapi_ch_free_string(&path);
|
||||
slapi_ch_free_string(&instancedir);
|
||||
}
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/parents.c b/ldap/servers/slapd/back-ldbm/parents.c
|
||||
index 31107591e..52c665ca4 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/parents.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/parents.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -123,7 +123,7 @@ parent_update_on_childchange(modify_context *mc, int op, size_t *new_sub_count)
|
||||
/* Now compute the new value */
|
||||
if ((PARENTUPDATE_ADD == op) || (PARENTUPDATE_RESURECT == op)) {
|
||||
current_sub_count++;
|
||||
- } else {
|
||||
+ } else if (current_sub_count > 0) {
|
||||
current_sub_count--;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/ch_malloc.c b/ldap/servers/slapd/ch_malloc.c
|
||||
index 75e791135..bacbc9371 100644
|
||||
--- a/ldap/servers/slapd/ch_malloc.c
|
||||
+++ b/ldap/servers/slapd/ch_malloc.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -234,7 +234,7 @@ slapi_ch_bvecdup(struct berval **v)
|
||||
++i;
|
||||
newberval = (struct berval **)slapi_ch_malloc((i + 1) * sizeof(struct berval *));
|
||||
newberval[i] = NULL;
|
||||
- while (i-- > 0) {
|
||||
+ while (i > 0 && i-- > 0) {
|
||||
newberval[i] = slapi_ch_bvdup(v[i]);
|
||||
}
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/control.c b/ldap/servers/slapd/control.c
|
||||
index 538a387da..75f228916 100644
|
||||
--- a/ldap/servers/slapd/control.c
|
||||
+++ b/ldap/servers/slapd/control.c
|
||||
@@ -166,6 +166,34 @@ slapi_get_supported_controls_copy(char ***ctrloidsp, unsigned long **ctrlopsp)
|
||||
return (0);
|
||||
}
|
||||
|
||||
+int
|
||||
+create_sessiontracking_ctrl(const char *session_tracking_id, LDAPControl **session_tracking_ctrl)
|
||||
+{
|
||||
+ BerElement *ctrlber = NULL;
|
||||
+ char *undefined_sid = "undefined sid";
|
||||
+ const char *sid;
|
||||
+ int rc = 0;
|
||||
+ LDAPControl *ctrl = NULL;
|
||||
+
|
||||
+ if (session_tracking_id) {
|
||||
+ sid = session_tracking_id;
|
||||
+ } else {
|
||||
+ sid = undefined_sid;
|
||||
+ }
|
||||
+ ctrlber = ber_alloc();
|
||||
+ if ((rc = ber_printf( ctrlber, "{nnno}", sid, strlen(sid)) == LBER_ERROR)) {
|
||||
+ goto done;
|
||||
+ }
|
||||
+ slapi_build_control(LDAP_CONTROL_X_SESSION_TRACKING, ctrlber, 0, &ctrl);
|
||||
+ *session_tracking_ctrl = ctrl;
|
||||
+
|
||||
+done:
|
||||
+ if (ctrlber) {
|
||||
+ ber_free(ctrlber, 1);
|
||||
+ }
|
||||
+ return rc;
|
||||
+}
|
||||
+
|
||||
/* Parse the Session Tracking control
|
||||
* see https://datatracker.ietf.org/doc/html/draft-wahl-ldap-session-03
|
||||
* LDAPString ::= OCTET STRING -- UTF-8 encoded
|
||||
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
|
||||
index e3157c1ce..9994c6911 100644
|
||||
--- a/ldap/servers/slapd/dse.c
|
||||
+++ b/ldap/servers/slapd/dse.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -637,7 +637,7 @@ dse_updateNumSubordinates(Slapi_Entry *entry, int op)
|
||||
/* Now compute the new value */
|
||||
if (SLAPI_OPERATION_ADD == op) {
|
||||
current_sub_count++;
|
||||
- } else {
|
||||
+ } else if (current_sub_count > 0) {
|
||||
current_sub_count--;
|
||||
}
|
||||
{
|
||||
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
|
||||
index c4aa99332..121585fb9 100644
|
||||
--- a/ldap/servers/slapd/log.c
|
||||
+++ b/ldap/servers/slapd/log.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005-2024 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* Copyright (C) 2010 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -199,6 +199,7 @@ compress_log_file(char *log_name, int32_t mode)
|
||||
|
||||
if ((source = fopen(log_name, "r")) == NULL) {
|
||||
/* Failed to open log file */
|
||||
+ /* coverity[leaked_storage] gzclose does close FD */
|
||||
gzclose(outfile);
|
||||
return -1;
|
||||
}
|
||||
@@ -209,11 +210,13 @@ compress_log_file(char *log_name, int32_t mode)
|
||||
if (bytes_written == 0)
|
||||
{
|
||||
fclose(source);
|
||||
+ /* coverity[leaked_storage] gzclose does close FD */
|
||||
gzclose(outfile);
|
||||
return -1;
|
||||
}
|
||||
bytes_read = fread(buf, 1, LOG_CHUNK, source);
|
||||
}
|
||||
+ /* coverity[leaked_storage] gzclose does close FD */
|
||||
gzclose(outfile);
|
||||
fclose(source);
|
||||
PR_Delete(log_name); /* remove the old uncompressed log */
|
||||
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
|
||||
index 8241fce28..af6771859 100644
|
||||
--- a/ldap/servers/slapd/modify.c
|
||||
+++ b/ldap/servers/slapd/modify.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2009 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* Copyright (C) 2009, 2010 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -498,7 +498,7 @@ slapi_modify_internal_set_pb_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod
|
||||
*
|
||||
* Any other errors encountered during the operation will be returned as-is.
|
||||
*/
|
||||
-int
|
||||
+int
|
||||
slapi_single_modify_internal_override(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod **mod, Slapi_ComponentId *plugin_id, int op_flags)
|
||||
{
|
||||
int rc = 0;
|
||||
@@ -512,7 +512,7 @@ slapi_single_modify_internal_override(Slapi_PBlock *pb, const Slapi_DN *sdn, LDA
|
||||
!pb ? "pb " : "",
|
||||
!sdn ? "sdn " : "",
|
||||
!mod ? "mod " : "",
|
||||
- !mod[0] ? "mod[0] " : "");
|
||||
+ !mod || !mod[0] ? "mod[0] " : "");
|
||||
|
||||
return LDAP_PARAM_ERROR;
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c
|
||||
index 4d185f8dd..ef2cb69cf 100644
|
||||
--- a/ldap/servers/slapd/passwd_extop.c
|
||||
+++ b/ldap/servers/slapd/passwd_extop.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
diff --git a/ldap/servers/slapd/unbind.c b/ldap/servers/slapd/unbind.c
|
||||
index fa8cd649f..c4e7a5efd 100644
|
||||
--- a/ldap/servers/slapd/unbind.c
|
||||
+++ b/ldap/servers/slapd/unbind.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -112,8 +112,12 @@ do_unbind(Slapi_PBlock *pb)
|
||||
/* pass the unbind to all backends */
|
||||
be_unbindall(pb_conn, operation);
|
||||
|
||||
-free_and_return:;
|
||||
+free_and_return:
|
||||
|
||||
- /* close the connection to the client */
|
||||
- disconnect_server(pb_conn, operation->o_connid, operation->o_opid, SLAPD_DISCONNECT_UNBIND, 0);
|
||||
+ /* close the connection to the client after refreshing the operation */
|
||||
+ slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
|
||||
+ disconnect_server(pb_conn,
|
||||
+ operation ? operation->o_connid : -1,
|
||||
+ operation ? operation->o_opid : -1,
|
||||
+ SLAPD_DISCONNECT_UNBIND, 0);
|
||||
}
|
||||
--
|
||||
2.49.0
|
||||
|
||||
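One of the hunks above (memberof_config.c) replaces unchecked snprintf arithmetic with explicit return-value checks while it assembles an OR presence filter over the configured group attributes. The same construction, sketched in Python purely to show the intended output format; the function name and inputs are illustrative, not part of the patch.

def build_group_presence_filter(group_attrs):
    # Mirrors the C code: one presence clause per configured attribute,
    # OR-ed together when more than one attribute is present.
    if not group_attrs:
        raise ValueError("at least one group attribute is required")
    clauses = "".join(f"({attr}=*)" for attr in group_attrs)
    return clauses if len(group_attrs) == 1 else f"(|{clauses})"

# build_group_presence_filter(["member", "uniqueMember"])
# -> '(|(member=*)(uniqueMember=*))'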
394
0068-Issue-7014-memberOf-ignored-deferred-updates-with-LM.patch
Normal file
@ -0,0 +1,394 @@
|
||||
From 26c2f2fdd5ec3b152517946635b894474374aebd Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Tue, 23 Sep 2025 16:41:12 -0400
|
||||
Subject: [PATCH] Issue 7014 - memberOf - ignored deferred updates with LMDB
|
||||
|
||||
Description:
|
||||
|
||||
When processing the memberOf plugin configuration, simply ignore the
|
||||
deferred update settings if LMDB is in use. Log a message in the error
|
||||
log but do not reject the update because rejecting the update causes the
|
||||
server to not start up.
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/7014
|
||||
|
||||
Reviewed by: tbordaz & progier (Thanks!!)
|
||||
---
|
||||
.../memberof_deferred_lmdb_test.py | 128 ++++++++++++++++++
|
||||
.../memberof_deferred_repl_test.py | 2 +
|
||||
.../suites/memberof_plugin/regression_test.py | 7 +-
|
||||
ldap/servers/plugins/memberof/memberof.c | 11 +-
|
||||
ldap/servers/plugins/memberof/memberof.h | 1 +
|
||||
.../plugins/memberof/memberof_config.c | 14 +-
|
||||
.../plugins/posix-winsync/posix-wsp-ident.h | 2 -
|
||||
ldap/servers/slapd/slapi-private.h | 2 +
|
||||
ldap/servers/slapd/util.c | 41 ++++++
|
||||
9 files changed, 194 insertions(+), 14 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/memberof_plugin/memberof_deferred_lmdb_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/memberof_plugin/memberof_deferred_lmdb_test.py b/dirsrvtests/tests/suites/memberof_plugin/memberof_deferred_lmdb_test.py
|
||||
new file mode 100644
|
||||
index 000000000..0d9f793c1
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/memberof_plugin/memberof_deferred_lmdb_test.py
|
||||
@@ -0,0 +1,128 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import logging
|
||||
+import pytest
|
||||
+import os
|
||||
+import time
|
||||
+import ldap
|
||||
+from lib389._constants import *
|
||||
+from lib389.topologies import topology_st as topo
|
||||
+from lib389.plugins import MemberOfPlugin
|
||||
+from lib389.config import LMDB_LDBMConfig
|
||||
+from lib389.utils import get_default_db_lib
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.idm.group import Groups
|
||||
+
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+DEBUGGING = os.getenv('DEBUGGING', False)
|
||||
+if DEBUGGING:
|
||||
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+else:
|
||||
+ logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(get_default_db_lib() != "mdb", reason="Not supported over mdb")
|
||||
+def test_memberof_deferred_update_lmdb_rejection(topo):
|
||||
+ """Test that memberOf plugin rejects deferred update configuration with LMDB backend
|
||||
+
|
||||
+ :id: a7f079dd-d269-41ca-95ec-91428e77626f
|
||||
+ :setup: Standalone Instance with LMDB backend
|
||||
+ :steps:
|
||||
+ 1. Enable memberOf plugin
|
||||
+ 2. Try to set deferred_update to "on"
|
||||
+ 3. Check error log for appropriate error message
|
||||
+ :expectedresults:
|
||||
+ 1. Plugin enables successfully
|
||||
+ 2. Setting deferred_update fails
|
||||
+ 3. Error log contains "deferred_update is not supported with LMDB backend"
|
||||
+ """
|
||||
+
|
||||
+ inst = topo.standalone
|
||||
+
|
||||
+ # Enable memberOf plugin
|
||||
+ log.info("Step 1: Enabling memberOf plugin")
|
||||
+ memberof_plugin = MemberOfPlugin(inst)
|
||||
+ memberof_plugin.enable()
|
||||
+ log.info("✓ MemberOf plugin enabled")
|
||||
+ inst.deleteErrorLogs(restart=True)
|
||||
+
|
||||
+ # Try to set deferred_update to "on"
|
||||
+ log.info("Step 2: Attempting to set deferred_update to 'on'")
|
||||
+
|
||||
+ # Try to modify the plugin configuration
|
||||
+ plugin_dn = f"cn={PLUGIN_MEMBER_OF},cn=plugins,cn=config"
|
||||
+ memberof_plugin.set_memberofdeferredupdate('on')
|
||||
+
|
||||
+ # Check error log for appropriate error message
|
||||
+ log.info("Step 3: Checking error log for LMDB-specific error message")
|
||||
+ assert inst.ds_error_log.match(".*deferred_update is not supported with LMDB backend.*")
|
||||
+
|
||||
+ log.info("✓ Test completed successfully - memberOf plugin correctly rejects deferred update with LMDB backend")
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb")
|
||||
+def test_memberof_deferred_update_non_lmdb_success(topo):
|
||||
+ """Test that memberOf plugin allows deferred update configuration with non-LMDB backends
|
||||
+
|
||||
+ :id: a4b640c8-ef54-4cbf-8d1a-8b29fcdd59d1
|
||||
+ :setup: Standalone Instance with non-LMDB backend (BDB)
|
||||
+ :steps:
|
||||
+ 1. Enable memberOf plugin
|
||||
+ 2. Set deferred_update to "on"
|
||||
+ 3. Verify the operation succeeds
|
||||
+ 4. Verify deferred_update remains "on"
|
||||
+ :expectedresults:
|
||||
+ 1. Plugin enables successfully
|
||||
+ 2. Setting deferred_update succeeds
|
||||
+ 3. No error occurs
|
||||
+ 4. deferred_update is "on"
|
||||
+ """
|
||||
+
|
||||
+ inst = topo.standalone
|
||||
+
|
||||
+ # Enable memberOf plugin
|
||||
+ log.info("Step 1: Enabling memberOf plugin")
|
||||
+ memberof_plugin = MemberOfPlugin(inst)
|
||||
+ memberof_plugin.enable()
|
||||
+ log.info("✓ MemberOf plugin enabled")
|
||||
+ inst.deleteErrorLogs(restart=True)
|
||||
+
|
||||
+ # Set deferred_update to "on"
|
||||
+ log.info("Step 2: Setting deferred_update to 'on'")
|
||||
+
|
||||
+ # Try to modify the plugin configuration
|
||||
+ try:
|
||||
+ memberof_plugin.set_memberofdeferredupdate('on')
|
||||
+ log.info("✓ Successfully set deferred_update to 'on'")
|
||||
+ except Exception as e:
|
||||
+ assert False, f"Expected success when setting deferred_update with non-LMDB backend, got: {type(e).__name__}: {e}"
|
||||
+
|
||||
+ # Verify no error occurred
|
||||
+ log.info("Step 3: Verifying no error occurred")
|
||||
+ assert not inst.ds_error_log.match(".*deferred_update is not supported with LMDB backend.*")
|
||||
+
|
||||
+ log.info("✓ No LMDB-related error messages found")
|
||||
+
|
||||
+ # Verify deferred_update remains "on"
|
||||
+ log.info("Step 4: Verifying deferred_update is 'on'")
|
||||
+ current_deferred = memberof_plugin.get_memberofdeferredupdate()
|
||||
+ assert current_deferred is None or current_deferred.lower() == 'on', \
|
||||
+ f"Expected deferred_update to be 'on', got: {current_deferred}"
|
||||
+ log.info("✓ deferred_update is 'on'")
|
||||
+
|
||||
+ log.info("✓ Test completed successfully - memberOf plugin correctly allows deferred update with non-LMDB backend")
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main(["-s", CURRENT_FILE])
|
||||
+
|
||||
diff --git a/dirsrvtests/tests/suites/memberof_plugin/memberof_deferred_repl_test.py b/dirsrvtests/tests/suites/memberof_plugin/memberof_deferred_repl_test.py
|
||||
index e92df0661..25cbf4890 100644
|
||||
--- a/dirsrvtests/tests/suites/memberof_plugin/memberof_deferred_repl_test.py
|
||||
+++ b/dirsrvtests/tests/suites/memberof_plugin/memberof_deferred_repl_test.py
|
||||
@@ -17,10 +17,12 @@ from lib389.replica import Replicas
|
||||
from lib389.plugins import MemberOfPlugin
|
||||
from lib389.idm.user import UserAccounts
|
||||
from lib389.idm.group import Groups
|
||||
+from lib389.utils import get_default_db_lib
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
+@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb")
|
||||
def test_repl_deferred_updates(topo_m2):
|
||||
"""Test memberOf plugin deferred updates work in different types of
|
||||
replicated environments
|
||||
diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
index cb21c32a2..7983da1cb 100644
|
||||
--- a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2020 Red Hat, Inc.
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -28,6 +28,7 @@ from lib389.idm.domain import Domain
|
||||
from lib389.dirsrv_log import DirsrvErrorLog
|
||||
from lib389.dseldif import DSEldif
|
||||
from contextlib import suppress
|
||||
+from lib389.utils import get_default_db_lib
|
||||
|
||||
|
||||
# Skip on older versions
|
||||
@@ -1278,7 +1279,8 @@ def _kill_instance(inst, sig=signal.SIGTERM, delay=None):
|
||||
time.sleep(delay)
|
||||
os.kill(pid, signal.SIGKILL)
|
||||
|
||||
-def test_shutdown_on_deferred_memberof(topology_st):
|
||||
+@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb")
|
||||
+def test_shutdown_on_deferred_memberof(topology_st, request):
|
||||
"""This test checks that shutdown is handled properly if memberof updayes are deferred.
|
||||
|
||||
:id: c5629cae-15a0-11ee-8807-482ae39447e5
|
||||
@@ -1409,4 +1411,3 @@ if __name__ == '__main__':
|
||||
# -s for DEBUG mode
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main("-s %s" % CURRENT_FILE)
|
||||
-
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index 82cb60c96..e00ad8de8 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -1179,7 +1179,7 @@ memberof_postop_start(Slapi_PBlock *pb)
|
||||
memberof_rlock_config();
|
||||
mainConfig = memberof_get_config();
|
||||
/* if the update of the members is deferred then allocate mutex/cv */
|
||||
- if (mainConfig->deferred_update) {
|
||||
+ if (mainConfig->deferred_update && !mainConfig->is_lmdb) {
|
||||
MemberofDeferredList *deferred_list;
|
||||
pthread_condattr_t condAttr;
|
||||
|
||||
@@ -1362,7 +1362,7 @@ memberof_postop_del(Slapi_PBlock *pb)
|
||||
/* retrieve deferred update params that are valid until shutdown */
|
||||
memberof_rlock_config();
|
||||
mainConfig = memberof_get_config();
|
||||
- deferred_update = mainConfig->deferred_update;
|
||||
+ deferred_update = mainConfig->is_lmdb ? false : mainConfig->deferred_update;
|
||||
memberof_unlock_config();
|
||||
|
||||
if (deferred_update) {
|
||||
@@ -1738,7 +1738,7 @@ memberof_postop_modrdn(Slapi_PBlock *pb)
|
||||
/* retrieve deferred update params that are valid until shutdown */
|
||||
memberof_rlock_config();
|
||||
mainConfig = memberof_get_config();
|
||||
- deferred_update = mainConfig->deferred_update;
|
||||
+ deferred_update = mainConfig->is_lmdb ? false : mainConfig->deferred_update;
|
||||
memberof_unlock_config();
|
||||
|
||||
if (deferred_update) {
|
||||
@@ -2057,7 +2057,8 @@ memberof_postop_modify(Slapi_PBlock *pb)
|
||||
/* retrieve deferred update params that are valid until shutdown */
|
||||
memberof_rlock_config();
|
||||
mainConfig = memberof_get_config();
|
||||
- deferred_update = mainConfig->deferred_update;
|
||||
+
|
||||
+ deferred_update = mainConfig->is_lmdb ? false : mainConfig->deferred_update;
|
||||
memberof_unlock_config();
|
||||
|
||||
if (deferred_update) {
|
||||
@@ -2317,7 +2318,7 @@ memberof_postop_add(Slapi_PBlock *pb)
|
||||
/* retrieve deferred update params that are valid until shutdown */
|
||||
memberof_rlock_config();
|
||||
mainConfig = memberof_get_config();
|
||||
- deferred_update = mainConfig->deferred_update;
|
||||
+ deferred_update = mainConfig->is_lmdb ? false : mainConfig->deferred_update;
|
||||
memberof_unlock_config();
|
||||
|
||||
if (deferred_update) {
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.h b/ldap/servers/plugins/memberof/memberof.h
|
||||
index 64b17067b..1c0d2d308 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.h
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.h
|
||||
@@ -138,6 +138,7 @@ typedef struct memberofconfig
|
||||
PLHashTable *fixup_cache;
|
||||
Slapi_Task *task;
|
||||
int need_fixup;
|
||||
+ bool is_lmdb;
|
||||
} MemberOfConfig;
|
||||
|
||||
/* The key to access the hash table is the normalized DN
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof_config.c b/ldap/servers/plugins/memberof/memberof_config.c
|
||||
index bd7d25140..0336d8517 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof_config.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof_config.c
|
||||
@@ -519,6 +519,8 @@ memberof_apply_config(Slapi_PBlock *pb __attribute__((unused)),
|
||||
*/
|
||||
memberof_wlock_config();
|
||||
theConfig.need_fixup = (needfixup != NULL);
|
||||
+ /* DB implementation */
|
||||
+ theConfig.is_lmdb = slapi_db_is_lmdb();
|
||||
|
||||
if (groupattrs) {
|
||||
int i = 0;
|
||||
@@ -634,12 +636,16 @@ memberof_apply_config(Slapi_PBlock *pb __attribute__((unused)),
|
||||
}
|
||||
}
|
||||
|
||||
-
|
||||
if (deferred_update) {
|
||||
+ theConfig.deferred_update = PR_FALSE;
|
||||
if (strcasecmp(deferred_update, "on") == 0) {
|
||||
- theConfig.deferred_update = PR_TRUE;
|
||||
- } else {
|
||||
- theConfig.deferred_update = PR_FALSE;
|
||||
+ if (theConfig.is_lmdb) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "memberof_apply_config - "
|
||||
+ "deferred_update is not supported with LMDB backend and will be ignored\n");
|
||||
+ } else {
|
||||
+ theConfig.deferred_update = PR_TRUE;
|
||||
+ }
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/plugins/posix-winsync/posix-wsp-ident.h b/ldap/servers/plugins/posix-winsync/posix-wsp-ident.h
|
||||
index 440bb833a..509468a6f 100644
|
||||
--- a/ldap/servers/plugins/posix-winsync/posix-wsp-ident.h
|
||||
+++ b/ldap/servers/plugins/posix-winsync/posix-wsp-ident.h
|
||||
@@ -12,8 +12,6 @@
|
||||
#define PLUGIN_MAGIC_VENDOR_STR "contac Datentechnik GmbH"
|
||||
#define PRODUCTTEXT "1.1"
|
||||
#define null NULL
|
||||
-#define true - 1
|
||||
-#define false 0
|
||||
#define POSIX_WINSYNC_MSSFU_SCHEMA "posixWinsyncMsSFUSchema"
|
||||
#define POSIX_WINSYNC_MAP_MEMBERUID "posixWinsyncMapMemberUID"
|
||||
#define POSIX_WINSYNC_CREATE_MEMBEROFTASK "posixWinsyncCreateMemberOfTask"
|
||||
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
|
||||
index ccc7b6928..7f5c2a05a 100644
|
||||
--- a/ldap/servers/slapd/slapi-private.h
|
||||
+++ b/ldap/servers/slapd/slapi-private.h
|
||||
@@ -27,6 +27,7 @@ extern "C" {
|
||||
#include "nspr.h"
|
||||
#include "portable.h"
|
||||
#include "slapi-plugin.h"
|
||||
+#include <stdbool.h>
|
||||
/*
|
||||
* XXXmcs: we can stop including slapi-plugin-compat4.h once we stop using
|
||||
* deprecated functions internally.
|
||||
@@ -1262,6 +1263,7 @@ char get_sep(char *path);
|
||||
int mkdir_p(char *dir, unsigned int mode);
|
||||
const char *ldif_getline_ro( const char **next);
|
||||
void dup_ldif_line(struct berval *copy, const char *line, const char *endline);
|
||||
+bool slapi_db_is_lmdb(void);
|
||||
|
||||
/* slapi-memberof.c */
|
||||
int slapi_memberof(Slapi_MemberOfConfig *config, Slapi_DN *member_sdn, Slapi_MemberOfResult *result);
|
||||
diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c
|
||||
index cc89d3226..1418613c4 100644
|
||||
--- a/ldap/servers/slapd/util.c
|
||||
+++ b/ldap/servers/slapd/util.c
|
||||
@@ -1801,3 +1801,44 @@ void dup_ldif_line(struct berval *copy, const char *line, const char *endline)
|
||||
buf[pos] = 0;
|
||||
copy->bv_len = copylen;
|
||||
}
|
||||
+
|
||||
+/*
|
||||
+ * Return true if the backend is lmdb
|
||||
+ */
|
||||
+bool
|
||||
+slapi_db_is_lmdb(void)
|
||||
+{
|
||||
+ Slapi_PBlock *search_pb;
|
||||
+ Slapi_Entry **entries = NULL;
|
||||
+ const char *config_dn = "cn=config,cn=ldbm database,cn=plugins,cn=config";
|
||||
+ int result = 0;
|
||||
+ bool is_lmdb = false;
|
||||
+
|
||||
+ search_pb = slapi_pblock_new();
|
||||
+ slapi_search_internal_set_pb(search_pb, config_dn, LDAP_SCOPE_BASE,
|
||||
+ "objectclass=*",
|
||||
+ NULL, 0, NULL, NULL,
|
||||
+ plugin_get_default_component_id(), 0);
|
||||
+ slapi_search_internal_pb(search_pb);
|
||||
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
|
||||
+ if (LDAP_SUCCESS != result) {
|
||||
+ /* Failed to search cn=config */
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "slapi_db_is_lmdb",
|
||||
+ "Unable to search ldbm config entry, err=%d\n", result);
|
||||
+ } else {
|
||||
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
|
||||
+ if (entries && entries[0]) {
|
||||
+ Slapi_Entry *config_e = entries[0];
|
||||
+ const char *db_type = slapi_entry_attr_get_ref(config_e,
|
||||
+ "nsslapd-backend-implement");
|
||||
+ if (db_type && strcmp(db_type, "mdb") == 0) {
|
||||
+ is_lmdb = true;
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ slapi_free_search_results_internal(search_pb);
|
||||
+ slapi_pblock_destroy(search_pb);
|
||||
+
|
||||
+ return is_lmdb;
|
||||
+}
|
||||
--
|
||||
2.49.0
|
||||
|
||||
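The new slapi_db_is_lmdb() helper added above decides the backend type by reading nsslapd-backend-implement from cn=config,cn=ldbm database,cn=plugins,cn=config. A rough client-side equivalent using python-ldap is sketched below; the connection details are placeholders and this is only an illustration, not part of the patch.

import ldap

LDBM_CONFIG_DN = "cn=config,cn=ldbm database,cn=plugins,cn=config"

def server_uses_lmdb(uri, bind_dn, password):
    # Returns True when the server reports "mdb" as its database implementation.
    conn = ldap.initialize(uri)
    conn.simple_bind_s(bind_dn, password)
    try:
        entries = conn.search_s(LDBM_CONFIG_DN, ldap.SCOPE_BASE,
                                "(objectClass=*)",
                                ["nsslapd-backend-implement"])
        _dn, attrs = entries[0]
        impl = attrs.get("nsslapd-backend-implement", [b""])[0].decode()
        return impl == "mdb"
    finally:
        conn.unbind_s()

# server_uses_lmdb("ldap://localhost:389", "cn=Directory Manager", "password")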
@ -1,4 +1,4 @@
|
||||
From 16cde9b2e584a75f987c1e5f1151d8703f23263e Mon Sep 17 00:00:00 2001
|
||||
From 4663229d526035c06fe9086360ca6ce56586e37b Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 1 Sep 2025 18:23:33 +0200
|
||||
Subject: [PATCH] Issue 6933 - When deferred memberof update is enabled after
|
||||
@ -23,9 +23,6 @@ Fix description:
|
||||
fixes: #6933
|
||||
|
||||
Reviewed by: Simon Pichugin (Thanks !)
|
||||
|
||||
(cherry picked from commit 72f621c56114e1fd3ba3f6c25c731496b881075a)
|
||||
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
---
|
||||
.../suites/memberof_plugin/regression_test.py | 109 ++++++++++++------
|
||||
ldap/servers/plugins/memberof/memberof.c | 13 ++-
|
||||
@ -36,10 +33,10 @@ Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
6 files changed, 136 insertions(+), 38 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
index 9ba40a0c3..976729c2f 100644
|
||||
index 7983da1cb..f3c21d92d 100644
|
||||
--- a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
@@ -1289,15 +1289,19 @@ def test_shutdown_on_deferred_memberof(topology_st):
|
||||
@@ -1287,15 +1287,19 @@ def test_shutdown_on_deferred_memberof(topology_st, request):
|
||||
:setup: Standalone Instance
|
||||
:steps:
|
||||
1. Enable memberof plugin to scope SUFFIX
|
||||
@ -65,7 +62,7 @@ index 9ba40a0c3..976729c2f 100644
|
||||
:expectedresults:
|
||||
1. should succeed
|
||||
2. should succeed
|
||||
@@ -1308,14 +1312,18 @@ def test_shutdown_on_deferred_memberof(topology_st):
|
||||
@@ -1306,14 +1310,18 @@ def test_shutdown_on_deferred_memberof(topology_st, request):
|
||||
7. should succeed
|
||||
8. should succeed
|
||||
9. should succeed
|
||||
@ -85,7 +82,7 @@ index 9ba40a0c3..976729c2f 100644
|
||||
# Step 1. Enable memberof plugin to scope SUFFIX
|
||||
memberof = MemberOfPlugin(inst)
|
||||
delay=0
|
||||
@@ -1336,8 +1344,8 @@ def test_shutdown_on_deferred_memberof(topology_st):
|
||||
@@ -1334,8 +1342,8 @@ def test_shutdown_on_deferred_memberof(topology_st, request):
|
||||
#Creates users and groups
|
||||
users_dn = []
|
||||
|
||||
@ -96,7 +93,7 @@ index 9ba40a0c3..976729c2f 100644
|
||||
CN = '%s%d' % (USER_CN, i)
|
||||
users = UserAccounts(inst, SUFFIX)
|
||||
user_props = TEST_USER_PROPERTIES.copy()
|
||||
@@ -1347,7 +1355,7 @@ def test_shutdown_on_deferred_memberof(topology_st):
|
||||
@@ -1345,7 +1353,7 @@ def test_shutdown_on_deferred_memberof(topology_st, request):
|
||||
|
||||
# Step 3. Create a large groups with 250 members
|
||||
groups = Groups(inst, SUFFIX)
|
||||
@ -105,7 +102,7 @@ index 9ba40a0c3..976729c2f 100644
|
||||
|
||||
# Step 4. Restart the instance (using the default 2 minutes timeout)
|
||||
time.sleep(10)
|
||||
@@ -1361,7 +1369,7 @@ def test_shutdown_on_deferred_memberof(topology_st):
|
||||
@@ -1359,7 +1367,7 @@ def test_shutdown_on_deferred_memberof(topology_st, request):
|
||||
check_memberof_consistency(inst, testgroup)
|
||||
|
||||
# Step 6. Modify the group to get another big group.
|
||||
@ -114,7 +111,7 @@ index 9ba40a0c3..976729c2f 100644
|
||||
|
||||
# Step 7. Restart the instance with short timeout
|
||||
pattern = 'deferred_thread_func - thread has stopped'
|
||||
@@ -1374,40 +1382,71 @@ def test_shutdown_on_deferred_memberof(topology_st):
|
||||
@@ -1372,40 +1380,71 @@ def test_shutdown_on_deferred_memberof(topology_st, request):
|
||||
nbcleanstop = len(errlog.match(pattern))
|
||||
assert nbcleanstop == original_nbcleanstop
|
||||
|
||||
@ -211,10 +208,10 @@ index 9ba40a0c3..976729c2f 100644
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index ce1788e35..2ee7ee319 100644
|
||||
index e00ad8de8..7b3f55342 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -1012,9 +1012,16 @@ deferred_thread_func(void *arg)
|
||||
@@ -1022,9 +1022,16 @@ deferred_thread_func(void *arg)
|
||||
* keep running this thread until plugin is signaled to close
|
||||
*/
|
||||
g_incr_active_threadcnt();
|
||||
@ -235,7 +232,7 @@ index ce1788e35..2ee7ee319 100644
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
"deferred_thread_func - thread is starting "
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.h b/ldap/servers/plugins/memberof/memberof.h
|
||||
index c11d901ab..f2bb1d1cf 100644
|
||||
index 1c0d2d308..7ef97b0cd 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.h
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.h
|
||||
@@ -44,6 +44,7 @@
|
||||
@ -246,16 +243,16 @@ index c11d901ab..f2bb1d1cf 100644
|
||||
#define NSMEMBEROF "nsMemberOf"
|
||||
#define MEMBEROF_ENTRY_SCOPE_EXCLUDE_SUBTREE "memberOfEntryScopeExcludeSubtree"
|
||||
#define DN_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.12"
|
||||
@@ -138,6 +139,7 @@ typedef struct memberofconfig
|
||||
PLHashTable *fixup_cache;
|
||||
@@ -139,6 +140,7 @@ typedef struct memberofconfig
|
||||
Slapi_Task *task;
|
||||
int need_fixup;
|
||||
bool is_lmdb;
|
||||
+ PRBool launch_fixup;
|
||||
} MemberOfConfig;
|
||||
|
||||
/* The key to access the hash table is the normalized DN
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof_config.c b/ldap/servers/plugins/memberof/memberof_config.c
|
||||
index 89c44b014..e17c91fb9 100644
|
||||
index 0336d8517..d1c6a78ce 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof_config.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof_config.c
|
||||
@@ -472,6 +472,7 @@ memberof_apply_config(Slapi_PBlock *pb __attribute__((unused)),
|
||||
@ -274,8 +271,8 @@ index 89c44b014..e17c91fb9 100644
|
||||
|
||||
if (auto_add_oc == NULL) {
|
||||
auto_add_oc = slapi_ch_strdup(NSMEMBEROF);
|
||||
@@ -628,6 +630,15 @@ memberof_apply_config(Slapi_PBlock *pb __attribute__((unused)),
|
||||
theConfig.deferred_update = PR_FALSE;
|
||||
@@ -648,6 +650,15 @@ memberof_apply_config(Slapi_PBlock *pb __attribute__((unused)),
|
||||
}
|
||||
}
|
||||
}
|
||||
+ theConfig.launch_fixup = PR_FALSE;
|
||||
@ -318,7 +315,7 @@ index 90c1af2c3..598fe0bbc 100644
|
||||
|
||||
def create_parser(subparsers):
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 25b49dae4..4f177adef 100644
|
||||
index 31bbfa502..0878e18d1 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -962,6 +962,36 @@ class MemberOfPlugin(Plugin):
|
||||
@ -359,5 +356,5 @@ index 25b49dae4..4f177adef 100644
|
||||
"""Get memberofautoaddoc attribute"""
|
||||
|
||||
--
|
||||
2.51.1
|
||||
2.49.0
|
||||
|
||||
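The change above wires a launch_fixup flag into the memberOf configuration for the case where deferred updates are enabled on a database that already contains group members. For context, the lib389 knobs it exposes are used roughly as below; this is a minimal sketch assuming a running non-LMDB instance handle inst, with no error handling.

from lib389.plugins import MemberOfPlugin

def enable_deferred_memberof(inst):
    # Enable the plugin, turn on deferred updates and restart so the new
    # configuration (and, with this patch, any needed fixup) takes effect.
    memberof = MemberOfPlugin(inst)
    memberof.enable()
    memberof.set_memberofdeferredupdate('on')
    inst.restart()
    return memberof.get_memberofdeferredupdate()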
@ -1,4 +1,4 @@
|
||||
From aced6f575f3be70f16756860f8b852d3447df867 Mon Sep 17 00:00:00 2001
|
||||
From 19b26e46c8b1ba730e7e420bdcb6a2ec542fee29 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Tue, 6 May 2025 16:09:36 +0200
|
||||
Subject: [PATCH] Issue 6764 - statistics about index lookup report a wrong
|
||||
@ -17,50 +17,25 @@ Fix description:
|
||||
fixes: #6764
|
||||
|
||||
Reviewed by: Pierre Rogier (Thanks !)
|
||||
|
||||
(cherry picked from commit cd8069a76bcbb2d7bb4ac3bb9466019b01cc6db3)
|
||||
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/filterindex.c | 17 +++++++-----
|
||||
ldap/servers/slapd/back-ldbm/filterindex.c | 7 +++--
|
||||
ldap/servers/slapd/back-ldbm/ldbm_search.c | 31 +++++++++++++++-------
|
||||
ldap/servers/slapd/result.c | 12 +++++----
|
||||
ldap/servers/slapd/slapi-plugin.h | 9 +++++++
|
||||
ldap/servers/slapd/slapi-private.h | 2 ++
|
||||
ldap/servers/slapd/time.c | 13 +++++++++
|
||||
6 files changed, 62 insertions(+), 22 deletions(-)
|
||||
6 files changed, 57 insertions(+), 17 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/filterindex.c b/ldap/servers/slapd/back-ldbm/filterindex.c
|
||||
index 30550dde7..abc502b96 100644
|
||||
index 52f51713b..778bc73e4 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/filterindex.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/filterindex.c
|
||||
@@ -1040,8 +1040,7 @@ keys2idl(
|
||||
int allidslimit)
|
||||
{
|
||||
IDList *idl = NULL;
|
||||
- Op_stat *op_stat;
|
||||
- PRBool collect_stat = PR_FALSE;
|
||||
+ Op_stat *op_stat = NULL;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "keys2idl", "=> type %s indextype %s\n", type, indextype);
|
||||
|
||||
@@ -1049,8 +1048,9 @@ keys2idl(
|
||||
if (LDAP_STAT_READ_INDEX & config_get_statlog_level()) {
|
||||
op_stat = op_stat_get_operation_extension(pb);
|
||||
if (op_stat->search_stat) {
|
||||
- collect_stat = PR_TRUE;
|
||||
clock_gettime(CLOCK_MONOTONIC, &(op_stat->search_stat->keys_lookup_start));
|
||||
+ } else {
|
||||
+ op_stat = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1059,11 +1059,14 @@ keys2idl(
|
||||
@@ -1105,11 +1105,14 @@ keys2idl(
|
||||
struct component_keys_lookup *key_stat;
|
||||
int key_len;
|
||||
|
||||
- idl2 = index_read_ext_allids(pb, be, type, indextype, slapi_value_get_berval(ivals[i]), txn, err, unindexed, allidslimit);
|
||||
- if (collect_stat) {
|
||||
+ if (op_stat) {
|
||||
if (op_stat) {
|
||||
/* gather the index lookup statistics */
|
||||
key_stat = (struct component_keys_lookup *) slapi_ch_calloc(1, sizeof (struct component_keys_lookup));
|
||||
-
|
||||
@ -72,17 +47,8 @@ index 30550dde7..abc502b96 100644
|
||||
/* indextype e.g. "eq" or "sub" (see index.c) */
|
||||
if (indextype) {
|
||||
key_stat->index_type = slapi_ch_strdup(indextype);
|
||||
@@ -1125,7 +1128,7 @@ keys2idl(
|
||||
}
|
||||
|
||||
/* All the keys have been fetch, time to take the completion time */
|
||||
- if (collect_stat) {
|
||||
+ if (op_stat) {
|
||||
clock_gettime(CLOCK_MONOTONIC, &(op_stat->search_stat->keys_lookup_end));
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
index 5d98e288e..27301f453 100644
|
||||
index 97702d5a6..424d3aecb 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
@@ -35,7 +35,7 @@ static IDList *onelevel_candidates(Slapi_PBlock *pb, backend *be, const char *ba
|
||||
@ -94,7 +60,7 @@ index 5d98e288e..27301f453 100644
|
||||
|
||||
/* This is for performance testing, allows us to disable ACL checking altogether */
|
||||
#if defined(DISABLE_ACL_CHECK)
|
||||
@@ -1169,17 +1169,12 @@ create_subtree_filter(Slapi_Filter *filter, int managedsait, Slapi_Filter **focr
|
||||
@@ -1249,17 +1249,12 @@ create_subtree_filter(Slapi_Filter *filter, int managedsait)
|
||||
}
|
||||
|
||||
static void
|
||||
@ -114,7 +80,7 @@ index 5d98e288e..27301f453 100644
|
||||
/* indextype is "eq" */
|
||||
if (index_type) {
|
||||
key_stat->index_type = slapi_ch_strdup(index_type);
|
||||
@@ -1286,23 +1281,39 @@ subtree_candidates(
|
||||
@@ -1349,23 +1344,39 @@ subtree_candidates(
|
||||
|
||||
slapi_pblock_get(pb, SLAPI_TXN, &txn.back_txn_txn);
|
||||
if (entryrdn_get_noancestorid()) {
|
||||
@ -157,41 +123,51 @@ index 5d98e288e..27301f453 100644
|
||||
idl_insert(&descendants, e->ep_id);
|
||||
candidates = idl_intersection(be, candidates, descendants);
|
||||
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
|
||||
index 87641e92f..f40556de8 100644
|
||||
index c0abbeef1..afd454d4f 100644
|
||||
--- a/ldap/servers/slapd/result.c
|
||||
+++ b/ldap/servers/slapd/result.c
|
||||
@@ -2089,19 +2089,21 @@ log_op_stat(Slapi_PBlock *pb, uint64_t connid, int32_t op_id, int32_t op_interna
|
||||
op_stat->search_stat) {
|
||||
struct component_keys_lookup *key_info;
|
||||
@@ -2098,6 +2098,8 @@ log_op_stat(Slapi_PBlock *pb, uint64_t connid, int32_t op_id, int32_t op_interna
|
||||
logpb.op_id = op_id;
|
||||
|
||||
for (key_info = op_stat->search_stat->keys_lookup; key_info; key_info = key_info->next) {
|
||||
+ slapi_timespec_diff(&key_info->key_lookup_end, &key_info->key_lookup_start, &duration);
|
||||
+ snprintf(stat_etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)duration.tv_sec, (int64_t)duration.tv_nsec);
|
||||
if (internal_op) {
|
||||
slapi_log_stat(LDAP_STAT_READ_INDEX,
|
||||
- connid == 0 ? STAT_LOG_CONN_OP_FMT_INT_INT "STAT read index: attribute=%s key(%s)=%s --> count %d\n":
|
||||
- STAT_LOG_CONN_OP_FMT_EXT_INT "STAT read index: attribute=%s key(%s)=%s --> count %d\n",
|
||||
+ connid == 0 ? STAT_LOG_CONN_OP_FMT_INT_INT "STAT read index: attribute=%s key(%s)=%s --> count %d (duration %s)\n":
|
||||
+ STAT_LOG_CONN_OP_FMT_EXT_INT "STAT read index: attribute=%s key(%s)=%s --> count %d (duration %s)\n",
|
||||
connid, op_id, op_internal_id, op_nested_count,
|
||||
key_info->attribute_type, key_info->index_type, key_info->key,
|
||||
- key_info->id_lookup_cnt);
|
||||
+ key_info->id_lookup_cnt, stat_etime);
|
||||
if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
/* JSON logging */
|
||||
@@ -2110,11 +2112,11 @@ log_op_stat(Slapi_PBlock *pb, uint64_t connid, int32_t op_id, int32_t op_interna
|
||||
slapd_log_access_stat(&logpb);
|
||||
} else {
|
||||
slapi_log_stat(LDAP_STAT_READ_INDEX,
|
||||
- connid == 0 ? STAT_LOG_CONN_OP_FMT_INT_INT "STAT read index: attribute=%s key(%s)=%s --> count %d\n":
|
||||
- STAT_LOG_CONN_OP_FMT_EXT_INT "STAT read index: attribute=%s key(%s)=%s --> count %d\n",
|
||||
+ connid == 0 ? STAT_LOG_CONN_OP_FMT_INT_INT "STAT read index: attribute=%s key(%s)=%s --> count %d (duration %s)\n":
|
||||
+ STAT_LOG_CONN_OP_FMT_EXT_INT "STAT read index: attribute=%s key(%s)=%s --> count %d (duration %s)\n",
|
||||
connid, op_id, op_internal_id, op_nested_count,
|
||||
key_info->attribute_type, key_info->index_type, key_info->key,
|
||||
- key_info->id_lookup_cnt);
|
||||
+ key_info->id_lookup_cnt, stat_etime);
|
||||
}
|
||||
} else {
|
||||
slapi_log_stat(LDAP_STAT_READ_INDEX,
|
||||
- "conn=%" PRIu64 " op=%d STAT read index: attribute=%s key(%s)=%s --> count %d\n",
|
||||
+ "conn=%" PRIu64 " op=%d STAT read index: attribute=%s key(%s)=%s --> count %d (duration %s)\n",
|
||||
connid, op_id,
|
||||
key_info->attribute_type, key_info->index_type, key_info->key,
|
||||
- key_info->id_lookup_cnt);
|
||||
+ key_info->id_lookup_cnt, stat_etime);
|
||||
if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
@@ -2128,10 +2130,10 @@ log_op_stat(Slapi_PBlock *pb, uint64_t connid, int32_t op_id, int32_t op_interna
|
||||
slapd_log_access_stat(&logpb);
|
||||
} else {
|
||||
slapi_log_stat(LDAP_STAT_READ_INDEX,
|
||||
- "conn=%" PRIu64 " op=%d STAT read index: attribute=%s key(%s)=%s --> count %d\n",
|
||||
+ "conn=%" PRIu64 " op=%d STAT read index: attribute=%s key(%s)=%s --> count %d (duration %s)\n",
|
||||
connid, op_id,
|
||||
key_info->attribute_type, key_info->index_type, key_info->key,
|
||||
- key_info->id_lookup_cnt);
|
||||
+ key_info->id_lookup_cnt, stat_etime);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index a84a60c92..00e9722d2 100644
index c30a2b8ec..2090c6396 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -8314,6 +8314,15 @@ void DS_Sleep(PRIntervalTime ticks);
@@ -8331,6 +8331,15 @@ void DS_Sleep(PRIntervalTime ticks);
* \param struct timespec c the difference.
*/
void slapi_timespec_diff(struct timespec *a, struct timespec *b, struct timespec *diff);
@ -208,10 +184,10 @@ index a84a60c92..00e9722d2 100644
* Given an operation, determine the time elapsed since the op
* began.
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index bd7a4b39d..dfb0e272a 100644
index 7f5c2a05a..ba5e8f144 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -457,6 +457,8 @@ struct component_keys_lookup
@@ -463,6 +463,8 @@ struct component_keys_lookup
char *attribute_type;
char *key;
int id_lookup_cnt;
@ -221,7 +197,7 @@ index bd7a4b39d..dfb0e272a 100644
};
typedef struct op_search_stat
diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c
index 0406c3689..0dd457fbe 100644
index 5bd9279db..9c71475a7 100644
--- a/ldap/servers/slapd/time.c
+++ b/ldap/servers/slapd/time.c
@@ -272,6 +272,19 @@ slapi_timespec_diff(struct timespec *a, struct timespec *b, struct timespec *dif
@ -245,5 +221,5 @@ index 0406c3689..0dd457fbe 100644
slapi_timespec_expire_at(time_t timeout, struct timespec *expire)
{
--
2.51.1
2.49.0

4
389-ds-base-devel.README
Normal file
@ -0,0 +1,4 @@
For detailed information on developing plugins for 389 Directory Server visit

https://www.port389.org/docs/389ds/design/plugins.html
https://github.com/389ds/389-ds-base/blob/main/src/slapi_r_plugin/README.md
1102
389-ds-base.spec
Normal file
File diff suppressed because it is too large
3
389-ds-base.sysusers
Normal file
@ -0,0 +1,3 @@
#Type Name ID GECOS Home directory Shell
g dirsrv 389
u dirsrv 389:389 "user for 389-ds-base" /usr/share/dirsrv/ /sbin/nologin
@ -13,29 +13,15 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "adler2"
|
||||
version = "2.0.0"
|
||||
version = "2.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
|
||||
checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
|
||||
|
||||
[[package]]
|
||||
name = "ahash"
|
||||
version = "0.7.8"
|
||||
name = "allocator-api2"
|
||||
version = "0.2.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9"
|
||||
dependencies = [
|
||||
"getrandom 0.2.16",
|
||||
"once_cell",
|
||||
"version_check",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ansi_term"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
|
||||
|
||||
[[package]]
|
||||
name = "atty"
|
||||
@ -50,9 +36,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.4.0"
|
||||
version = "1.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
|
||||
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
|
||||
|
||||
[[package]]
|
||||
name = "backtrace"
|
||||
@ -66,7 +52,7 @@ dependencies = [
|
||||
"miniz_oxide",
|
||||
"object",
|
||||
"rustc-demangle",
|
||||
"windows-targets",
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -83,9 +69,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "2.9.1"
|
||||
version = "2.9.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"
|
||||
checksum = "6a65b545ab31d687cff52899d4890855fec459eb6afe0da6417b8a18da87aa29"
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
@ -95,11 +81,13 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
|
||||
|
||||
[[package]]
|
||||
name = "cbindgen"
|
||||
version = "0.9.1"
|
||||
version = "0.26.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
|
||||
checksum = "da6bc11b07529f16944307272d5bd9b22530bc7d05751717c9d416586cedab49"
|
||||
dependencies = [
|
||||
"clap",
|
||||
"heck",
|
||||
"indexmap",
|
||||
"log",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@ -112,9 +100,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.2.25"
|
||||
version = "1.2.33"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d0fc897dc1e865cc67c0e05a836d9d3f1df3cbe442aa4a9473b18e12624a4951"
|
||||
checksum = "3ee0f8803222ba5a7e2777dd72ca451868909b1ac410621b676adf07280e9b5f"
|
||||
dependencies = [
|
||||
"jobserver",
|
||||
"libc",
|
||||
@ -123,72 +111,49 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "1.0.0"
|
||||
version = "1.0.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||
checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9"
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "2.34.0"
|
||||
version = "3.2.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
|
||||
checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123"
|
||||
dependencies = [
|
||||
"ansi_term",
|
||||
"atty",
|
||||
"bitflags 1.3.2",
|
||||
"clap_lex",
|
||||
"indexmap",
|
||||
"strsim",
|
||||
"termcolor",
|
||||
"textwrap",
|
||||
"unicode-width",
|
||||
"vec_map",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap_lex"
|
||||
version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
|
||||
dependencies = [
|
||||
"os_str_bytes",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "concread"
|
||||
version = "0.2.21"
|
||||
version = "0.5.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dcc9816f5ac93ebd51c37f7f9a6bf2b40dfcd42978ad2aea5d542016e9244cf6"
|
||||
checksum = "07fd8c4b53f0aafeec114fa1cd863f323880f790656f2d7508af83a9b5110e8d"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"crossbeam",
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
"lru",
|
||||
"parking_lot",
|
||||
"rand",
|
||||
"smallvec",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam"
|
||||
version = "0.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"crossbeam-deque",
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-queue",
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-channel"
|
||||
version = "0.5.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
|
||||
dependencies = [
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-deque"
|
||||
version = "0.8.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
|
||||
dependencies = [
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
"foldhash",
|
||||
"lru",
|
||||
"smallvec",
|
||||
"sptr",
|
||||
"tokio",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -238,13 +203,19 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "errno"
|
||||
version = "0.3.12"
|
||||
name = "equivalent"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18"
|
||||
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
|
||||
|
||||
[[package]]
|
||||
name = "errno"
|
||||
version = "0.3.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -266,6 +237,12 @@ dependencies = [
|
||||
"zeroize",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "foldhash"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
|
||||
|
||||
[[package]]
|
||||
name = "foreign-types"
|
||||
version = "0.3.2"
|
||||
@ -289,7 +266,7 @@ checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"wasi 0.11.0+wasi-snapshot-preview1",
|
||||
"wasi 0.11.1+wasi-snapshot-preview1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -315,10 +292,24 @@ name = "hashbrown"
|
||||
version = "0.12.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
|
||||
|
||||
[[package]]
|
||||
name = "hashbrown"
|
||||
version = "0.15.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"allocator-api2",
|
||||
"equivalent",
|
||||
"foldhash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "heck"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
|
||||
|
||||
[[package]]
|
||||
name = "hermit-abi"
|
||||
version = "0.1.19"
|
||||
@ -329,12 +320,24 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "instant"
|
||||
version = "0.1.13"
|
||||
name = "indexmap"
|
||||
version = "1.9.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
|
||||
checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"hashbrown 0.12.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "io-uring"
|
||||
version = "0.7.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4"
|
||||
dependencies = [
|
||||
"bitflags 2.9.2",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -355,9 +358,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.172"
|
||||
version = "0.2.175"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
|
||||
checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
|
||||
|
||||
[[package]]
|
||||
name = "librnsslapd"
|
||||
@ -384,16 +387,6 @@ version = "0.9.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
|
||||
|
||||
[[package]]
|
||||
name = "lock_api"
|
||||
version = "0.4.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"scopeguard",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.27"
|
||||
@ -402,28 +395,39 @@ checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
|
||||
|
||||
[[package]]
|
||||
name = "lru"
|
||||
version = "0.7.8"
|
||||
version = "0.13.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a"
|
||||
checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465"
|
||||
dependencies = [
|
||||
"hashbrown",
|
||||
"hashbrown 0.15.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.7.4"
|
||||
version = "2.7.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
|
||||
checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0"
|
||||
|
||||
[[package]]
|
||||
name = "miniz_oxide"
|
||||
version = "0.8.8"
|
||||
version = "0.8.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a"
|
||||
checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316"
|
||||
dependencies = [
|
||||
"adler2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "1.0.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"wasi 0.11.1+wasi-snapshot-preview1",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "object"
|
||||
version = "0.36.7"
|
||||
@ -445,7 +449,7 @@ version = "0.10.73"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"bitflags 2.9.2",
|
||||
"cfg-if",
|
||||
"foreign-types",
|
||||
"libc",
|
||||
@ -462,7 +466,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
"syn 2.0.106",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -478,29 +482,10 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot"
|
||||
version = "0.11.2"
|
||||
name = "os_str_bytes"
|
||||
version = "6.6.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
|
||||
dependencies = [
|
||||
"instant",
|
||||
"lock_api",
|
||||
"parking_lot_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot_core"
|
||||
version = "0.8.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"instant",
|
||||
"libc",
|
||||
"redox_syscall",
|
||||
"smallvec",
|
||||
"winapi",
|
||||
]
|
||||
checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1"
|
||||
|
||||
[[package]]
|
||||
name = "paste"
|
||||
@ -533,15 +518,6 @@ version = "0.3.32"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"
|
||||
|
||||
[[package]]
|
||||
name = "ppv-lite86"
|
||||
version = "0.2.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
|
||||
dependencies = [
|
||||
"zerocopy",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-hack"
|
||||
version = "0.5.20+deprecated"
|
||||
@ -550,9 +526,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.95"
|
||||
version = "1.0.101"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
|
||||
checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de"
|
||||
dependencies = [
|
||||
"unicode-ident",
|
||||
]
|
||||
@ -581,70 +557,27 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "r-efi"
|
||||
version = "5.2.0"
|
||||
version = "5.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
|
||||
|
||||
[[package]]
|
||||
name = "rand"
|
||||
version = "0.8.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"rand_chacha",
|
||||
"rand_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_chacha"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
|
||||
dependencies = [
|
||||
"ppv-lite86",
|
||||
"rand_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_core"
|
||||
version = "0.6.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
|
||||
dependencies = [
|
||||
"getrandom 0.2.16",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "redox_syscall"
|
||||
version = "0.2.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
|
||||
dependencies = [
|
||||
"bitflags 1.3.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rsds"
|
||||
version = "0.1.0"
|
||||
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
|
||||
|
||||
[[package]]
|
||||
name = "rustc-demangle"
|
||||
version = "0.1.24"
|
||||
version = "0.1.26"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
|
||||
checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace"
|
||||
|
||||
[[package]]
|
||||
name = "rustix"
|
||||
version = "1.0.7"
|
||||
version = "1.0.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266"
|
||||
checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"bitflags 2.9.2",
|
||||
"errno",
|
||||
"libc",
|
||||
"linux-raw-sys",
|
||||
"windows-sys",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -653,12 +586,6 @@ version = "1.0.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
|
||||
|
||||
[[package]]
|
||||
name = "scopeguard"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.219"
|
||||
@ -676,14 +603,14 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
"syn 2.0.106",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.140"
|
||||
version = "1.0.143"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
|
||||
checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a"
|
||||
dependencies = [
|
||||
"itoa",
|
||||
"memchr",
|
||||
@ -697,6 +624,12 @@ version = "1.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
|
||||
|
||||
[[package]]
|
||||
name = "slab"
|
||||
version = "0.4.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589"
|
||||
|
||||
[[package]]
|
||||
name = "slapd"
|
||||
version = "0.1.0"
|
||||
@ -715,15 +648,21 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "smallvec"
|
||||
version = "1.15.0"
|
||||
version = "1.15.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9"
|
||||
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
|
||||
|
||||
[[package]]
|
||||
name = "sptr"
|
||||
version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a"
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.8.0"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
|
||||
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
@ -738,9 +677,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "2.0.101"
|
||||
version = "2.0.106"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf"
|
||||
checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@ -749,46 +688,44 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tempfile"
|
||||
version = "3.20.0"
|
||||
version = "3.21.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1"
|
||||
checksum = "15b61f8f20e3a6f7e0649d825294eaf317edce30f82cf6026e7e4cb9222a7d1e"
|
||||
dependencies = [
|
||||
"fastrand",
|
||||
"getrandom 0.3.3",
|
||||
"once_cell",
|
||||
"rustix",
|
||||
"windows-sys",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "termcolor"
|
||||
version = "1.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
|
||||
dependencies = [
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "textwrap"
|
||||
version = "0.11.0"
|
||||
version = "0.16.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
|
||||
dependencies = [
|
||||
"unicode-width",
|
||||
]
|
||||
checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057"
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.45.1"
|
||||
version = "1.47.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779"
|
||||
checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"io-uring",
|
||||
"libc",
|
||||
"mio",
|
||||
"pin-project-lite",
|
||||
"tokio-macros",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-macros"
|
||||
version = "2.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
"slab",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -800,18 +737,43 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing"
|
||||
version = "0.1.41"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
|
||||
dependencies = [
|
||||
"pin-project-lite",
|
||||
"tracing-attributes",
|
||||
"tracing-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-attributes"
|
||||
version = "0.1.30"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.106",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-core"
|
||||
version = "0.1.34"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678"
|
||||
dependencies = [
|
||||
"once_cell",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-ident"
|
||||
version = "1.0.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-width"
|
||||
version = "0.1.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af"
|
||||
|
||||
[[package]]
|
||||
name = "uuid"
|
||||
version = "0.8.2"
|
||||
@ -827,23 +789,11 @@ version = "0.2.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
|
||||
|
||||
[[package]]
|
||||
name = "vec_map"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
|
||||
|
||||
[[package]]
|
||||
name = "version_check"
|
||||
version = "0.9.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
version = "0.11.0+wasi-snapshot-preview1"
|
||||
version = "0.11.1+wasi-snapshot-preview1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
|
||||
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
@ -870,19 +820,43 @@ version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-util"
|
||||
version = "0.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0978bf7171b3d90bac376700cb56d606feb40f251a475a5d6634613564460b22"
|
||||
dependencies = [
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
[[package]]
|
||||
name = "windows-link"
|
||||
version = "0.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.59.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
|
||||
dependencies = [
|
||||
"windows-targets",
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.60.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
|
||||
dependencies = [
|
||||
"windows-targets 0.53.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -891,14 +865,31 @@ version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
|
||||
dependencies = [
|
||||
"windows_aarch64_gnullvm",
|
||||
"windows_aarch64_msvc",
|
||||
"windows_i686_gnu",
|
||||
"windows_i686_gnullvm",
|
||||
"windows_i686_msvc",
|
||||
"windows_x86_64_gnu",
|
||||
"windows_x86_64_gnullvm",
|
||||
"windows_x86_64_msvc",
|
||||
"windows_aarch64_gnullvm 0.52.6",
|
||||
"windows_aarch64_msvc 0.52.6",
|
||||
"windows_i686_gnu 0.52.6",
|
||||
"windows_i686_gnullvm 0.52.6",
|
||||
"windows_i686_msvc 0.52.6",
|
||||
"windows_x86_64_gnu 0.52.6",
|
||||
"windows_x86_64_gnullvm 0.52.6",
|
||||
"windows_x86_64_msvc 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.53.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91"
|
||||
dependencies = [
|
||||
"windows-link",
|
||||
"windows_aarch64_gnullvm 0.53.0",
|
||||
"windows_aarch64_msvc 0.53.0",
|
||||
"windows_i686_gnu 0.53.0",
|
||||
"windows_i686_gnullvm 0.53.0",
|
||||
"windows_i686_msvc 0.53.0",
|
||||
"windows_x86_64_gnu 0.53.0",
|
||||
"windows_x86_64_gnullvm 0.53.0",
|
||||
"windows_x86_64_msvc 0.53.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -907,75 +898,103 @@ version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnullvm"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnullvm"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
|
||||
|
||||
[[package]]
|
||||
name = "wit-bindgen-rt"
|
||||
version = "0.39.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerocopy"
|
||||
version = "0.8.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb"
|
||||
dependencies = [
|
||||
"zerocopy-derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerocopy-derive"
|
||||
version = "0.8.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
"bitflags 2.9.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -995,5 +1014,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
"syn 2.0.106",
|
||||
]
|
||||
@ -1,119 +0,0 @@
|
||||
From dddb14210b402f317e566b6387c76a8e659bf7fa Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Tue, 14 Feb 2023 13:34:10 +0100
|
||||
Subject: [PATCH 1/2] issue 5647 - covscan: memory leak in audit log when
|
||||
adding entries (#5650)
|
||||
|
||||
covscan reported an issue about "vals" variable in auditlog.c:231 and indeed a charray_free is missing.
|
||||
Issue: 5647
|
||||
Reviewed by: @mreynolds389, @droideck
|
||||
---
|
||||
ldap/servers/slapd/auditlog.c | 71 +++++++++++++++++++----------------
|
||||
1 file changed, 38 insertions(+), 33 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
|
||||
index 68cbc674d..3128e0497 100644
|
||||
--- a/ldap/servers/slapd/auditlog.c
|
||||
+++ b/ldap/servers/slapd/auditlog.c
|
||||
@@ -177,6 +177,40 @@ write_auditfail_log_entry(Slapi_PBlock *pb)
|
||||
slapi_ch_free_string(&audit_config);
|
||||
}
|
||||
|
||||
+/*
|
||||
+ * Write the attribute values to the audit log as "comments"
|
||||
+ *
|
||||
+ * Slapi_Attr *entry - the attribute begin logged.
|
||||
+ * char *attrname - the attribute name.
|
||||
+ * lenstr *l - the audit log buffer
|
||||
+ *
|
||||
+ * Resulting output in the log:
|
||||
+ *
|
||||
+ * #ATTR: VALUE
|
||||
+ * #ATTR: VALUE
|
||||
+ */
|
||||
+static void
|
||||
+log_entry_attr(Slapi_Attr *entry_attr, char *attrname, lenstr *l)
|
||||
+{
|
||||
+ Slapi_Value **vals = attr_get_present_values(entry_attr);
|
||||
+ for(size_t i = 0; vals && vals[i]; i++) {
|
||||
+ char log_val[256] = "";
|
||||
+ const struct berval *bv = slapi_value_get_berval(vals[i]);
|
||||
+ if (bv->bv_len >= 256) {
|
||||
+ strncpy(log_val, bv->bv_val, 252);
|
||||
+ strcpy(log_val+252, "...");
|
||||
+ } else {
|
||||
+ strncpy(log_val, bv->bv_val, bv->bv_len);
|
||||
+ log_val[bv->bv_len] = 0;
|
||||
+ }
|
||||
+ addlenstr(l, "#");
|
||||
+ addlenstr(l, attrname);
|
||||
+ addlenstr(l, ": ");
|
||||
+ addlenstr(l, log_val);
|
||||
+ addlenstr(l, "\n");
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Write "requested" attributes from the entry to the audit log as "comments"
|
||||
*
|
||||
@@ -212,21 +246,9 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
|
||||
for (req_attr = ldap_utf8strtok_r(display_attrs, ", ", &last); req_attr;
|
||||
req_attr = ldap_utf8strtok_r(NULL, ", ", &last))
|
||||
{
|
||||
- char **vals = slapi_entry_attr_get_charray(entry, req_attr);
|
||||
- for(size_t i = 0; vals && vals[i]; i++) {
|
||||
- char log_val[256] = {0};
|
||||
-
|
||||
- if (strlen(vals[i]) > 256) {
|
||||
- strncpy(log_val, vals[i], 252);
|
||||
- strcat(log_val, "...");
|
||||
- } else {
|
||||
- strcpy(log_val, vals[i]);
|
||||
- }
|
||||
- addlenstr(l, "#");
|
||||
- addlenstr(l, req_attr);
|
||||
- addlenstr(l, ": ");
|
||||
- addlenstr(l, log_val);
|
||||
- addlenstr(l, "\n");
|
||||
+ slapi_entry_attr_find(entry, req_attr, &entry_attr);
|
||||
+ if (entry_attr) {
|
||||
+ log_entry_attr(entry_attr, req_attr, l);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -234,7 +256,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
|
||||
for (; entry_attr; entry_attr = entry_attr->a_next) {
|
||||
Slapi_Value **vals = attr_get_present_values(entry_attr);
|
||||
char *attr = NULL;
|
||||
- const char *val = NULL;
|
||||
|
||||
slapi_attr_get_type(entry_attr, &attr);
|
||||
if (strcmp(attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) {
|
||||
@@ -251,23 +272,7 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
|
||||
addlenstr(l, ": ****************************\n");
|
||||
continue;
|
||||
}
|
||||
-
|
||||
- for(size_t i = 0; vals && vals[i]; i++) {
|
||||
- char log_val[256] = {0};
|
||||
-
|
||||
- val = slapi_value_get_string(vals[i]);
|
||||
- if (strlen(val) > 256) {
|
||||
- strncpy(log_val, val, 252);
|
||||
- strcat(log_val, "...");
|
||||
- } else {
|
||||
- strcpy(log_val, val);
|
||||
- }
|
||||
- addlenstr(l, "#");
|
||||
- addlenstr(l, attr);
|
||||
- addlenstr(l, ": ");
|
||||
- addlenstr(l, log_val);
|
||||
- addlenstr(l, "\n");
|
||||
- }
|
||||
+ log_entry_attr(entry_attr, attr, l);
|
||||
}
|
||||
}
|
||||
slapi_ch_free_string(&display_attrs);
|
||||
--
|
||||
2.43.0
|
||||
|
||||
@ -1,27 +0,0 @@
|
||||
From be7c2b82958e91ce08775bf6b5da3c311d3b00e5 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 20 Feb 2023 16:14:05 +0100
Subject: [PATCH 2/2] Issue 5647 - Fix unused variable warning from previous
 commit (#5670)

* issue 5647 - memory leak in audit log when adding entries
* Issue 5647 - Fix unused variable warning from previous commit
---
 ldap/servers/slapd/auditlog.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 3128e0497..0597ecc6f 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -254,7 +254,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
 } else {
 /* Return all attributes */
 for (; entry_attr; entry_attr = entry_attr->a_next) {
- Slapi_Value **vals = attr_get_present_values(entry_attr);
 char *attr = NULL;

 slapi_attr_get_type(entry_attr, &attr);
--
2.43.0

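The defect class addressed by the two auditlog patches above is an attribute-value array that is copied for logging and then never released. Below is a minimal, self-contained C sketch of that pattern using plain libc stand-ins (get_values_copy and free_values are hypothetical helpers, not SLAPI functions); in the actual server the copy came from slapi_entry_attr_get_charray() and the fix avoids the copy entirely by iterating the entry's present values through log_entry_attr().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for an API that returns a heap-allocated,
 * NULL-terminated copy of an entry's values. */
static char **get_values_copy(void)
{
    char **vals = calloc(3, sizeof(char *));
    vals[0] = strdup("value-one");
    vals[1] = strdup("value-two");
    vals[2] = NULL;
    return vals;
}

/* Release the array and every string it owns. */
static void free_values(char **vals)
{
    if (vals == NULL) {
        return;
    }
    for (size_t i = 0; vals[i] != NULL; i++) {
        free(vals[i]);
    }
    free(vals);
}

int main(void)
{
    char **vals = get_values_copy();

    /* Log each value, as the audit code does with "#ATTR: VALUE" lines. */
    for (size_t i = 0; vals && vals[i]; i++) {
        printf("#attr: %s\n", vals[i]);
    }

    /* The leak fixed above: returning here without releasing the copy.
     * Either free it explicitly, or (as the patch does) avoid making a
     * per-call copy in the first place. */
    free_values(vals);
    return 0;
}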
@ -1,147 +0,0 @@
|
||||
From 692c4cec6cc5c0086cf58f83bcfa690c766c9887 Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Fri, 2 Feb 2024 14:14:28 +0100
|
||||
Subject: [PATCH] Issue 5407 - sync_repl crashes if enabled while dynamic
|
||||
plugin is enabled (#5411)
|
||||
|
||||
Bug description:
|
||||
When dynamic plugin is enabled, if a MOD enables sync_repl plugin
|
||||
then sync_repl init function registers the postop callback
|
||||
that will be called for the MOD itself while the preop
|
||||
has not been called.
|
||||
postop expects preop to be called and so primary operation
|
||||
to be set. When it is not set it crashes
|
||||
|
||||
Fix description:
|
||||
If the primary operation is not set, just return
|
||||
|
||||
relates: #5407
|
||||
---
|
||||
.../suites/syncrepl_plugin/basic_test.py | 68 +++++++++++++++++++
|
||||
ldap/servers/plugins/sync/sync_persist.c | 23 ++++++-
|
||||
2 files changed, 90 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
index eb3770b78..cdf35eeaa 100644
|
||||
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
@@ -592,6 +592,74 @@ def test_sync_repl_cenotaph(topo_m2, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+def test_sync_repl_dynamic_plugin(topology, request):
|
||||
+ """Test sync_repl with dynamic plugin
|
||||
+
|
||||
+ :id: d4f84913-c18a-459f-8525-110f610ca9e6
|
||||
+ :setup: install a standalone instance
|
||||
+ :steps:
|
||||
+ 1. reset instance to standard (no retroCL, no sync_repl, no dynamic plugin)
|
||||
+ 2. Enable dynamic plugin
|
||||
+ 3. Enable retroCL/content_sync
|
||||
+ 4. Establish a sync_repl req
|
||||
+ :expectedresults:
|
||||
+ 1. Should succeeds
|
||||
+ 2. Should succeeds
|
||||
+ 3. Should succeeds
|
||||
+ 4. Should succeeds
|
||||
+ """
|
||||
+
|
||||
+ # Reset the instance in a default config
|
||||
+ # Disable content sync plugin
|
||||
+ topology.standalone.plugins.disable(name=PLUGIN_REPL_SYNC)
|
||||
+
|
||||
+ # Disable retro changelog
|
||||
+ topology.standalone.plugins.disable(name=PLUGIN_RETRO_CHANGELOG)
|
||||
+
|
||||
+ # Disable dynamic plugins
|
||||
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'off')])
|
||||
+ topology.standalone.restart()
|
||||
+
|
||||
+ # Now start the test
|
||||
+ # Enable dynamic plugins
|
||||
+ try:
|
||||
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')])
|
||||
+ except ldap.LDAPError as e:
|
||||
+ log.error('Failed to enable dynamic plugin! {}'.format(e.args[0]['desc']))
|
||||
+ assert False
|
||||
+
|
||||
+ # Enable retro changelog
|
||||
+ topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
|
||||
+
|
||||
+ # Enbale content sync plugin
|
||||
+ topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
|
||||
+
|
||||
+ # create a sync repl client and wait 5 seconds to be sure it is running
|
||||
+ sync_repl = Sync_persist(topology.standalone)
|
||||
+ sync_repl.start()
|
||||
+ time.sleep(5)
|
||||
+
|
||||
+ # create users
|
||||
+ users = UserAccounts(topology.standalone, DEFAULT_SUFFIX)
|
||||
+ users_set = []
|
||||
+ for i in range(10001, 10004):
|
||||
+ users_set.append(users.create_test_user(uid=i))
|
||||
+
|
||||
+ time.sleep(10)
|
||||
+ # delete users, that automember/memberof will generate nested updates
|
||||
+ for user in users_set:
|
||||
+ user.delete()
|
||||
+ # stop the server to get the sync_repl result set (exit from while loop).
|
||||
+ # Only way I found to acheive that.
|
||||
+ # and wait a bit to let sync_repl thread time to set its result before fetching it.
|
||||
+ topology.standalone.stop()
|
||||
+ sync_repl.get_result()
|
||||
+ sync_repl.join()
|
||||
+ log.info('test_sync_repl_dynamic_plugin: PASS\n')
|
||||
+
|
||||
+ # Success
|
||||
+ log.info('Test complete')
|
||||
+
|
||||
def test_sync_repl_invalid_cookie(topology, request):
|
||||
"""Test sync_repl with invalid cookie
|
||||
|
||||
diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
|
||||
index d2210b64c..283607361 100644
|
||||
--- a/ldap/servers/plugins/sync/sync_persist.c
|
||||
+++ b/ldap/servers/plugins/sync/sync_persist.c
|
||||
@@ -156,6 +156,17 @@ ignore_op_pl(Slapi_PBlock *pb)
|
||||
* This is the same for ident
|
||||
*/
|
||||
prim_op = get_thread_primary_op();
|
||||
+ if (prim_op == NULL) {
|
||||
+ /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
|
||||
+ * The only known case it happens is with dynamic plugin enabled and an
|
||||
+ * update that enable the sync_repl plugin. In such case sync_repl registers
|
||||
+ * the postop (sync_update_persist_op) that is called while the preop was not called
|
||||
+ */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
|
||||
+ "ignore_op_pl - Operation without primary op set (0x%lx)\n",
|
||||
+ (ulong) op);
|
||||
+ return;
|
||||
+ }
|
||||
ident = sync_persist_get_operation_extension(pb);
|
||||
|
||||
if (ident) {
|
||||
@@ -232,8 +243,18 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber
|
||||
|
||||
|
||||
prim_op = get_thread_primary_op();
|
||||
+ if (prim_op == NULL) {
|
||||
+ /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
|
||||
+ * The only known case it happens is with dynamic plugin enabled and an
|
||||
+ * update that enable the sync_repl plugin. In such case sync_repl registers
|
||||
+ * the postop (sync_update_persist_op) that is called while the preop was not called
|
||||
+ */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
|
||||
+ "sync_update_persist_op - Operation without primary op set (0x%lx)\n",
|
||||
+ (ulong) pb_op);
|
||||
+ return;
|
||||
+ }
|
||||
ident = sync_persist_get_operation_extension(pb);
|
||||
- PR_ASSERT(prim_op);
|
||||
|
||||
if ((ident == NULL) && operation_is_flag_set(pb_op, OP_FLAG_NOOP)) {
|
||||
/* This happens for URP (add cenotaph, fixup rename, tombstone resurrect)
|
||||
--
|
||||
2.43.0
|
||||
|
||||
@ -1,840 +0,0 @@
|
||||
From 8dc61a176323f0d41df730abd715ccff3034c2be Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Sun, 27 Nov 2022 09:37:19 -0500
|
||||
Subject: [PATCH] Issue 5547 - automember plugin improvements
|
||||
|
||||
Description:
|
||||
|
||||
Rebuild task has the following improvements:
|
||||
|
||||
- Only one task allowed at a time
|
||||
- Do not cleanup previous members by default. Add new CLI option to intentionally
|
||||
cleanup memberships before rebuilding from scratch.
|
||||
- Add better task logging to show fixup progress
|
||||
|
||||
To prevent automember from being called in a nested be_txn loop thread storage is
|
||||
used to check and skip these loops.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/5547
|
||||
|
||||
Reviewed by: spichugi(Thanks!)
|
||||
---
|
||||
.../automember_plugin/automember_mod_test.py | 43 +++-
|
||||
ldap/servers/plugins/automember/automember.c | 232 ++++++++++++++----
|
||||
ldap/servers/slapd/back-ldbm/ldbm_add.c | 11 +-
|
||||
ldap/servers/slapd/back-ldbm/ldbm_delete.c | 10 +-
|
||||
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 11 +-
|
||||
.../lib389/cli_conf/plugins/automember.py | 10 +-
|
||||
src/lib389/lib389/plugins.py | 7 +-
|
||||
src/lib389/lib389/tasks.py | 9 +-
|
||||
8 files changed, 250 insertions(+), 83 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
index 8d25384bf..7a0ed3275 100644
|
||||
--- a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
+++ b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
@@ -5,12 +5,13 @@
|
||||
# License: GPL (version 3 or any later version).
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
-#
|
||||
+import ldap
|
||||
import logging
|
||||
import pytest
|
||||
import os
|
||||
+import time
|
||||
from lib389.utils import ds_is_older
|
||||
-from lib389._constants import *
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions
|
||||
from lib389.idm.user import UserAccounts
|
||||
from lib389.idm.group import Groups
|
||||
@@ -41,6 +42,11 @@ def automember_fixture(topo, request):
|
||||
user_accts = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
|
||||
user = user_accts.create_test_user()
|
||||
|
||||
+ # Create extra users
|
||||
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
|
||||
+ for i in range(0, 100):
|
||||
+ users.create_test_user(uid=i)
|
||||
+
|
||||
# Create automember definitions and regex rules
|
||||
automember_prop = {
|
||||
'cn': 'testgroup_definition',
|
||||
@@ -59,7 +65,7 @@ def automember_fixture(topo, request):
|
||||
automemberplugin.enable()
|
||||
topo.standalone.restart()
|
||||
|
||||
- return (user, groups)
|
||||
+ return user, groups
|
||||
|
||||
|
||||
def test_mods(automember_fixture, topo):
|
||||
@@ -72,19 +78,21 @@ def test_mods(automember_fixture, topo):
|
||||
2. Update user that should add it to group[1]
|
||||
3. Update user that should add it to group[2]
|
||||
4. Update user that should add it to group[0]
|
||||
- 5. Test rebuild task correctly moves user to group[1]
|
||||
+ 5. Test rebuild task adds user to group[1]
|
||||
+ 6. Test rebuild task cleanups groups and only adds it to group[1]
|
||||
:expectedresults:
|
||||
1. Success
|
||||
2. Success
|
||||
3. Success
|
||||
4. Success
|
||||
5. Success
|
||||
+ 6. Success
|
||||
"""
|
||||
(user, groups) = automember_fixture
|
||||
|
||||
# Update user which should go into group[0]
|
||||
user.replace('cn', 'whatever')
|
||||
- groups[0].is_member(user.dn)
|
||||
+ assert groups[0].is_member(user.dn)
|
||||
if groups[1].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -92,7 +100,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user0 which should go into group[1]
|
||||
user.replace('cn', 'mark')
|
||||
- groups[1].is_member(user.dn)
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -100,7 +108,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user which should go into group[2]
|
||||
user.replace('cn', 'simon')
|
||||
- groups[2].is_member(user.dn)
|
||||
+ assert groups[2].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[1].is_member(user.dn):
|
||||
@@ -108,7 +116,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user which should go back into group[0] (full circle)
|
||||
user.replace('cn', 'whatever')
|
||||
- groups[0].is_member(user.dn)
|
||||
+ assert groups[0].is_member(user.dn)
|
||||
if groups[1].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -128,12 +136,24 @@ def test_mods(automember_fixture, topo):
|
||||
automemberplugin.enable()
|
||||
topo.standalone.restart()
|
||||
|
||||
- # Run rebuild task
|
||||
+ # Run rebuild task (no cleanup)
|
||||
task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount")
|
||||
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
|
||||
+ # test only one fixup task is allowed at a time
|
||||
+ automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=top")
|
||||
task.wait()
|
||||
|
||||
- # Test membership
|
||||
- groups[1].is_member(user.dn)
|
||||
+ # Test membership (user should still be in groups[0])
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
+ if not groups[0].is_member(user.dn):
|
||||
+ assert False
|
||||
+
|
||||
+ # Run rebuild task with cleanup
|
||||
+ task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount", cleanup=True)
|
||||
+ task.wait()
|
||||
+
|
||||
+ # Test membership (user should only be in groups[1])
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -148,4 +168,3 @@ if __name__ == '__main__':
|
||||
# -s for DEBUG mode
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main(["-s", CURRENT_FILE])
|
||||
-
|
||||
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
|
||||
index 3494d0343..419adb052 100644
|
||||
--- a/ldap/servers/plugins/automember/automember.c
|
||||
+++ b/ldap/servers/plugins/automember/automember.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2011 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -14,7 +14,7 @@
|
||||
* Auto Membership Plug-in
|
||||
*/
|
||||
#include "automember.h"
|
||||
-
|
||||
+#include <pthread.h>
|
||||
|
||||
/*
|
||||
* Plug-in globals
|
||||
@@ -22,7 +22,9 @@
|
||||
static PRCList *g_automember_config = NULL;
|
||||
static Slapi_RWLock *g_automember_config_lock = NULL;
|
||||
static uint64_t abort_rebuild_task = 0;
|
||||
-
|
||||
+static pthread_key_t td_automem_block_nested;
|
||||
+static PRBool fixup_running = PR_FALSE;
|
||||
+static PRLock *fixup_lock = NULL;
|
||||
static void *_PluginID = NULL;
|
||||
static Slapi_DN *_PluginDN = NULL;
|
||||
static Slapi_DN *_ConfigAreaDN = NULL;
|
||||
@@ -93,9 +95,43 @@ static void automember_task_export_destructor(Slapi_Task *task);
|
||||
static void automember_task_map_destructor(Slapi_Task *task);
|
||||
|
||||
#define DEFAULT_FILE_MODE PR_IRUSR | PR_IWUSR
|
||||
+#define FIXUP_PROGRESS_LIMIT 1000
|
||||
static uint64_t plugin_do_modify = 0;
|
||||
static uint64_t plugin_is_betxn = 0;
|
||||
|
||||
+/* automember_plugin fixup task and add operations should block other be_txn
|
||||
+ * plugins from calling automember_post_op_mod() */
|
||||
+static int32_t
|
||||
+slapi_td_block_nested_post_op(void)
|
||||
+{
|
||||
+ int32_t val = 12345;
|
||||
+
|
||||
+ if (pthread_setspecific(td_automem_block_nested, (void *)&val) != 0) {
|
||||
+ return PR_FAILURE;
|
||||
+ }
|
||||
+ return PR_SUCCESS;
|
||||
+}
|
||||
+
|
||||
+static int32_t
|
||||
+slapi_td_unblock_nested_post_op(void)
|
||||
+{
|
||||
+ if (pthread_setspecific(td_automem_block_nested, NULL) != 0) {
|
||||
+ return PR_FAILURE;
|
||||
+ }
|
||||
+ return PR_SUCCESS;
|
||||
+}
|
||||
+
|
||||
+static int32_t
|
||||
+slapi_td_is_post_op_nested(void)
|
||||
+{
|
||||
+ int32_t *value = pthread_getspecific(td_automem_block_nested);
|
||||
+
|
||||
+ if (value == NULL) {
|
||||
+ return 0;
|
||||
+ }
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Config cache locking functions
|
||||
*/
|
||||
@@ -317,6 +353,14 @@ automember_start(Slapi_PBlock *pb)
|
||||
return -1;
|
||||
}
|
||||
|
||||
+ if (fixup_lock == NULL) {
|
||||
+ if ((fixup_lock = PR_NewLock()) == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_start - Failed to create fixup lock.\n");
|
||||
+ return -1;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
/*
|
||||
* Get the plug-in target dn from the system
|
||||
* and store it for future use. */
|
||||
@@ -360,6 +404,11 @@ automember_start(Slapi_PBlock *pb)
|
||||
}
|
||||
}
|
||||
|
||||
+ if (pthread_key_create(&td_automem_block_nested, NULL) != 0) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_start - pthread_key_create failed\n");
|
||||
+ }
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_start - ready for service\n");
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
@@ -394,6 +443,8 @@ automember_close(Slapi_PBlock *pb __attribute__((unused)))
|
||||
slapi_sdn_free(&_ConfigAreaDN);
|
||||
slapi_destroy_rwlock(g_automember_config_lock);
|
||||
g_automember_config_lock = NULL;
|
||||
+ PR_DestroyLock(fixup_lock);
|
||||
+ fixup_lock = NULL;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"<-- automember_close\n");
|
||||
@@ -1619,7 +1670,6 @@ out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
-
|
||||
/*
|
||||
* automember_update_member_value()
|
||||
*
|
||||
@@ -1634,7 +1684,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
LDAPMod *mods[2];
|
||||
char *vals[2];
|
||||
char *member_value = NULL;
|
||||
- int rc = 0;
|
||||
+ int rc = LDAP_SUCCESS;
|
||||
Slapi_DN *group_sdn;
|
||||
|
||||
/* First thing check that the group still exists */
|
||||
@@ -1653,7 +1703,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
"automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n",
|
||||
group_dn, rc);
|
||||
}
|
||||
- return rc;
|
||||
+ goto out;
|
||||
}
|
||||
|
||||
/* If grouping_value is dn, we need to fetch the dn instead. */
|
||||
@@ -1879,6 +1929,13 @@ automember_mod_post_op(Slapi_PBlock *pb)
|
||||
PRCList *list = NULL;
|
||||
int rc = SLAPI_PLUGIN_SUCCESS;
|
||||
|
||||
+ if (slapi_td_is_post_op_nested()) {
|
||||
+ /* don't process op twice in the same thread */
|
||||
+ return rc;
|
||||
+ } else {
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ }
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"--> automember_mod_post_op\n");
|
||||
|
||||
@@ -2005,6 +2062,7 @@ automember_mod_post_op(Slapi_PBlock *pb)
|
||||
}
|
||||
}
|
||||
}
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"<-- automember_mod_post_op (%d)\n", rc);
|
||||
@@ -2024,6 +2082,13 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"--> automember_add_post_op\n");
|
||||
|
||||
+ if (slapi_td_is_post_op_nested()) {
|
||||
+ /* don't process op twice in the same thread */
|
||||
+ return rc;
|
||||
+ } else {
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ }
|
||||
+
|
||||
/* Reload config if a config entry was added. */
|
||||
if ((sdn = automember_get_sdn(pb))) {
|
||||
if (automember_dn_is_config(sdn)) {
|
||||
@@ -2039,7 +2104,7 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
|
||||
/* If replication, just bail. */
|
||||
if (automember_isrepl(pb)) {
|
||||
- return SLAPI_PLUGIN_SUCCESS;
|
||||
+ goto bail;
|
||||
}
|
||||
|
||||
/* Get the newly added entry. */
|
||||
@@ -2052,7 +2117,7 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
tombstone);
|
||||
slapi_value_free(&tombstone);
|
||||
if (is_tombstone) {
|
||||
- return SLAPI_PLUGIN_SUCCESS;
|
||||
+ goto bail;
|
||||
}
|
||||
|
||||
/* Check if a config entry applies
|
||||
@@ -2063,21 +2128,19 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
list = PR_LIST_HEAD(g_automember_config);
|
||||
while (list != g_automember_config) {
|
||||
config = (struct configEntry *)list;
|
||||
-
|
||||
/* Does the entry meet scope and filter requirements? */
|
||||
if (slapi_dn_issuffix(slapi_sdn_get_dn(sdn), config->scope) &&
|
||||
- (slapi_filter_test_simple(e, config->filter) == 0)) {
|
||||
+ (slapi_filter_test_simple(e, config->filter) == 0))
|
||||
+ {
|
||||
/* Find out what membership changes are needed and make them. */
|
||||
if (automember_update_membership(config, e, NULL) == SLAPI_PLUGIN_FAILURE) {
|
||||
rc = SLAPI_PLUGIN_FAILURE;
|
||||
break;
|
||||
}
|
||||
}
|
||||
-
|
||||
list = PR_NEXT_LINK(list);
|
||||
}
|
||||
}
|
||||
-
|
||||
automember_config_unlock();
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
@@ -2098,6 +2161,7 @@ bail:
|
||||
slapi_pblock_set(pb, SLAPI_RESULT_CODE, &result);
|
||||
slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, &errtxt);
|
||||
}
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
|
||||
return rc;
|
||||
}
|
||||
@@ -2138,6 +2202,7 @@ typedef struct _task_data
|
||||
Slapi_DN *base_dn;
|
||||
char *bind_dn;
|
||||
int scope;
|
||||
+ PRBool cleanup;
|
||||
} task_data;
|
||||
|
||||
static void
|
||||
@@ -2270,6 +2335,7 @@ automember_task_abort_thread(void *arg)
|
||||
* basedn: dc=example,dc=com
|
||||
* filter: (uid=*)
|
||||
* scope: sub
|
||||
+ * cleanup: yes/on (default is off)
|
||||
*
|
||||
* basedn and filter are required. If scope is omitted, the default is sub
|
||||
*/
|
||||
@@ -2284,9 +2350,22 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
const char *base_dn;
|
||||
const char *filter;
|
||||
const char *scope;
|
||||
+ const char *cleanup_str;
|
||||
+ PRBool cleanup = PR_FALSE;
|
||||
|
||||
*returncode = LDAP_SUCCESS;
|
||||
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ if (fixup_running) {
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_task_add - there is already a fixup task running\n");
|
||||
+ rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
+ goto out;
|
||||
+ }
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+
|
||||
/*
|
||||
* Grab the task params
|
||||
*/
|
||||
@@ -2300,6 +2379,12 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
goto out;
|
||||
}
|
||||
+ if ((cleanup_str = slapi_entry_attr_get_ref(e, "cleanup"))) {
|
||||
+ if (strcasecmp(cleanup_str, "yes") == 0 || strcasecmp(cleanup_str, "on") == 0) {
|
||||
+ cleanup = PR_TRUE;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
scope = slapi_fetch_attr(e, "scope", "sub");
|
||||
/*
|
||||
* setup our task data
|
||||
@@ -2315,6 +2400,7 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
mytaskdata->bind_dn = slapi_ch_strdup(bind_dn);
|
||||
mytaskdata->base_dn = slapi_sdn_new_dn_byval(base_dn);
|
||||
mytaskdata->filter_str = slapi_ch_strdup(filter);
|
||||
+ mytaskdata->cleanup = cleanup;
|
||||
|
||||
if (scope) {
|
||||
if (strcasecmp(scope, "sub") == 0) {
|
||||
@@ -2334,6 +2420,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
task = slapi_plugin_new_task(slapi_entry_get_ndn(e), arg);
|
||||
slapi_task_set_destructor_fn(task, automember_task_destructor);
|
||||
slapi_task_set_data(task, mytaskdata);
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_TRUE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
/*
|
||||
* Start the task as a separate thread
|
||||
*/
|
||||
@@ -2345,6 +2434,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
"automember_task_add - Unable to create task thread!\n");
|
||||
*returncode = LDAP_OPERATIONS_ERROR;
|
||||
slapi_task_finish(task, *returncode);
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_FALSE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
} else {
|
||||
rv = SLAPI_DSE_CALLBACK_OK;
|
||||
@@ -2372,6 +2464,9 @@ automember_rebuild_task_thread(void *arg)
|
||||
PRCList *list = NULL;
|
||||
PRCList *include_list = NULL;
|
||||
int result = 0;
|
||||
+ int64_t fixup_progress_count = 0;
|
||||
+ int64_t fixup_progress_elapsed = 0;
|
||||
+ int64_t fixup_start_time = 0;
|
||||
size_t i = 0;
|
||||
|
||||
/* Reset abort flag */
|
||||
@@ -2380,6 +2475,7 @@ automember_rebuild_task_thread(void *arg)
|
||||
if (!task) {
|
||||
return; /* no task */
|
||||
}
|
||||
+
|
||||
slapi_task_inc_refcount(task);
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_rebuild_task_thread - Refcount incremented.\n");
|
||||
@@ -2393,9 +2489,11 @@ automember_rebuild_task_thread(void *arg)
|
||||
slapi_task_log_status(task, "Automember rebuild task starting (base dn: (%s) filter (%s)...",
|
||||
slapi_sdn_get_dn(td->base_dn), td->filter_str);
|
||||
/*
|
||||
- * Set the bind dn in the local thread data
|
||||
+ * Set the bind dn in the local thread data, and block post op mods
|
||||
*/
|
||||
slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ fixup_start_time = slapi_current_rel_time_t();
|
||||
/*
|
||||
* Take the config lock now and search the database
|
||||
*/
|
||||
@@ -2426,6 +2524,21 @@ automember_rebuild_task_thread(void *arg)
|
||||
* Loop over the entries
|
||||
*/
|
||||
for (i = 0; entries && (entries[i] != NULL); i++) {
|
||||
+ fixup_progress_count++;
|
||||
+ if (fixup_progress_count % FIXUP_PROGRESS_LIMIT == 0 ) {
|
||||
+ slapi_task_log_notice(task,
|
||||
+ "Processed %ld entries in %ld seconds (+%ld seconds)",
|
||||
+ fixup_progress_count,
|
||||
+ slapi_current_rel_time_t() - fixup_start_time,
|
||||
+ slapi_current_rel_time_t() - fixup_progress_elapsed);
|
||||
+ slapi_task_log_status(task,
|
||||
+ "Processed %ld entries in %ld seconds (+%ld seconds)",
|
||||
+ fixup_progress_count,
|
||||
+ slapi_current_rel_time_t() - fixup_start_time,
|
||||
+ slapi_current_rel_time_t() - fixup_progress_elapsed);
|
||||
+ slapi_task_inc_progress(task);
|
||||
+ fixup_progress_elapsed = slapi_current_rel_time_t();
|
||||
+ }
|
||||
if (slapi_atomic_load_64(&abort_rebuild_task, __ATOMIC_ACQUIRE) == 1) {
|
||||
/* The task was aborted */
|
||||
slapi_task_log_notice(task, "Automember rebuild task was intentionally aborted");
|
||||
@@ -2443,48 +2556,66 @@ automember_rebuild_task_thread(void *arg)
|
||||
if (slapi_dn_issuffix(slapi_entry_get_dn(entries[i]), config->scope) &&
|
||||
(slapi_filter_test_simple(entries[i], config->filter) == 0))
|
||||
{
|
||||
- /* First clear out all the defaults groups */
|
||||
- for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
|
||||
- if ((result = automember_update_member_value(entries[i], config->default_groups[ii],
|
||||
- config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
|
||||
- {
|
||||
- slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from default group (%s) error (%d)",
|
||||
- config->default_groups[ii], result);
|
||||
- slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from default group (%s) error (%d)",
|
||||
- config->default_groups[ii], result);
|
||||
- slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
- "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
- config->default_groups[ii], result);
|
||||
- goto out;
|
||||
- }
|
||||
- }
|
||||
-
|
||||
- /* Then clear out the non-default group */
|
||||
- if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
|
||||
- include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
|
||||
- while (include_list != (PRCList *)config->inclusive_rules) {
|
||||
- struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
|
||||
- if ((result = automember_update_member_value(entries[i], slapi_sdn_get_dn(curr_rule->target_group_dn),
|
||||
- config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
|
||||
+ if (td->cleanup) {
|
||||
+
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Cleaning up groups (config %s)\n",
|
||||
+ config->dn);
|
||||
+ /* First clear out all the defaults groups */
|
||||
+ for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
|
||||
+ if ((result = automember_update_member_value(entries[i],
|
||||
+ config->default_groups[ii],
|
||||
+ config->grouping_attr,
|
||||
+ config->grouping_value,
|
||||
+ NULL, DEL_MEMBER)))
|
||||
{
|
||||
slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from group (%s) error (%d)",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ "member from default group (%s) error (%d)",
|
||||
+ config->default_groups[ii], result);
|
||||
slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from group (%s) error (%d)",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ "member from default group (%s) error (%d)",
|
||||
+ config->default_groups[ii], result);
|
||||
slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ config->default_groups[ii], result);
|
||||
goto out;
|
||||
}
|
||||
- include_list = PR_NEXT_LINK(include_list);
|
||||
}
|
||||
+
|
||||
+ /* Then clear out the non-default group */
|
||||
+ if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
|
||||
+ include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
|
||||
+ while (include_list != (PRCList *)config->inclusive_rules) {
|
||||
+ struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
|
||||
+ if ((result = automember_update_member_value(entries[i],
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn),
|
||||
+ config->grouping_attr,
|
||||
+ config->grouping_value,
|
||||
+ NULL, DEL_MEMBER)))
|
||||
+ {
|
||||
+ slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
+ "member from group (%s) error (%d)",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
+ "member from group (%s) error (%d)",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ goto out;
|
||||
+ }
|
||||
+ include_list = PR_NEXT_LINK(include_list);
|
||||
+ }
|
||||
+ }
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Finished cleaning up groups (config %s)\n",
|
||||
+ config->dn);
|
||||
}
|
||||
|
||||
/* Update the memberships for this entries */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Updating membership (config %s)\n",
|
||||
+ config->dn);
|
||||
if (slapi_is_shutting_down() ||
|
||||
automember_update_membership(config, entries[i], NULL) == SLAPI_PLUGIN_FAILURE)
|
||||
{
|
||||
@@ -2508,15 +2639,22 @@ out:
|
||||
slapi_task_log_notice(task, "Automember rebuild task aborted. Error (%d)", result);
|
||||
slapi_task_log_status(task, "Automember rebuild task aborted. Error (%d)", result);
|
||||
} else {
|
||||
- slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
|
||||
- slapi_task_log_status(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
|
||||
+ slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
|
||||
+ (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
|
||||
+ slapi_task_log_status(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
|
||||
+ (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
|
||||
}
|
||||
slapi_task_inc_progress(task);
|
||||
slapi_task_finish(task, result);
|
||||
slapi_task_dec_refcount(task);
|
||||
slapi_atomic_store_64(&abort_rebuild_task, 0, __ATOMIC_RELEASE);
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_FALSE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
- "automember_rebuild_task_thread - Refcount decremented.\n");
|
||||
+ "automember_rebuild_task_thread - task finished, refcount decremented.\n");
|
||||
}
|
||||
|
||||
/*
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
index ba2d73a84..ce4c314a1 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -1264,10 +1264,6 @@ ldbm_back_add(Slapi_PBlock *pb)
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
if (addingentry_id_assigned) {
|
||||
next_id_return(be, addingentry->ep_id);
|
||||
}
|
||||
@@ -1376,6 +1372,11 @@ diskfull_return:
|
||||
if (!not_an_error) {
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
}
|
||||
+
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
}
|
||||
|
||||
common_return:
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
index de23190c3..27f0ac58a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
@@ -1407,11 +1407,6 @@ commit_return:
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
-
|
||||
if (tombstone) {
|
||||
if (cache_is_in_cache(&inst->inst_cache, tombstone)) {
|
||||
tomb_ep_id = tombstone->ep_id; /* Otherwise, tombstone might have been freed. */
|
||||
@@ -1496,6 +1491,11 @@ error_return:
|
||||
conn_id, op_id, parent_modify_c.old_entry, parent_modify_c.new_entry, myrc);
|
||||
}
|
||||
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
+
|
||||
common_return:
|
||||
if (orig_entry) {
|
||||
/* NOTE: #define SLAPI_DELETE_BEPREOP_ENTRY SLAPI_ENTRY_PRE_OP */
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
index 537369055..64b293001 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -1043,11 +1043,6 @@ ldbm_back_modify(Slapi_PBlock *pb)
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
-
|
||||
if (postentry != NULL) {
|
||||
slapi_entry_free(postentry);
|
||||
postentry = NULL;
|
||||
@@ -1103,6 +1098,10 @@ error_return:
|
||||
if (!not_an_error) {
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
}
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
}
|
||||
|
||||
/* if ec is in cache, remove it, then add back e if we still have it */
|
||||
diff --git a/src/lib389/lib389/cli_conf/plugins/automember.py b/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
index 15b00c633..568586ad8 100644
|
||||
--- a/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
+++ b/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
@@ -155,7 +155,7 @@ def fixup(inst, basedn, log, args):
|
||||
log.info('Attempting to add task entry... This will fail if Automembership plug-in is not enabled.')
|
||||
if not plugin.status():
|
||||
log.error("'%s' is disabled. Rebuild membership task can't be executed" % plugin.rdn)
|
||||
- fixup_task = plugin.fixup(args.DN, args.filter)
|
||||
+ fixup_task = plugin.fixup(args.DN, args.filter, args.cleanup)
|
||||
if args.wait:
|
||||
log.info(f'Waiting for fixup task "{fixup_task.dn}" to complete. You can safely exit by pressing Control C ...')
|
||||
fixup_task.wait(timeout=args.timeout)
|
||||
@@ -225,8 +225,8 @@ def create_parser(subparsers):
|
||||
subcommands = automember.add_subparsers(help='action')
|
||||
add_generic_plugin_parsers(subcommands, AutoMembershipPlugin)
|
||||
|
||||
- list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
|
||||
- subcommands_list = list.add_subparsers(help='action')
|
||||
+ automember_list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
|
||||
+ subcommands_list = automember_list.add_subparsers(help='action')
|
||||
list_definitions = subcommands_list.add_parser('definitions', help='Lists Automembership definitions.')
|
||||
list_definitions.set_defaults(func=definition_list)
|
||||
list_regexes = subcommands_list.add_parser('regexes', help='List Automembership regex rules.')
|
||||
@@ -269,6 +269,8 @@ def create_parser(subparsers):
|
||||
fixup_task.add_argument('-f', '--filter', required=True, help='Sets the LDAP filter for entries to fix up')
|
||||
fixup_task.add_argument('-s', '--scope', required=True, choices=['sub', 'base', 'one'], type=str.lower,
|
||||
help='Sets the LDAP search scope for entries to fix up')
|
||||
+ fixup_task.add_argument('--cleanup', action='store_true',
|
||||
+ help="Clean up previous group memberships before rebuilding")
|
||||
fixup_task.add_argument('--wait', action='store_true',
|
||||
help="Wait for the task to finish, this could take a long time")
|
||||
fixup_task.add_argument('--timeout', default=0, type=int,
|
||||
@@ -279,7 +281,7 @@ def create_parser(subparsers):
|
||||
fixup_status.add_argument('--dn', help="The task entry's DN")
|
||||
fixup_status.add_argument('--show-log', action='store_true', help="Display the task log")
|
||||
fixup_status.add_argument('--watch', action='store_true',
|
||||
- help="Watch the task's status and wait for it to finish")
|
||||
+ help="Watch the task's status and wait for it to finish")
|
||||
|
||||
abort_fixup = subcommands.add_parser('abort-fixup', help='Abort the rebuild membership task.')
|
||||
abort_fixup.set_defaults(func=abort)
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 52691a44c..a1ad0a45b 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -1141,13 +1141,15 @@ class AutoMembershipPlugin(Plugin):
|
||||
def __init__(self, instance, dn="cn=Auto Membership Plugin,cn=plugins,cn=config"):
|
||||
super(AutoMembershipPlugin, self).__init__(instance, dn)
|
||||
|
||||
- def fixup(self, basedn, _filter=None):
|
||||
+ def fixup(self, basedn, _filter=None, cleanup=False):
|
||||
"""Create an automember rebuild membership task
|
||||
|
||||
:param basedn: Basedn to fix up
|
||||
:type basedn: str
|
||||
:param _filter: a filter for entries to fix up
|
||||
:type _filter: str
|
||||
+ :param cleanup: cleanup old group memberships
|
||||
+ :type cleanup: boolean
|
||||
|
||||
:returns: an instance of Task(DSLdapObject)
|
||||
"""
|
||||
@@ -1156,6 +1158,9 @@ class AutoMembershipPlugin(Plugin):
|
||||
task_properties = {'basedn': basedn}
|
||||
if _filter is not None:
|
||||
task_properties['filter'] = _filter
|
||||
+ if cleanup:
|
||||
+ task_properties['cleanup'] = "yes"
|
||||
+
|
||||
task.create(properties=task_properties)
|
||||
|
||||
return task
|
||||
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
|
||||
index 1a16bbb83..193805780 100644
|
||||
--- a/src/lib389/lib389/tasks.py
|
||||
+++ b/src/lib389/lib389/tasks.py
|
||||
@@ -1006,12 +1006,13 @@ class Tasks(object):
|
||||
return exitCode
|
||||
|
||||
def automemberRebuild(self, suffix=DEFAULT_SUFFIX, scope='sub',
|
||||
- filterstr='objectclass=top', args=None):
|
||||
+ filterstr='objectclass=top', cleanup=False, args=None):
|
||||
'''
|
||||
- @param suffix - The suffix the task should examine - defualt is
|
||||
+ @param suffix - The suffix the task should examine - default is
|
||||
"dc=example,dc=com"
|
||||
@param scope - The scope of the search to find entries
|
||||
- @param fitlerstr - THe search filter to find entries
|
||||
+ @param filterstr - The search filter to find entries
|
||||
+ @param cleanup - reset/clear the old group memberships prior to rebuilding
|
||||
@param args - is a dictionary that contains modifier of the task
|
||||
wait: True/[False] - If True, waits for the completion of
|
||||
the task before to return
|
||||
@@ -1027,6 +1028,8 @@ class Tasks(object):
|
||||
entry.setValues('basedn', suffix)
|
||||
entry.setValues('filter', filterstr)
|
||||
entry.setValues('scope', scope)
|
||||
+ if cleanup:
|
||||
+ entry.setValues('cleanup', 'yes')
|
||||
|
||||
# start the task and possibly wait for task completion
|
||||
try:
|
||||
--
|
||||
2.43.0
|
||||
|
||||
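The lib389 changes above expose the new cleanup option through `AutoMembershipPlugin.fixup()` and the `automember fixup` CLI. A minimal usage sketch (not part of the patch itself), assuming an existing `DirSrv` instance object and using only the lib389 calls shown in the diff plus the generic `Task.wait()`/`get_exit_code()` helpers; the function name `rebuild_with_cleanup` is hypothetical:

```python
from lib389.plugins import AutoMembershipPlugin

def rebuild_with_cleanup(inst, basedn, search_filter):
    """Run the automember rebuild task, clearing old memberships first."""
    plugin = AutoMembershipPlugin(inst)
    if not plugin.status():
        raise RuntimeError("Auto Membership plug-in is disabled")
    # cleanup=True adds 'cleanup: yes' to the task entry, so the rebuild
    # thread deletes existing default and rule-based memberships before
    # re-adding them (see the task thread changes above).
    task = plugin.fixup(basedn, _filter=search_filter, cleanup=True)
    task.wait(timeout=120)  # wait up to two minutes for completion
    return task.get_exit_code()
```

Only one fixup task may run at a time: while `fixup_running` is set, a second task request is rejected with LDAP_UNWILLING_TO_PERFORM, so callers scheduling rebuilds concurrently should expect that error.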
@ -1,83 +0,0 @@
|
||||
From 9319d5b022918f14cacb00e3faef85a6ab730a26 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 27 Feb 2024 16:30:47 -0800
|
||||
Subject: [PATCH] Issue 3527 - Support HAProxy and Instance on the same machine
|
||||
configuration (#6107)
|
||||
|
||||
Description: Improve how we handle HAProxy connections to work better when
|
||||
the DS and HAProxy are on the same machine.
|
||||
Ensure the client and header destination IPs are checked against the trusted IP list.
|
||||
|
||||
Additionally, this change also allows a configuration where
|
||||
HAProxy listens on a different subnet than the one used to forward the request.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/3527
|
||||
|
||||
Reviewed by: @progier389, @jchapma (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/connection.c | 35 +++++++++++++++++++++++++--------
|
||||
1 file changed, 27 insertions(+), 8 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
|
||||
index d28a39bf7..10a8cc577 100644
|
||||
--- a/ldap/servers/slapd/connection.c
|
||||
+++ b/ldap/servers/slapd/connection.c
|
||||
@@ -1187,6 +1187,8 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
|
||||
char str_ip[INET6_ADDRSTRLEN + 1] = {0};
|
||||
char str_haproxy_ip[INET6_ADDRSTRLEN + 1] = {0};
|
||||
char str_haproxy_destip[INET6_ADDRSTRLEN + 1] = {0};
|
||||
+ int trusted_matches_ip_found = 0;
|
||||
+ int trusted_matches_destip_found = 0;
|
||||
struct berval **bvals = NULL;
|
||||
int proxy_connection = 0;
|
||||
|
||||
@@ -1245,21 +1247,38 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
|
||||
normalize_IPv4(conn->cin_addr, buf_ip, sizeof(buf_ip), str_ip, sizeof(str_ip));
|
||||
normalize_IPv4(&pr_netaddr_dest, buf_haproxy_destip, sizeof(buf_haproxy_destip),
|
||||
str_haproxy_destip, sizeof(str_haproxy_destip));
|
||||
+ size_t ip_len = strlen(buf_ip);
|
||||
+ size_t destip_len = strlen(buf_haproxy_destip);
|
||||
|
||||
/* Now, reset RC and set it to 0 only if a match is found */
|
||||
haproxy_rc = -1;
|
||||
|
||||
- /* Allow only:
|
||||
- * Trusted IP == Original Client IP == HAProxy Header Destination IP */
|
||||
+ /*
|
||||
+ * We need to allow a configuration where DS instance and HAProxy are on the same machine.
|
||||
+ * In this case, we need to check if
|
||||
+ * the HAProxy client IP (which will be a loopback address) matches one of the trusted IP addresses,
|
||||
+ * while still checking that
|
||||
+ * the HAProxy header destination IP address matches one of the trusted IP addresses.
|
||||
+ * Additionally, this change will also allow configuration having
|
||||
+ * HAProxy listening on a different subnet than one used to forward the request.
|
||||
+ */
|
||||
for (size_t i = 0; bvals[i] != NULL; ++i) {
|
||||
- if ((strlen(bvals[i]->bv_val) == strlen(buf_ip)) &&
|
||||
- (strlen(bvals[i]->bv_val) == strlen(buf_haproxy_destip)) &&
|
||||
- (strncasecmp(bvals[i]->bv_val, buf_ip, strlen(buf_ip)) == 0) &&
|
||||
- (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, strlen(buf_haproxy_destip)) == 0)) {
|
||||
- haproxy_rc = 0;
|
||||
- break;
|
||||
+ size_t bval_len = strlen(bvals[i]->bv_val);
|
||||
+
|
||||
+ /* Check if the Client IP (HAProxy's machine IP) address matches the trusted IP address */
|
||||
+ if (!trusted_matches_ip_found) {
|
||||
+ trusted_matches_ip_found = (bval_len == ip_len) && (strncasecmp(bvals[i]->bv_val, buf_ip, ip_len) == 0);
|
||||
+ }
|
||||
+ /* Check if the HAProxy header destination IP address matches the trusted IP address */
|
||||
+ if (!trusted_matches_destip_found) {
|
||||
+ trusted_matches_destip_found = (bval_len == destip_len) && (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, destip_len) == 0);
|
||||
}
|
||||
}
|
||||
+
|
||||
+ if (trusted_matches_ip_found && trusted_matches_destip_found) {
|
||||
+ haproxy_rc = 0;
|
||||
+ }
|
||||
+
|
||||
if (haproxy_rc == -1) {
|
||||
slapi_log_err(SLAPI_LOG_CONNS, "connection_read_operation", "HAProxy header received from unknown source.\n");
|
||||
disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_PROXY_UNKNOWN, EPROTO);
|
||||
--
|
||||
2.45.0
|
||||
|
||||
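The core of the change is the relaxed matching rule: the HAProxy client IP and the header destination IP must each match some entry in the trusted-IP list, but they no longer have to be the same address. A small illustrative sketch of that rule in Python (not the server's C code; names are made up, and normalization of IPv4-mapped addresses is assumed to have happened already, as normalize_IPv4() does in the patch):

```python
def haproxy_header_is_trusted(client_ip, header_dest_ip, trusted_ips):
    """Both addresses must be covered by the trusted-IP list."""
    client_ok = any(ip.lower() == client_ip.lower() for ip in trusted_ips)
    dest_ok = any(ip.lower() == header_dest_ip.lower() for ip in trusted_ips)
    return client_ok and dest_ok

# HAProxy on the same machine: the TCP peer is a loopback address while the
# forwarded destination is the listener address; both appear in the list.
assert haproxy_header_is_trusted("127.0.0.1", "192.0.2.10",
                                 ["127.0.0.1", "192.0.2.10"])
# An unknown source still fails, which triggers the disconnect above.
assert not haproxy_header_is_trusted("198.51.100.7", "192.0.2.10",
                                     ["127.0.0.1", "192.0.2.10"])
```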
@ -1,108 +0,0 @@
|
||||
From 016a2b6bd3e27cbff36609824a75b020dfd24823 Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 1 May 2024 15:01:33 +0100
|
||||
Subject: [PATCH] CVE-2024-2199
|
||||
|
||||
---
|
||||
.../tests/suites/password/password_test.py | 56 +++++++++++++++++++
|
||||
ldap/servers/slapd/modify.c | 8 ++-
|
||||
2 files changed, 62 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py
|
||||
index 38079476a..b3ff08904 100644
|
||||
--- a/dirsrvtests/tests/suites/password/password_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/password_test.py
|
||||
@@ -65,6 +65,62 @@ def test_password_delete_specific_password(topology_st):
|
||||
log.info('test_password_delete_specific_password: PASSED')
|
||||
|
||||
|
||||
+def test_password_modify_non_utf8(topology_st):
|
||||
+ """Attempt a modify of the userPassword attribute with
|
||||
+ an invalid non utf8 value
|
||||
+
|
||||
+ :id: a31af9d5-d665-42b9-8d6e-fea3d0837d36
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Add a user if it doesnt exist and set its password
|
||||
+ 2. Verify password with a bind
|
||||
+ 3. Modify userPassword attr with invalid value
|
||||
+ 4. Attempt a bind with invalid password value
|
||||
+ 5. Verify original password with a bind
|
||||
+ :expectedresults:
|
||||
+ 1. The user with userPassword should be added successfully
|
||||
+ 2. Operation should be successful
|
||||
+ 3. Server returns ldap.UNWILLING_TO_PERFORM
|
||||
+ 4. Server returns ldap.INVALID_CREDENTIALS
|
||||
+ 5. Operation should be successful
|
||||
+ """
|
||||
+
|
||||
+ log.info('Running test_password_modify_non_utf8...')
|
||||
+
|
||||
+ # Create user and set password
|
||||
+ standalone = topology_st.standalone
|
||||
+ users = UserAccounts(standalone, DEFAULT_SUFFIX)
|
||||
+ if not users.exists(TEST_USER_PROPERTIES['uid'][0]):
|
||||
+ user = users.create(properties=TEST_USER_PROPERTIES)
|
||||
+ else:
|
||||
+ user = users.get(TEST_USER_PROPERTIES['uid'][0])
|
||||
+ user.set('userpassword', PASSWORD)
|
||||
+
|
||||
+ # Verify password
|
||||
+ try:
|
||||
+ user.bind(PASSWORD)
|
||||
+ except ldap.LDAPError as e:
|
||||
+ log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
|
||||
+ assert False
|
||||
+
|
||||
+ # Modify userPassword with an invalid value
|
||||
+ password = b'tes\x82t-password' # A non UTF-8 encoded password
|
||||
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
|
||||
+ user.replace('userpassword', password)
|
||||
+
|
||||
+ # Verify a bind fails with invalid pasword
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ user.bind(password)
|
||||
+
|
||||
+ # Verify we can still bind with original password
|
||||
+ try:
|
||||
+ user.bind(PASSWORD)
|
||||
+ except ldap.LDAPError as e:
|
||||
+ log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
|
||||
+ assert False
|
||||
+
|
||||
+ log.info('test_password_modify_non_utf8: PASSED')
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
|
||||
index 5ca78539c..669bb104c 100644
|
||||
--- a/ldap/servers/slapd/modify.c
|
||||
+++ b/ldap/servers/slapd/modify.c
|
||||
@@ -765,8 +765,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
|
||||
* flagged - leave mod attributes alone */
|
||||
if (!repl_op && !skip_modified_attrs && lastmod) {
|
||||
modify_update_last_modified_attr(pb, &smods);
|
||||
+ slapi_pblock_set(pb, SLAPI_MODIFY_MODS, slapi_mods_get_ldapmods_byref(&smods));
|
||||
}
|
||||
|
||||
+
|
||||
if (0 == slapi_mods_get_num_mods(&smods)) {
|
||||
/* nothing to do - no mods - this is not an error - just
|
||||
send back LDAP_SUCCESS */
|
||||
@@ -933,8 +935,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
|
||||
|
||||
/* encode password */
|
||||
if (pw_encodevals_ext(pb, sdn, va)) {
|
||||
- slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s.\n", slapi_entry_get_dn_const(e));
|
||||
- send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to store attribute \"userPassword\" correctly\n", 0, NULL);
|
||||
+ slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s, "
|
||||
+ "check value is utf8 string.\n", slapi_entry_get_dn_const(e));
|
||||
+ send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to hash \"userPassword\" attribute, "
|
||||
+ "check value is utf8 string.\n", 0, NULL);
|
||||
valuearray_free(&va);
|
||||
goto free_and_return;
|
||||
}
|
||||
--
|
||||
2.45.0
|
||||
|
||||
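The behaviour exercised by the test above can be summarised as: a userPassword value that is not valid UTF-8 is rejected with LDAP_UNWILLING_TO_PERFORM before any hashing is attempted, and the previously stored password keeps working. A rough Python illustration of the validity check only, reusing the exact byte value from the test case (the function name is illustrative; this is not how slapd implements it internally):

```python
def is_valid_utf8(raw: bytes) -> bool:
    """True when the candidate password bytes decode as UTF-8."""
    try:
        raw.decode("utf-8")
        return True
    except UnicodeDecodeError:
        return False

assert is_valid_utf8(b"test-password")
assert not is_valid_utf8(b"tes\x82t-password")  # value rejected in the test
```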
@ -1,213 +0,0 @@
|
||||
From d5bbe52fbe84a7d3b5938bf82d5c4af15061a8e2 Mon Sep 17 00:00:00 2001
|
||||
From: Pierre Rogier <progier@redhat.com>
|
||||
Date: Wed, 17 Apr 2024 18:18:04 +0200
|
||||
Subject: [PATCH] CVE-2024-3657
|
||||
|
||||
---
|
||||
.../tests/suites/filter/large_filter_test.py | 34 +++++-
|
||||
ldap/servers/slapd/back-ldbm/index.c | 111 ++++++++++--------
|
||||
2 files changed, 92 insertions(+), 53 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/filter/large_filter_test.py b/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
index ecc7bf979..40526bb16 100644
|
||||
--- a/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
+++ b/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
@@ -13,19 +13,29 @@ verify and testing Filter from a search
|
||||
|
||||
import os
|
||||
import pytest
|
||||
+import ldap
|
||||
|
||||
-from lib389._constants import PW_DM
|
||||
+from lib389._constants import PW_DM, DEFAULT_SUFFIX, ErrorLog
|
||||
from lib389.topologies import topology_st as topo
|
||||
from lib389.idm.user import UserAccounts, UserAccount
|
||||
from lib389.idm.account import Accounts
|
||||
from lib389.backend import Backends
|
||||
from lib389.idm.domain import Domain
|
||||
+from lib389.utils import get_ldapurl_from_serverid
|
||||
|
||||
SUFFIX = 'dc=anuj,dc=com'
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
|
||||
+def open_new_ldapi_conn(dsinstance):
|
||||
+ ldapurl, certdir = get_ldapurl_from_serverid(dsinstance)
|
||||
+ assert 'ldapi://' in ldapurl
|
||||
+ conn = ldap.initialize(ldapurl)
|
||||
+ conn.sasl_interactive_bind_s("", ldap.sasl.external())
|
||||
+ return conn
|
||||
+
|
||||
+
|
||||
@pytest.fixture(scope="module")
|
||||
def _create_entries(request, topo):
|
||||
"""
|
||||
@@ -160,6 +170,28 @@ def test_large_filter(topo, _create_entries, real_value):
|
||||
assert len(Accounts(conn, SUFFIX).filter(real_value)) == 3
|
||||
|
||||
|
||||
+def test_long_filter_value(topo):
|
||||
+ """Exercise large eq filter with dn syntax attributes
|
||||
+
|
||||
+ :id: b069ef72-fcc3-11ee-981c-482ae39447e5
|
||||
+ :setup: Standalone
|
||||
+ :steps:
|
||||
+ 1. Try to pass filter rules as per the condition.
|
||||
+ :expectedresults:
|
||||
+ 1. Pass
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ conn = open_new_ldapi_conn(inst.serverid)
|
||||
+ inst.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE,ErrorLog.SEARCH_FILTER))
|
||||
+ filter_value = "a\x1Edmin" * 1025
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ filter_value = "aAdmin" * 1025
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ filter_value = "*"
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ inst.config.loglevel(vals=(ErrorLog.DEFAULT,))
|
||||
+
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main("-s -v %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c
|
||||
index 410db23d1..30fa09ebb 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/index.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/index.c
|
||||
@@ -71,6 +71,32 @@ typedef struct _index_buffer_handle index_buffer_handle;
|
||||
#define INDEX_BUFFER_FLAG_SERIALIZE 1
|
||||
#define INDEX_BUFFER_FLAG_STATS 2
|
||||
|
||||
+/*
|
||||
+ * space needed to encode a byte:
|
||||
+ * 0x00-0x1f and 0x7f-0xff require 3 bytes: \xx
|
||||
+ * 0x22 and 0x5C requires 2 bytes: \" and \\
|
||||
+ * other requires 1 byte: c
|
||||
+ */
|
||||
+static char encode_size[] = {
|
||||
+ /* 0x00 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x10 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x20 */ 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1,
|
||||
+ /* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
|
||||
+ /* 0x80 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x90 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xA0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xB0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xC0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xD0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xE0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xF0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+};
|
||||
+
|
||||
+
|
||||
/* Index buffering functions */
|
||||
|
||||
static int
|
||||
@@ -799,65 +825,46 @@ index_add_mods(
|
||||
|
||||
/*
|
||||
* Convert a 'struct berval' into a displayable ASCII string
|
||||
+ * returns the printable string
|
||||
*/
|
||||
-
|
||||
-#define SPECIAL(c) (c < 32 || c > 126 || c == '\\' || c == '"')
|
||||
-
|
||||
const char *
|
||||
encode(const struct berval *data, char buf[BUFSIZ])
|
||||
{
|
||||
- char *s;
|
||||
- char *last;
|
||||
- if (data == NULL || data->bv_len == 0)
|
||||
- return "";
|
||||
- last = data->bv_val + data->bv_len - 1;
|
||||
- for (s = data->bv_val; s < last; ++s) {
|
||||
- if (SPECIAL(*s)) {
|
||||
- char *first = data->bv_val;
|
||||
- char *bufNext = buf;
|
||||
- size_t bufSpace = BUFSIZ - 4;
|
||||
- while (1) {
|
||||
- /* printf ("%lu bytes ASCII\n", (unsigned long)(s - first)); */
|
||||
- if (bufSpace < (size_t)(s - first))
|
||||
- s = first + bufSpace - 1;
|
||||
- if (s != first) {
|
||||
- memcpy(bufNext, first, s - first);
|
||||
- bufNext += (s - first);
|
||||
- bufSpace -= (s - first);
|
||||
- }
|
||||
- do {
|
||||
- if (bufSpace) {
|
||||
- *bufNext++ = '\\';
|
||||
- --bufSpace;
|
||||
- }
|
||||
- if (bufSpace < 2) {
|
||||
- memcpy(bufNext, "..", 2);
|
||||
- bufNext += 2;
|
||||
- goto bail;
|
||||
- }
|
||||
- if (*s == '\\' || *s == '"') {
|
||||
- *bufNext++ = *s;
|
||||
- --bufSpace;
|
||||
- } else {
|
||||
- sprintf(bufNext, "%02x", (unsigned)*(unsigned char *)s);
|
||||
- bufNext += 2;
|
||||
- bufSpace -= 2;
|
||||
- }
|
||||
- } while (++s <= last && SPECIAL(*s));
|
||||
- if (s > last)
|
||||
- break;
|
||||
- first = s;
|
||||
- while (!SPECIAL(*s) && s <= last)
|
||||
- ++s;
|
||||
- }
|
||||
- bail:
|
||||
- *bufNext = '\0';
|
||||
- /* printf ("%lu chars in buffer\n", (unsigned long)(bufNext - buf)); */
|
||||
+ if (!data || !data->bv_val) {
|
||||
+ strcpy(buf, "<NULL>");
|
||||
+ return buf;
|
||||
+ }
|
||||
+ char *endbuff = &buf[BUFSIZ-4]; /* Reserve space to append "...\0" */
|
||||
+ char *ptout = buf;
|
||||
+ unsigned char *ptin = (unsigned char*) data->bv_val;
|
||||
+ unsigned char *endptin = ptin+data->bv_len;
|
||||
+
|
||||
+ while (ptin < endptin) {
|
||||
+ if (ptout >= endbuff) {
|
||||
+ /*
|
||||
+ * BUFSIZ(8K) > SLAPI_LOG_BUFSIZ(2K) so the error log message will be
|
||||
+ * truncated anyway. So there is no real interest to test if the original
|
||||
+ * data contains no special characters and return it as is.
|
||||
+ */
|
||||
+ strcpy(endbuff, "...");
|
||||
return buf;
|
||||
}
|
||||
+ switch (encode_size[*ptin]) {
|
||||
+ case 1:
|
||||
+ *ptout++ = *ptin++;
|
||||
+ break;
|
||||
+ case 2:
|
||||
+ *ptout++ = '\\';
|
||||
+ *ptout++ = *ptin++;
|
||||
+ break;
|
||||
+ case 3:
|
||||
+ sprintf(ptout, "\\%02x", *ptin++);
|
||||
+ ptout += 3;
|
||||
+ break;
|
||||
+ }
|
||||
}
|
||||
- /* printf ("%lu bytes, all ASCII\n", (unsigned long)(s - data->bv_val)); */
|
||||
- return data->bv_val;
|
||||
+ *ptout = 0;
|
||||
+ return buf;
|
||||
}
|
||||
|
||||
static const char *
|
||||
--
|
||||
2.45.0
|
||||
|
||||
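The rewritten encode() replaces the quadratic SPECIAL()-scanning loop with a single pass driven by the encode_size lookup table: printable ASCII is copied as-is, backslash and double quote become two-character escapes, every other byte becomes a three-character \xx hex escape, and output is cut off with "..." once the reserved space at the end of the buffer is reached. A short Python rendering of the same escaping scheme (illustrative only; encode_value is a made-up name, and a small buffer size is used here just to make the truncation reachable, where the C code uses BUFSIZ):

```python
def encode_value(data: bytes, bufsize: int = 64) -> str:
    """Mimic the patched encode(): escape a berval for error-log display."""
    out = []
    used = 0
    limit = bufsize - 4                                    # room kept for "..." + NUL
    for b in data:
        if 0x20 <= b <= 0x7E and b not in (0x22, 0x5C):    # printable, not '"' or '\'
            piece = chr(b)
        elif b in (0x22, 0x5C):                            # '"' and '\' -> 2 chars
            piece = "\\" + chr(b)
        else:                                              # control / high byte -> \xx
            piece = "\\%02x" % b
        if used + len(piece) > limit:
            return "".join(out) + "..."
        out.append(piece)
        used += len(piece)
    return "".join(out)

print(encode_value(b'a"b\\c\x1edmin'))   # a\"b\\c\1edmin
```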
@ -1,143 +0,0 @@
|
||||
From 6e5f03d5872129963106024f53765234a282406c Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Fri, 16 Feb 2024 11:13:16 +0000
|
||||
Subject: [PATCH] Issue 6096 - Improve connection timeout error logging (#6097)
|
||||
|
||||
Bug description: When a paged result search is run with a time limit,
|
||||
if the time limit is exceeded the server closes the connection with
|
||||
closed IO timeout (nsslapd-ioblocktimeout) - T2. This error message
|
||||
is incorrect as the reason the connection has been closed was because
|
||||
the specified time limit on a paged result search has been exceeded.
|
||||
|
||||
Fix description: Correct error message
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6096
|
||||
|
||||
Reviewed by: @tbordaz (Thank you)
|
||||
---
|
||||
ldap/admin/src/logconv.pl | 24 ++++++++++++++++++-
|
||||
ldap/servers/slapd/daemon.c | 4 ++--
|
||||
ldap/servers/slapd/disconnect_error_strings.h | 1 +
|
||||
ldap/servers/slapd/disconnect_errors.h | 2 +-
|
||||
4 files changed, 27 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
|
||||
index 7698c383a..2a933c4a3 100755
|
||||
--- a/ldap/admin/src/logconv.pl
|
||||
+++ b/ldap/admin/src/logconv.pl
|
||||
@@ -267,7 +267,7 @@ my $optimeAvg = 0;
|
||||
my %cipher = ();
|
||||
my @removefiles = ();
|
||||
|
||||
-my @conncodes = qw(A1 B1 B4 T1 T2 B2 B3 R1 P1 P2 U1);
|
||||
+my @conncodes = qw(A1 B1 B4 T1 T2 T3 B2 B3 R1 P1 P2 U1);
|
||||
my %conn = ();
|
||||
map {$conn{$_} = $_} @conncodes;
|
||||
|
||||
@@ -355,6 +355,7 @@ $connmsg{"B1"} = "Bad Ber Tag Encountered";
|
||||
$connmsg{"B4"} = "Server failed to flush data (response) back to Client";
|
||||
$connmsg{"T1"} = "Idle Timeout Exceeded";
|
||||
$connmsg{"T2"} = "IO Block Timeout Exceeded or NTSSL Timeout";
|
||||
+$connmsg{"T3"} = "Paged Search Time Limit Exceeded";
|
||||
$connmsg{"B2"} = "Ber Too Big";
|
||||
$connmsg{"B3"} = "Ber Peek";
|
||||
$connmsg{"R1"} = "Revents";
|
||||
@@ -1723,6 +1724,10 @@ if ($usage =~ /j/i || $verb eq "yes"){
|
||||
print "\n $recCount. You have some coonections that are being closed by the ioblocktimeout setting. You may want to increase the ioblocktimeout.\n";
|
||||
$recCount++;
|
||||
}
|
||||
+ if (defined($conncount->{"T3"}) and $conncount->{"T3"} > 0){
|
||||
+ print "\n $recCount. You have some connections that are being closed because a paged result search limit has been exceeded. You may want to increase the search time limit.\n";
|
||||
+ $recCount++;
|
||||
+ }
|
||||
# compare binds to unbinds, if the difference is more than 30% of the binds, then report a issue
|
||||
if (($bindCount - $unbindCount) > ($bindCount*.3)){
|
||||
print "\n $recCount. You have a significant difference between binds and unbinds. You may want to investigate this difference.\n";
|
||||
@@ -2366,6 +2371,7 @@ sub parseLineNormal
|
||||
$brokenPipeCount++;
|
||||
if (m/- T1/){ $hashes->{rc}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{rc}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{rc}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{rc}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{rc}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{rc}->{"B4"}++; }
|
||||
@@ -2381,6 +2387,7 @@ sub parseLineNormal
|
||||
$connResetByPeerCount++;
|
||||
if (m/- T1/){ $hashes->{src}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{src}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{src}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{src}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{src}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{src}->{"B4"}++; }
|
||||
@@ -2396,6 +2403,7 @@ sub parseLineNormal
|
||||
$resourceUnavailCount++;
|
||||
if (m/- T1/){ $hashes->{rsrc}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{rsrc}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{rsrc}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{rsrc}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{rsrc}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{rsrc}->{"B4"}++; }
|
||||
@@ -2494,6 +2502,20 @@ sub parseLineNormal
|
||||
}
|
||||
}
|
||||
}
|
||||
+ if (m/- T3/){
|
||||
+ if ($_ =~ /conn= *([0-9A-Z]+)/i) {
|
||||
+ $exc = "no";
|
||||
+ $ip = getIPfromConn($1, $serverRestartCount);
|
||||
+ for (my $xxx = 0; $xxx < $#excludeIP; $xxx++){
|
||||
+ if ($ip eq $excludeIP[$xxx]){$exc = "yes";}
|
||||
+ }
|
||||
+ if ($exc ne "yes"){
|
||||
+ $hashes->{T3}->{$ip}++;
|
||||
+ $hashes->{conncount}->{"T3"}++;
|
||||
+ $connCodeCount++;
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
if (m/- B2/){
|
||||
if ($_ =~ /conn= *([0-9A-Z]+)/i) {
|
||||
$exc = "no";
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 5a48aa66f..bb80dae36 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1599,9 +1599,9 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
int add_fd = 1;
|
||||
/* check timeout for PAGED RESULTS */
|
||||
if (pagedresults_is_timedout_nolock(c)) {
|
||||
- /* Exceeded the timelimit; disconnect the client */
|
||||
+ /* Exceeded the paged search timelimit; disconnect the client */
|
||||
disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
- SLAPD_DISCONNECT_IO_TIMEOUT,
|
||||
+ SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
0);
|
||||
connection_table_move_connection_out_of_active_list(ct,
|
||||
c);
|
||||
diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
index f7a31d728..c2d9e283b 100644
|
||||
--- a/ldap/servers/slapd/disconnect_error_strings.h
|
||||
+++ b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
@@ -27,6 +27,7 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
|
||||
ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
|
||||
ER2(SLAPD_DISCONNECT_REVENTS, "R1")
|
||||
ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
|
||||
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
|
||||
ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
|
||||
ER2(SLAPD_DISCONNECT_UNBIND, "U1")
|
||||
ER2(SLAPD_DISCONNECT_POLL, "P2")
|
||||
diff --git a/ldap/servers/slapd/disconnect_errors.h b/ldap/servers/slapd/disconnect_errors.h
|
||||
index a0484f1c2..e118f674c 100644
|
||||
--- a/ldap/servers/slapd/disconnect_errors.h
|
||||
+++ b/ldap/servers/slapd/disconnect_errors.h
|
||||
@@ -35,6 +35,6 @@
|
||||
#define SLAPD_DISCONNECT_SASL_FAIL SLAPD_DISCONNECT_ERROR_BASE + 12
|
||||
#define SLAPD_DISCONNECT_PROXY_INVALID_HEADER SLAPD_DISCONNECT_ERROR_BASE + 13
|
||||
#define SLAPD_DISCONNECT_PROXY_UNKNOWN SLAPD_DISCONNECT_ERROR_BASE + 14
|
||||
-
|
||||
+#define SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT SLAPD_DISCONNECT_ERROR_BASE + 15
|
||||
|
||||
#endif /* __DISCONNECT_ERRORS_H_ */
|
||||
--
|
||||
2.45.0
|
||||
|
||||
@ -1,44 +0,0 @@
|
||||
From a112394af3a20787755029804684d57a9c3ffa9a Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 21 Feb 2024 12:43:03 +0000
|
||||
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap
|
||||
(#6104)
|
||||
|
||||
Bug description: A recent addition to the connection disconnect error
|
||||
messaging, conflicts with how errormap.c maps error codes/strings.
|
||||
|
||||
Fix description: errormap expects error codes/strings to be in ascending
|
||||
order. Moved the new error code to the bottom of the list.
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6103
|
||||
|
||||
Reviewed by: @droideck. @progier389 (Thank you)
|
||||
---
|
||||
ldap/servers/slapd/disconnect_error_strings.h | 5 +++--
|
||||
1 file changed, 3 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
index c2d9e283b..f603a08ce 100644
|
||||
--- a/ldap/servers/slapd/disconnect_error_strings.h
|
||||
+++ b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
@@ -14,7 +14,8 @@
|
||||
/* disconnect_error_strings.h
|
||||
*
|
||||
* Strings describing the errors used in logging the reason a connection
|
||||
- * was closed.
|
||||
+ * was closed. Ensure definitions are in the same order as the error codes
|
||||
+ * defined in disconnect_errors.h
|
||||
*/
|
||||
#ifndef __DISCONNECT_ERROR_STRINGS_H_
|
||||
#define __DISCONNECT_ERROR_STRINGS_H_
|
||||
@@ -35,6 +36,6 @@ ER2(SLAPD_DISCONNECT_NTSSL_TIMEOUT, "T2")
|
||||
ER2(SLAPD_DISCONNECT_SASL_FAIL, "S1")
|
||||
ER2(SLAPD_DISCONNECT_PROXY_INVALID_HEADER, "P3")
|
||||
ER2(SLAPD_DISCONNECT_PROXY_UNKNOWN, "P4")
|
||||
-
|
||||
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
|
||||
|
||||
#endif /* __DISCONNECT_ERROR_STRINGS_H_ */
|
||||
--
|
||||
2.45.0
|
||||
|
||||
@ -1,30 +0,0 @@
|
||||
From edd9abc8901604dde1d739d87ca2906734d53dd3 Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Thu, 13 Jun 2024 13:35:09 +0200
|
||||
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap
|
||||
|
||||
Description:
|
||||
Remove duplicate SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT error code.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6103
|
||||
|
||||
Reviewed by: @tbordaz (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/disconnect_error_strings.h | 1 -
|
||||
1 file changed, 1 deletion(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
index f603a08ce..d49cc79a2 100644
|
||||
--- a/ldap/servers/slapd/disconnect_error_strings.h
|
||||
+++ b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
@@ -28,7 +28,6 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
|
||||
ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
|
||||
ER2(SLAPD_DISCONNECT_REVENTS, "R1")
|
||||
ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
|
||||
-ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
|
||||
ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
|
||||
ER2(SLAPD_DISCONNECT_UNBIND, "U1")
|
||||
ER2(SLAPD_DISCONNECT_POLL, "P2")
|
||||
--
|
||||
2.45.0
|
||||
|
||||
@ -1,220 +0,0 @@
|
||||
From 8cf981c00ae18d3efaeb10819282cd991621e9a2 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Wed, 22 May 2024 11:29:05 +0200
|
||||
Subject: [PATCH] Issue 6172 - RFE: improve the performance of evaluation of
|
||||
filter component when tested against a large valueset (like group members)
|
||||
(#6173)
|
||||
|
||||
Bug description:
|
||||
Before returning an entry (to a SRCH) the server checks that the entry matches the SRCH filter.
|
||||
If a filter component (equality) is testing the value (ava) against a
|
||||
large valueset (like uniquemember values), it takes a long time because
|
||||
of the large number of values and required normalization of the values.
|
||||
This can be improved by taking advantage of sorted valuesets. Those sorted
|
||||
valuesets were created to improve updates of large valuesets (groups) but
|
||||
were at that time not used in the SRCH path.
|
||||
|
||||
Fix description:
|
||||
In case of LDAP_FILTER_EQUALITY component, the server can get
|
||||
benefit of the sorted valuearray.
|
||||
To limit the risk of regression, we use the sorted valuearray
|
||||
only for the DN syntax attribute. Indeed the sorted valuearray was
|
||||
designed for those type of attribute.
|
||||
With those two limitations, there is no need of a toggle and
|
||||
the call to plugin_call_syntax_filter_ava can be replaced by
|
||||
a call to slapi_valueset_find.
|
||||
In both cases, sorted valueset and plugin_call_syntax_filter_ava, ava and
|
||||
values are normalized.
|
||||
In sorted valueset, the values have been normalized to insert the index
|
||||
in the sorted array and then comparison is done on normalized values.
|
||||
In plugin_call_syntax_filter_ava, all values in valuearray (of valueset) are normalized
|
||||
before comparison.
|
||||
|
||||
relates: #6172
|
||||
|
||||
Reviewed by: Pierre Rogier, Simon Pichugin (Big Thanks !!!)
|
||||
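To make the fix description concrete: with a sorted valuearray, testing one normalized ava against N member values becomes a binary search rather than a linear scan over all values. A rough Python sketch of the idea (illustrative only; valueset_contains is a made-up helper, while the server does this through slapi_valueset_find() on the normalized, index-sorted valuearray):

```python
import bisect

def valueset_contains(sorted_normalized_values, candidate):
    """Binary-search membership test over pre-normalized, sorted values."""
    i = bisect.bisect_left(sorted_normalized_values, candidate)
    return i < len(sorted_normalized_values) and sorted_normalized_values[i] == candidate

members = sorted("uid=user%04d,ou=users,dc=example,dc=com" % n for n in range(2000))
assert valueset_contains(members, "uid=user1000,ou=users,dc=example,dc=com")
assert not valueset_contains(members, "uid=nobody,ou=users,dc=example,dc=com")
```

This is also why the test below primes the cache with the first member but measures with a middle member (user1000): the first value is found immediately either way, while the middle value exposes the gap between a linear scan and a logarithmic lookup.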
---
|
||||
.../tests/suites/filter/filter_test.py | 125 ++++++++++++++++++
|
||||
ldap/servers/slapd/filterentry.c | 22 ++-
|
||||
2 files changed, 146 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
index d6bfa5a3b..4baaf04a7 100644
|
||||
--- a/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
+++ b/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
@@ -9,7 +9,11 @@
|
||||
import logging
|
||||
|
||||
import pytest
|
||||
+import time
|
||||
+from lib389.dirsrv_log import DirsrvAccessLog
|
||||
from lib389.tasks import *
|
||||
+from lib389.backend import Backends, Backend
|
||||
+from lib389.dbgen import dbgen_users, dbgen_groups
|
||||
from lib389.topologies import topology_st
|
||||
from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX
|
||||
from lib389.utils import *
|
||||
@@ -304,6 +308,127 @@ def test_extended_search(topology_st):
|
||||
ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
|
||||
assert len(ents) == 1
|
||||
|
||||
+def test_match_large_valueset(topology_st):
|
||||
+ """Test that when returning a big number of entries
|
||||
+ and that we need to match the filter from a large valueset
|
||||
+ we get benefit to use the sorted valueset
|
||||
+
|
||||
+ :id: 7db5aa88-50e0-4c31-85dd-1d2072cb674c
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Create a users and groups backends and tune them
|
||||
+ 2. Generate a test ldif (2k users and 1K groups with all users)
|
||||
+ 3. Import test ldif file using Offline import (ldif2db).
|
||||
+ 4. Prim the 'groups' entrycache with a "fast" search
|
||||
+ 5. Search the 'groups' with a difficult matching value
|
||||
+ 6. check that etime from step 5 is less than a second
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Create a users and groups backends should PASS
|
||||
+ 2. Generate LDIF should PASS.
|
||||
+ 3. Offline import should PASS.
|
||||
+ 4. Priming should PASS.
|
||||
+ 5. Performance search should PASS.
|
||||
+ 6. Etime of performance search should PASS.
|
||||
+ """
|
||||
+
|
||||
+ log.info('Running test_match_large_valueset...')
|
||||
+ #
|
||||
+ # Test online/offline LDIF imports
|
||||
+ #
|
||||
+ inst = topology_st.standalone
|
||||
+ inst.start()
|
||||
+ backends = Backends(inst)
|
||||
+ users_suffix = "ou=users,%s" % DEFAULT_SUFFIX
|
||||
+ users_backend = 'users'
|
||||
+ users_ldif = 'users_import.ldif'
|
||||
+ groups_suffix = "ou=groups,%s" % DEFAULT_SUFFIX
|
||||
+ groups_backend = 'groups'
|
||||
+ groups_ldif = 'groups_import.ldif'
|
||||
+ groups_entrycache = '200000000'
|
||||
+ users_number = 2000
|
||||
+ groups_number = 1000
|
||||
+
|
||||
+
|
||||
+ # For priming the cache we just want to be fast
|
||||
+ # taking the first value in the valueset is good
|
||||
+ # whether the valueset is sorted or not
|
||||
+ priming_user_rdn = "user0001"
|
||||
+
|
||||
+ # For performance testing, this is important to use
|
||||
+ # user1000 rather then user0001
|
||||
+ # Because user0001 is the first value in the valueset
|
||||
+ # whether we use the sorted valuearray or non sorted
|
||||
+ # valuearray the performance will be similar.
|
||||
+ # With middle value user1000, the performance boost of
|
||||
+ # the sorted valuearray will make the difference.
|
||||
+ perf_user_rdn = "user1000"
|
||||
+
|
||||
+ # Step 1. Prepare the backends and tune the groups entrycache
|
||||
+ try:
|
||||
+ be_users = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': users_suffix, 'name': users_backend})
|
||||
+ be_groups = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': groups_suffix, 'name': groups_backend})
|
||||
+
|
||||
+ # set the entry cache to 200Mb as the 1K groups of 2K users require at least 170Mb
|
||||
+ be_groups.replace('nsslapd-cachememsize', groups_entrycache)
|
||||
+ except:
|
||||
+ raise
|
||||
+
|
||||
+ # Step 2. Generate a test ldif (10k users entries)
|
||||
+ log.info("Generating users LDIF...")
|
||||
+ ldif_dir = inst.get_ldif_dir()
|
||||
+ users_import_ldif = "%s/%s" % (ldif_dir, users_ldif)
|
||||
+ groups_import_ldif = "%s/%s" % (ldif_dir, groups_ldif)
|
||||
+ dbgen_users(inst, users_number, users_import_ldif, suffix=users_suffix, generic=True, parent=users_suffix)
|
||||
+
|
||||
+ # Generate a test ldif (800 groups with 10k members) that fit in 700Mb entry cache
|
||||
+ props = {
|
||||
+ "name": "group",
|
||||
+ "suffix": groups_suffix,
|
||||
+ "parent": groups_suffix,
|
||||
+ "number": groups_number,
|
||||
+ "numMembers": users_number,
|
||||
+ "createMembers": False,
|
||||
+ "memberParent": users_suffix,
|
||||
+ "membershipAttr": "uniquemember",
|
||||
+ }
|
||||
+ dbgen_groups(inst, groups_import_ldif, props)
|
||||
+
|
||||
+ # Step 3. Do the both offline imports
|
||||
+ inst.stop()
|
||||
+ if not inst.ldif2db(users_backend, None, None, None, users_import_ldif):
|
||||
+ log.fatal('test_basic_import_export: Offline users import failed')
|
||||
+ assert False
|
||||
+ if not inst.ldif2db(groups_backend, None, None, None, groups_import_ldif):
|
||||
+ log.fatal('test_basic_import_export: Offline groups import failed')
|
||||
+ assert False
|
||||
+ inst.start()
|
||||
+
|
||||
+ # Step 4. first prime the cache
|
||||
+ # Just request the 'DN'. We are interested by the time of matching not by the time of transfert
|
||||
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (priming_user_rdn, users_suffix), ['dn'])
|
||||
+ assert len(entries) == groups_number
|
||||
+
|
||||
+ # Step 5. Now do the real performance checking it should take less than a second
|
||||
+ # Just request the 'DN'. We are interested by the time of matching not by the time of transfert
|
||||
+ search_start = time.time()
|
||||
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (perf_user_rdn, users_suffix), ['dn'])
|
||||
+ duration = time.time() - search_start
|
||||
+ log.info("Duration of the search was %f", duration)
|
||||
+
|
||||
+ # Step 6. Gather the etime from the access log
|
||||
+ inst.stop()
|
||||
+ access_log = DirsrvAccessLog(inst)
|
||||
+ search_result = access_log.match(".*RESULT err=0 tag=101 nentries=%s.*" % groups_number)
|
||||
+ log.info("Found patterns are %s", search_result[0])
|
||||
+ log.info("Found patterns are %s", search_result[1])
|
||||
+ etime = float(search_result[1].split('etime=')[1])
|
||||
+ log.info("Duration of the search from access log was %f", etime)
|
||||
+ assert len(entries) == groups_number
|
||||
+ assert (etime < 1)
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c
|
||||
index fd8fdda9f..cae5c7edc 100644
|
||||
--- a/ldap/servers/slapd/filterentry.c
|
||||
+++ b/ldap/servers/slapd/filterentry.c
|
||||
@@ -296,7 +296,27 @@ test_ava_filter(
|
||||
rc = -1;
|
||||
for (; a != NULL; a = a->a_next) {
|
||||
if (slapi_attr_type_cmp(ava->ava_type, a->a_type, SLAPI_TYPE_CMP_SUBTYPE) == 0) {
|
||||
- rc = plugin_call_syntax_filter_ava(a, ftype, ava);
|
||||
+ if ((ftype == LDAP_FILTER_EQUALITY) &&
|
||||
+ (slapi_attr_is_dn_syntax_type(a->a_type))) {
|
||||
+ /* This path is for a performance improvement */
|
||||
+
|
||||
+ /* In case of equality filter we can get benefit of the
|
||||
+ * sorted valuearray (from valueset).
|
||||
+ * This improvement is limited to DN syntax attributes for
|
||||
+ * which the sorted valueset was designed.
|
||||
+ */
|
||||
+ Slapi_Value *sval = NULL;
|
||||
+ sval = slapi_value_new_berval(&ava->ava_value);
|
||||
+ if (slapi_valueset_find((const Slapi_Attr *)a, &a->a_present_values, sval)) {
|
||||
+ rc = 0;
|
||||
+ }
|
||||
+ slapi_value_free(&sval);
|
||||
+ } else {
|
||||
+ /* When sorted valuearray optimization cannot be used
|
||||
+ * lets filter the value according to its syntax
|
||||
+ */
|
||||
+ rc = plugin_call_syntax_filter_ava(a, ftype, ava);
|
||||
+ }
|
||||
if (rc == 0) {
|
||||
break;
|
||||
}
|
||||
--
|
||||
2.46.0
|
||||
|
||||
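As an aside for readers of this changelog: the performance path described in the patch above boils down to replacing a linear, per-value syntax comparison with a lookup in a valueset kept sorted on normalized values. Below is a minimal, illustrative Python sketch of that idea; the class, the simplistic DN normalization, and the sample data are assumptions for the example, not the server's actual API.

```python
import bisect

def normalize_dn(value: str) -> str:
    # Illustrative stand-in for the server's DN normalization.
    return ",".join(part.strip().lower() for part in value.split(","))

class SortedValueSet:
    """Toy model of a valueset kept sorted on normalized values."""
    def __init__(self, values):
        self._sorted = sorted(normalize_dn(v) for v in values)

    def contains(self, assertion: str) -> bool:
        # Binary search: O(log n) instead of comparing every value.
        key = normalize_dn(assertion)
        i = bisect.bisect_left(self._sorted, key)
        return i < len(self._sorted) and self._sorted[i] == key

members = SortedValueSet(
    f"uid=user{i:04d},ou=users,dc=example,dc=com" for i in range(2000)
)
assert members.contains("UID=user1000, ou=Users, dc=example, dc=com")
```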
@ -1,163 +0,0 @@
From 57051154bafaf50b83fc27dadbd89a49fd1c8c36 Mon Sep 17 00:00:00 2001
From: Pierre Rogier <progier@redhat.com>
Date: Fri, 14 Jun 2024 13:27:10 +0200
Subject: [PATCH] Security fix for CVE-2024-5953

Description:
A denial of service vulnerability was found in the 389 Directory Server.
This issue may allow an authenticated user to cause a server denial
of service while attempting to log in with a user with a malformed hash
in their password.

Fix Description:
To prevent a buffer overflow when a bind request is processed, the bind fails
if the hash size is not coherent, without even attempting to process
the hashed password further.

References:
- https://nvd.nist.gov/vuln/detail/CVE-2024-5953
- https://access.redhat.com/security/cve/CVE-2024-5953
- https://bugzilla.redhat.com/show_bug.cgi?id=2292104
---
 .../tests/suites/password/regression_test.py | 54 ++++++++++++++++++-
 ldap/servers/plugins/pwdstorage/md5_pwd.c    |  9 +++-
 ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c |  6 +++
 3 files changed, 66 insertions(+), 3 deletions(-)

diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py
|
||||
index 8f1facb6d..1fa581643 100644
|
||||
--- a/dirsrvtests/tests/suites/password/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/regression_test.py
|
||||
@@ -7,12 +7,14 @@
|
||||
#
|
||||
import pytest
|
||||
import time
|
||||
+import glob
|
||||
+import base64
|
||||
from lib389._constants import PASSWORD, DN_DM, DEFAULT_SUFFIX
|
||||
from lib389._constants import SUFFIX, PASSWORD, DN_DM, DN_CONFIG, PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DEFAULT_CHANGELOG_DB
|
||||
from lib389 import Entry
|
||||
from lib389.topologies import topology_m1 as topo_supplier
|
||||
-from lib389.idm.user import UserAccounts
|
||||
-from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer
|
||||
+from lib389.idm.user import UserAccounts, UserAccount
|
||||
+from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer, ds_supports_new_changelog
|
||||
from lib389.topologies import topology_st as topo
|
||||
from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
|
||||
@@ -39,6 +41,13 @@ TEST_PASSWORDS += ['CNpwtest1ZZZZ', 'ZZZZZCNpwtest1',
|
||||
TEST_PASSWORDS2 = (
|
||||
'CN12pwtest31', 'SN3pwtest231', 'UID1pwtest123', 'MAIL2pwtest12@redhat.com', '2GN1pwtest123', 'People123')
|
||||
|
||||
+SUPPORTED_SCHEMES = (
|
||||
+ "{SHA}", "{SSHA}", "{SHA256}", "{SSHA256}",
|
||||
+ "{SHA384}", "{SSHA384}", "{SHA512}", "{SSHA512}",
|
||||
+ "{crypt}", "{NS-MTA-MD5}", "{clear}", "{MD5}",
|
||||
+ "{SMD5}", "{PBKDF2_SHA256}", "{PBKDF2_SHA512}",
|
||||
+ "{GOST_YESCRYPT}", "{PBKDF2-SHA256}", "{PBKDF2-SHA512}" )
|
||||
+
|
||||
def _check_unhashed_userpw(inst, user_dn, is_present=False):
|
||||
"""Check if unhashed#user#password attribute is present or not in the changelog"""
|
||||
unhashed_pwd_attribute = 'unhashed#user#password'
|
||||
@@ -319,6 +328,47 @@ def test_unhashed_pw_switch(topo_supplier):
|
||||
# Add debugging steps(if any)...
|
||||
pass
|
||||
|
||||
+@pytest.mark.parametrize("scheme", SUPPORTED_SCHEMES )
|
||||
+def test_long_hashed_password(topo, create_user, scheme):
|
||||
+ """Check that hashed password with very long value does not cause trouble
|
||||
+
|
||||
+ :id: 252a1f76-114b-11ef-8a7a-482ae39447e5
|
||||
+ :setup: standalone Instance
|
||||
+ :parametrized: yes
|
||||
+ :steps:
|
||||
+ 1. Add a test user user
|
||||
+ 2. Set a long password with requested scheme
|
||||
+ 3. Bind on that user using a wrong password
|
||||
+ 4. Check that instance is still alive
|
||||
+ 5. Remove the added user
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Should get ldap.INVALID_CREDENTIALS exception
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ inst.simple_bind_s(DN_DM, PASSWORD)
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ # Make sure that server is started as this test may crash it
|
||||
+ inst.start()
|
||||
+ # Adding Test user (It may already exists if previous test failed)
|
||||
+ user2 = UserAccount(inst, dn='uid=test_user_1002,ou=People,dc=example,dc=com')
|
||||
+ if not user2.exists():
|
||||
+ user2 = users.create_test_user(uid=1002, gid=2002)
|
||||
+ # Setting hashed password
|
||||
+ passwd = 'A'*4000
|
||||
+ hashed_passwd = scheme.encode('utf-8') + base64.b64encode(passwd.encode('utf-8'))
|
||||
+ user2.replace('userpassword', hashed_passwd)
|
||||
+ # Bind on that user using a wrong password
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ conn = user2.bind(PASSWORD)
|
||||
+ # Check that instance is still alive
|
||||
+ assert inst.status()
|
||||
+ # Remove the added user
|
||||
+ user2.delete()
|
||||
+
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/md5_pwd.c b/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
index 1e2cf58e7..b9a48d5ca 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
@@ -37,6 +37,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
unsigned char hash_out[MD5_HASH_LEN];
|
||||
unsigned char b2a_out[MD5_HASH_LEN * 2]; /* conservative */
|
||||
SECItem binary_item;
|
||||
+ size_t dbpwd_len = strlen(dbpwd);
|
||||
|
||||
ctx = PK11_CreateDigestContext(SEC_OID_MD5);
|
||||
if (ctx == NULL) {
|
||||
@@ -45,6 +46,12 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
goto loser;
|
||||
}
|
||||
|
||||
+ if (dbpwd_len >= sizeof b2a_out) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
|
||||
+ "The hashed password stored in the user entry is longer than any valid md5 hash");
|
||||
+ goto loser;
|
||||
+ }
|
||||
+
|
||||
/* create the hash */
|
||||
PK11_DigestBegin(ctx);
|
||||
PK11_DigestOp(ctx, (const unsigned char *)userpwd, strlen(userpwd));
|
||||
@@ -57,7 +64,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
bver = NSSBase64_EncodeItem(NULL, (char *)b2a_out, sizeof b2a_out, &binary_item);
|
||||
/* bver points to b2a_out upon success */
|
||||
if (bver) {
|
||||
- rc = slapi_ct_memcmp(bver, dbpwd, strlen(dbpwd));
|
||||
+ rc = slapi_ct_memcmp(bver, dbpwd, dbpwd_len);
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
|
||||
"Could not base64 encode hashed value for password compare");
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
index dcac4fcdd..82b8c9501 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
@@ -255,6 +255,12 @@ pbkdf2_sha256_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
passItem.data = (unsigned char *)userpwd;
|
||||
passItem.len = strlen(userpwd);
|
||||
|
||||
+ if (pwdstorage_base64_decode_len(dbpwd, dbpwd_len) > sizeof dbhash) {
|
||||
+ /* Hashed value is too long and cannot match any value generated by pbkdf2_sha256_hash */
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value. (hashed value is too long)\n");
|
||||
+ return result;
|
||||
+ }
|
||||
+
|
||||
/* Decode the DBpwd to bytes from b64 */
|
||||
if (PL_Base64Decode(dbpwd, dbpwd_len, dbhash) == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value\n");
|
||||
--
|
||||
2.46.0
|
||||
|
||||
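The fix above amounts to a length sanity check on the stored hash before any decode or compare work is done. A rough Python analogue of the MD5 case follows; the function name, the 24-character limit derivation, and the use of hashlib/hmac are illustrative assumptions, not the plugin's real code.

```python
import base64
import hashlib
import hmac

MD5_HASH_LEN = 16  # bytes produced by an MD5 digest

def md5_pw_cmp(userpwd: str, dbpwd_b64: str) -> bool:
    """Return True when the stored (base64) MD5 hash matches userpwd.

    Mirrors the idea of the fix: refuse oversized stored values up front
    so a malformed userPassword cannot overrun the comparison buffer.
    """
    # A valid base64-encoded 16-byte digest is 24 characters; anything
    # longer cannot have been produced by the hashing code, so fail early.
    if len(dbpwd_b64) > 24:
        return False
    digest = hashlib.md5(userpwd.encode("utf-8")).digest()
    expected = base64.b64encode(digest)
    # Constant-time compare, like slapi_ct_memcmp in the patch.
    return hmac.compare_digest(expected, dbpwd_b64.encode("ascii"))
```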
@ -1,178 +0,0 @@
From e8a5b1deef1b455aafecb71efc029d2407b1b06f Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 16 Jul 2024 08:32:21 -0700
Subject: [PATCH] Issue 4778 - Add COMPACT_CL5 task to dsconf replication
 (#6260)

Description: In 1.4.3, the changelog is not part of a backend.
It can be compacted with nsds5task: COMPACT_CL5 as part of the replication entry.
Add the task as a compact-changelog command under the dsconf replication tool.
Add tests for the feature and fix old tests.

Related: https://github.com/389ds/389-ds-base/issues/4778

Reviewed by: @progier389 (Thanks!)
---
 .../tests/suites/config/compact_test.py   | 36 ++++++++++++++---
 src/lib389/lib389/cli_conf/replication.py | 10 +++++
 src/lib389/lib389/replica.py              | 40 +++++++++++++++++++
 3 files changed, 81 insertions(+), 5 deletions(-)

diff --git a/dirsrvtests/tests/suites/config/compact_test.py b/dirsrvtests/tests/suites/config/compact_test.py
|
||||
index 317258d0e..31d98d10c 100644
|
||||
--- a/dirsrvtests/tests/suites/config/compact_test.py
|
||||
+++ b/dirsrvtests/tests/suites/config/compact_test.py
|
||||
@@ -13,14 +13,14 @@ import time
|
||||
import datetime
|
||||
from lib389.tasks import DBCompactTask
|
||||
from lib389.backend import DatabaseConfig
|
||||
-from lib389.replica import Changelog5
|
||||
+from lib389.replica import Changelog5, Replicas
|
||||
from lib389.topologies import topology_m1 as topo
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def test_compact_db_task(topo):
|
||||
- """Specify a test case purpose or name here
|
||||
+ """Test compaction of database
|
||||
|
||||
:id: 1b3222ef-a336-4259-be21-6a52f76e1859
|
||||
:setup: Standalone Instance
|
||||
@@ -48,7 +48,7 @@ def test_compact_db_task(topo):
|
||||
|
||||
|
||||
def test_compaction_interval_and_time(topo):
|
||||
- """Specify a test case purpose or name here
|
||||
+ """Test compaction interval and time for database and changelog
|
||||
|
||||
:id: f361bee9-d7e7-4569-9255-d7b60dd9d92e
|
||||
:setup: Supplier Instance
|
||||
@@ -95,10 +95,36 @@ def test_compaction_interval_and_time(topo):
|
||||
|
||||
# Check compaction occurred as expected
|
||||
time.sleep(45)
|
||||
- assert not inst.searchErrorsLog("Compacting databases")
|
||||
+ assert not inst.searchErrorsLog("compacting replication changelogs")
|
||||
|
||||
time.sleep(90)
|
||||
- assert inst.searchErrorsLog("Compacting databases")
|
||||
+ assert inst.searchErrorsLog("compacting replication changelogs")
|
||||
+ inst.deleteErrorLogs(restart=False)
|
||||
+
|
||||
+
|
||||
+def test_compact_cl5_task(topo):
|
||||
+ """Test compaction of changelog5 database
|
||||
+
|
||||
+ :id: aadfa9f7-73c0-463a-912c-0a29aa1f8167
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Run compaction task
|
||||
+ 2. Check errors log to show task was run
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ """
|
||||
+ inst = topo.ms["supplier1"]
|
||||
+
|
||||
+ replicas = Replicas(inst)
|
||||
+ replicas.compact_changelog(log=log)
|
||||
+
|
||||
+ # Check compaction occurred as expected. But instead of time.sleep(5) check 1 sec in loop
|
||||
+ for _ in range(5):
|
||||
+ time.sleep(1)
|
||||
+ if inst.searchErrorsLog("compacting replication changelogs"):
|
||||
+ break
|
||||
+ assert inst.searchErrorsLog("compacting replication changelogs")
|
||||
inst.deleteErrorLogs(restart=False)
|
||||
|
||||
|
||||
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
|
||||
index 352c0ee5b..ccc394255 100644
|
||||
--- a/src/lib389/lib389/cli_conf/replication.py
|
||||
+++ b/src/lib389/lib389/cli_conf/replication.py
|
||||
@@ -1199,6 +1199,11 @@ def restore_cl_dir(inst, basedn, log, args):
|
||||
replicas.restore_changelog(replica_roots=args.REPLICA_ROOTS, log=log)
|
||||
|
||||
|
||||
+def compact_cl5(inst, basedn, log, args):
|
||||
+ replicas = Replicas(inst)
|
||||
+ replicas.compact_changelog(replica_roots=args.REPLICA_ROOTS, log=log)
|
||||
+
|
||||
+
|
||||
def create_parser(subparsers):
|
||||
|
||||
############################################
|
||||
@@ -1326,6 +1331,11 @@ def create_parser(subparsers):
|
||||
help="Specify one replica root whose changelog you want to restore. "
|
||||
"The replica root will be consumed from the LDIF file name if the option is omitted.")
|
||||
|
||||
+ compact_cl = repl_subcommands.add_parser('compact-changelog', help='Compact the changelog database')
|
||||
+ compact_cl.set_defaults(func=compact_cl5)
|
||||
+ compact_cl.add_argument('REPLICA_ROOTS', nargs="+",
|
||||
+ help="Specify replica roots whose changelog you want to compact.")
|
||||
+
|
||||
restore_changelogdir = restore_subcommands.add_parser('from-changelogdir', help='Restore LDIF files from changelogdir.')
|
||||
restore_changelogdir.set_defaults(func=restore_cl_dir)
|
||||
restore_changelogdir.add_argument('REPLICA_ROOTS', nargs="+",
|
||||
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
|
||||
index 94e1fdad5..1f321972d 100644
|
||||
--- a/src/lib389/lib389/replica.py
|
||||
+++ b/src/lib389/lib389/replica.py
|
||||
@@ -1648,6 +1648,11 @@ class Replica(DSLdapObject):
|
||||
"""
|
||||
self.replace('nsds5task', 'ldif2cl')
|
||||
|
||||
+ def begin_task_compact_cl5(self):
|
||||
+ """Begin COMPACT_CL5 task
|
||||
+ """
|
||||
+ self.replace('nsds5task', 'COMPACT_CL5')
|
||||
+
|
||||
def get_suffix(self):
|
||||
"""Return the suffix
|
||||
"""
|
||||
@@ -1829,6 +1834,41 @@ class Replicas(DSLdapObjects):
|
||||
log.error(f"Changelog LDIF for '{repl_root}' was not found")
|
||||
continue
|
||||
|
||||
+ def compact_changelog(self, replica_roots=[], log=None):
|
||||
+ """Compact Directory Server replication changelog
|
||||
+
|
||||
+ :param replica_roots: Replica suffixes that need to be processed (and optional LDIF file path)
|
||||
+ :type replica_roots: list of str
|
||||
+ :param log: The logger object
|
||||
+ :type log: logger
|
||||
+ """
|
||||
+
|
||||
+ if log is None:
|
||||
+ log = self._log
|
||||
+
|
||||
+ # Check if the changelog entry exists
|
||||
+ try:
|
||||
+ cl = Changelog5(self._instance)
|
||||
+ cl.get_attr_val_utf8_l("nsslapd-changelogdir")
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ raise ValueError("Changelog entry was not found. Probably, the replication is not enabled on this instance")
|
||||
+
|
||||
+ # Get all the replicas on the server if --replica-roots option is not specified
|
||||
+ repl_roots = []
|
||||
+ if not replica_roots:
|
||||
+ for replica in self.list():
|
||||
+ repl_roots.append(replica.get_attr_val_utf8("nsDS5ReplicaRoot"))
|
||||
+ else:
|
||||
+ for repl_root in replica_roots:
|
||||
+ repl_roots.append(repl_root)
|
||||
+
|
||||
+ # Dump the changelog for the replica
|
||||
+
|
||||
+ # Dump the changelog for the replica
|
||||
+ for repl_root in repl_roots:
|
||||
+ replica = self.get(repl_root)
|
||||
+ replica.begin_task_compact_cl5()
|
||||
+
|
||||
|
||||
class BootstrapReplicationManager(DSLdapObject):
|
||||
"""A Replication Manager credential for bootstrapping the repl process.
|
||||
--
|
||||
2.47.0
|
||||
|
||||
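Operationally, the new subcommand ends up writing the nsds5task attribute on the replica entry. Here is a hedged sketch of the same action done directly with python-ldap; the host, credentials, and the escaped replica DN are assumptions for the example, not values taken from the patch.

```python
import ldap

# Illustrative only: DN, host, and credentials are assumptions.
REPLICA_DN = "cn=replica,cn=dc\\3Dexample\\2Cdc\\3Dcom,cn=mapping tree,cn=config"

conn = ldap.initialize("ldap://localhost:389")
conn.simple_bind_s("cn=Directory Manager", "password")

# Same idea as Replica.begin_task_compact_cl5() in the diff below:
# the task is started by replacing nsds5task on the replica entry.
conn.modify_s(REPLICA_DN, [(ldap.MOD_REPLACE, "nsds5task", [b"COMPACT_CL5"])])
conn.unbind_s()
```

With the patch applied, the equivalent CLI would be roughly `dsconf <instance> replication compact-changelog <replica-root>`, which drives the Replicas.compact_changelog() helper shown in the diff.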
@ -1,55 +0,0 @@
From d1cd9a5675e2953b7c8034ebb87a434cdd3ce0c3 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 2 Dec 2024 17:18:32 +0100
Subject: [PATCH] Issue 6417 - If an entry RDN is identical to the suffix, then
 Entryrdn gets broken during a reindex (#6418)

Bug description:
During a reindex, the entryrdn index is built at the end from
each entry in the suffix.
If one entry has an RDN that is identical to the suffix DN,
then entryrdn_lookup_dn may erroneously return the suffix DN
as the DN of the entry.

Fix description:
When the lookup entry has no parent (because the index is under
construction) the loop looks up the entry using the RDN.
If this RDN matches the suffix DN, then it exits from the loop
with the suffix DN.
Before exiting it checks that the original lookup entryID
is equal to the suffix entryID. If it does not match,
the function fails and the DN of the entry is then
built from id2entry.

fixes: #6417

Reviewed by: Pierre Rogier, Simon Pichugin (Thanks !!!)
---
 ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 5797dd779..83b041192 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1224,7 +1224,16 @@ entryrdn_lookup_dn(backend *be,
|
||||
}
|
||||
goto bail;
|
||||
}
|
||||
- maybesuffix = 1;
|
||||
+ if (workid == 1) {
|
||||
+ /* The loop (workid) iterates from the starting 'id'
|
||||
+ * up to the suffix ID (i.e. '1').
|
||||
+ * A corner case (#6417) is if an entry, on the path
|
||||
+ * 'id' -> suffix, has the same RDN than the suffix.
|
||||
+ * In order to erroneously believe the loop hits the suffix
|
||||
+ * we need to check that 'workid' is '1' (suffix)
|
||||
+ */
|
||||
+ maybesuffix = 1;
|
||||
+ }
|
||||
} else {
|
||||
_entryrdn_cursor_print_error("entryrdn_lookup_dn",
|
||||
key.data, data.size, data.ulen, rc);
|
||||
--
|
||||
2.48.0
|
||||
|
||||
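To make the corner case above concrete, here is a toy Python model of why the parent walk must stop on the suffix ID rather than on an RDN that merely looks like the suffix. The index layout, IDs, and RDNs are invented for illustration; they are not the entryrdn on-disk format.

```python
# Hedged toy model of the entryrdn parent walk described above.
# "index" maps an entry ID to (rdn, parent_id).
index = {
    1: ("dc=example,dc=com", None),   # suffix entry (ID 1)
    2: ("ou=people", 1),
    3: ("dc=example,dc=com", 2),      # leaf whose RDN equals the suffix RDN
}

def lookup_dn(entry_id: int, suffix_id: int = 1) -> str:
    rdns = []
    workid = entry_id
    while True:
        rdn, parent = index[workid]
        rdns.append(rdn)
        # The fix: only treat the node as the suffix when its ID really is
        # the suffix ID, not merely because its RDN matches the suffix RDN.
        if workid == suffix_id:
            break
        workid = parent
    return ",".join(rdns)

print(lookup_dn(3))  # dc=example,dc=com,ou=people,dc=example,dc=com
```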
@ -1,267 +0,0 @@
From 9b2fc77a36156ea987dcea6e2043f8e4c4a6b259 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 18 Jun 2024 14:21:07 +0200
Subject: [PATCH] Issue 6224 - d2entry - Could not open id2entry err 0 - at
 startup when having sub-suffixes (#6225)

Problem: "id2entry - Could not open id2entry err 0" is logged at startup when having sub-suffixes.
Reason: The slapi_exist_referral internal search accesses a backend that is not yet started.
Solution: Limit the internal search to a single backend.

Issue: #6224

Reviewed by: @droideck Thanks!

(cherry picked from commit 796f703021e961fdd8cbc53b4ad4e20258af0e96)
---
 .../tests/suites/ds_logs/ds_logs_test.py   |   1 +
 .../suites/mapping_tree/regression_test.py | 161 +++++++++++++++++-
 ldap/servers/slapd/backend.c               |   7 +-
 3 files changed, 159 insertions(+), 10 deletions(-)

diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 812936c62..84a9c6ec8 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,6 +1222,7 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+<<<<<<< HEAD
|
||||
def test_referral_subsuffix(topology_st, request):
|
||||
"""Test the results of an inverted parent suffix definition in the configuration.
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/mapping_tree/regression_test.py b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
index 99d4a1d5f..689ff9f59 100644
|
||||
--- a/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
@@ -11,10 +11,14 @@ import ldap
|
||||
import logging
|
||||
import os
|
||||
import pytest
|
||||
+import time
|
||||
from lib389.backend import Backends, Backend
|
||||
+from lib389._constants import HOST_STANDALONE, PORT_STANDALONE, DN_DM, PW_DM
|
||||
from lib389.dbgen import dbgen_users
|
||||
from lib389.mappingTree import MappingTrees
|
||||
from lib389.topologies import topology_st
|
||||
+from lib389.referral import Referrals, Referral
|
||||
+
|
||||
|
||||
try:
|
||||
from lib389.backend import BackendSuffixView
|
||||
@@ -31,14 +35,26 @@ else:
|
||||
logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
+PARENT_SUFFIX = "dc=parent"
|
||||
+CHILD1_SUFFIX = f"dc=child1,{PARENT_SUFFIX}"
|
||||
+CHILD2_SUFFIX = f"dc=child2,{PARENT_SUFFIX}"
|
||||
+
|
||||
+PARENT_REFERRAL_DN = f"cn=ref,ou=People,{PARENT_SUFFIX}"
|
||||
+CHILD1_REFERRAL_DN = f"cn=ref,ou=people,{CHILD1_SUFFIX}"
|
||||
+CHILD2_REFERRAL_DN = f"cn=ref,ou=people,{CHILD2_SUFFIX}"
|
||||
+
|
||||
+REFERRAL_CHECK_PEDIOD = 7
|
||||
+
|
||||
+
|
||||
+
|
||||
BESTRUCT = [
|
||||
- { "bename" : "parent", "suffix": "dc=parent" },
|
||||
- { "bename" : "child1", "suffix": "dc=child1,dc=parent" },
|
||||
- { "bename" : "child2", "suffix": "dc=child2,dc=parent" },
|
||||
+ { "bename" : "parent", "suffix": PARENT_SUFFIX },
|
||||
+ { "bename" : "child1", "suffix": CHILD1_SUFFIX },
|
||||
+ { "bename" : "child2", "suffix": CHILD2_SUFFIX },
|
||||
]
|
||||
|
||||
|
||||
-@pytest.fixture(scope="function")
|
||||
+@pytest.fixture(scope="module")
|
||||
def topo(topology_st, request):
|
||||
bes = []
|
||||
|
||||
@@ -50,6 +66,9 @@ def topo(topology_st, request):
|
||||
request.addfinalizer(fin)
|
||||
|
||||
inst = topology_st.standalone
|
||||
+ # Reduce nsslapd-referral-check-period to accelerate test
|
||||
+ topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK_PEDIOD))
|
||||
+
|
||||
ldif_files = {}
|
||||
for d in BESTRUCT:
|
||||
bename = d['bename']
|
||||
@@ -76,14 +95,13 @@ def topo(topology_st, request):
|
||||
inst.start()
|
||||
return topology_st
|
||||
|
||||
-# Parameters for test_change_repl_passwd
|
||||
-EXPECTED_ENTRIES = (("dc=parent", 39), ("dc=child1,dc=parent", 13), ("dc=child2,dc=parent", 13))
|
||||
+# Parameters for test_sub_suffixes
|
||||
@pytest.mark.parametrize(
|
||||
"orphan_param",
|
||||
[
|
||||
- pytest.param( ( True, { "dc=parent": 2, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-true" ),
|
||||
- pytest.param( ( False, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-false" ),
|
||||
- pytest.param( ( None, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="no-orphan" ),
|
||||
+ pytest.param( ( True, { PARENT_SUFFIX: 2, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="orphan-is-true" ),
|
||||
+ pytest.param( ( False, { PARENT_SUFFIX: 3, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="orphan-is-false" ),
|
||||
+ pytest.param( ( None, { PARENT_SUFFIX: 3, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="no-orphan" ),
|
||||
],
|
||||
)
|
||||
|
||||
@@ -128,3 +146,128 @@ def test_sub_suffixes(topo, orphan_param):
|
||||
log.info('Test PASSED')
|
||||
|
||||
|
||||
+def test_one_level_search_on_sub_suffixes(topo):
|
||||
+ """ Perform one level scoped search accross suffix and sub-suffix
|
||||
+
|
||||
+ :id: 92f3139e-280e-11ef-a989-482ae39447e5
|
||||
+ :feature: mapping-tree
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+ :steps:
|
||||
+ 1. Perform a ONE LEVEL search on dc=parent
|
||||
+ 2. Check that all expected entries have been returned
|
||||
+ 3. Check that only the expected entries have been returned
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. each expected dn should be in the result set
|
||||
+ 3. Number of returned entries should be the same as the number of expected entries
|
||||
+ """
|
||||
+ expected_dns = ( 'dc=child1,dc=parent',
|
||||
+ 'dc=child2,dc=parent',
|
||||
+ 'ou=accounting,dc=parent',
|
||||
+ 'ou=product development,dc=parent',
|
||||
+ 'ou=product testing,dc=parent',
|
||||
+ 'ou=human resources,dc=parent',
|
||||
+ 'ou=payroll,dc=parent',
|
||||
+ 'ou=people,dc=parent',
|
||||
+ 'ou=groups,dc=parent', )
|
||||
+ entries = topo.standalone.search_s("dc=parent", ldap.SCOPE_ONELEVEL, "(objectClass=*)",
|
||||
+ attrlist=("dc","ou"), escapehatch='i am sure')
|
||||
+ log.info(f'one level search on dc=parent returned the following entries: {entries}')
|
||||
+ dns = [ entry.dn for entry in entries ]
|
||||
+ for dn in expected_dns:
|
||||
+ assert dn in dns
|
||||
+ assert len(entries) == len(expected_dns)
|
||||
+
|
||||
+
|
||||
+def test_sub_suffixes_errlog(topo):
|
||||
+ """ check the entries found on suffix/sub-suffix
|
||||
+ used int
|
||||
+
|
||||
+ :id: 1db9d52e-28de-11ef-b286-482ae39447e5
|
||||
+ :feature: mapping-tree
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+ :steps:
|
||||
+ 1. Check that id2entry error message is not in the error log.
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ assert not inst.searchErrorsLog('id2entry - Could not open id2entry err 0')
|
||||
+
|
||||
+
|
||||
+# Parameters for test_referral_subsuffix:
|
||||
+# a tuple pair containing:
|
||||
+# - list of referral dn that must be created
|
||||
+# - dict of searches basedn: expected_number_of_referrals
|
||||
+@pytest.mark.parametrize(
|
||||
+ "parameters",
|
||||
+ [
|
||||
+ pytest.param( ((PARENT_REFERRAL_DN, CHILD1_REFERRAL_DN), {PARENT_SUFFIX: 2, CHILD1_SUFFIX:1, CHILD2_SUFFIX:0}), id="Both"),
|
||||
+ pytest.param( ((PARENT_REFERRAL_DN,), {PARENT_SUFFIX: 1, CHILD1_SUFFIX:0, CHILD2_SUFFIX:0}) , id="Parent"),
|
||||
+ pytest.param( ((CHILD1_REFERRAL_DN,), {PARENT_SUFFIX: 1, CHILD1_SUFFIX:1, CHILD2_SUFFIX:0}) , id="Child"),
|
||||
+ pytest.param( ((), {PARENT_SUFFIX: 0, CHILD1_SUFFIX:0, CHILD2_SUFFIX:0}), id="None"),
|
||||
+ ])
|
||||
+
|
||||
+def test_referral_subsuffix(topo, request, parameters):
|
||||
+ """Test the results of an inverted parent suffix definition in the configuration.
|
||||
+
|
||||
+ For more details see:
|
||||
+ https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html
|
||||
+
|
||||
+ :id: 4e111a22-2a5d-11ef-a890-482ae39447e5
|
||||
+ :feature: referrals
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+ :parametrized: yes
|
||||
+ :steps:
|
||||
+ refs,searches = referrals
|
||||
+
|
||||
+ 1. Create the referrals according to the current parameter
|
||||
+ 2. Wait enough time so they get detected
|
||||
+ 3. For each search base dn, in the current parameter, perform the two following steps
|
||||
+ 4. In 3. loop: Perform a search with provided base dn
|
||||
+ 5. In 3. loop: Check that the number of returned referrals is the expected one.
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ all steps succeeds
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info('Deleting all referrals')
|
||||
+ for ref in Referrals(inst, PARENT_SUFFIX).list():
|
||||
+ ref.delete()
|
||||
+
|
||||
+ # Set cleanup callback
|
||||
+ if DEBUGGING:
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Remove all referrals
|
||||
+ fin()
|
||||
+ # Add requested referrals
|
||||
+ for dn in parameters[0]:
|
||||
+ refs = Referral(inst, dn=dn)
|
||||
+ refs.create(basedn=dn, properties={ 'cn': 'ref', 'ref': f'ldap://remote/{dn}'})
|
||||
+ # Wait that the internal search detects the referrals
|
||||
+ time.sleep(REFERRAL_CHECK_PEDIOD + 1)
|
||||
+ # Open a test connection
|
||||
+ ldc = ldap.initialize(f"ldap://{HOST_STANDALONE}:{PORT_STANDALONE}")
|
||||
+ ldc.set_option(ldap.OPT_REFERRALS,0)
|
||||
+ ldc.simple_bind_s(DN_DM,PW_DM)
|
||||
+
|
||||
+ # For each search base dn:
|
||||
+ for basedn,nbref in parameters[1].items():
|
||||
+ log.info(f"Referrals are: {parameters[0]}")
|
||||
+ # Perform a search with provided base dn
|
||||
+ result = ldc.search_s(basedn, ldap.SCOPE_SUBTREE, filterstr="(ou=People)")
|
||||
+ found_dns = [ dn for dn,entry in result if dn is not None ]
|
||||
+ found_refs = [ entry for dn,entry in result if dn is None ]
|
||||
+ log.info(f"Search on {basedn} returned {found_dns} and {found_refs}")
|
||||
+ # Check that the number of returned referrals is the expected one.
|
||||
+ log.info(f"Search returned {len(found_refs)} referrals. {nbref} are expected.")
|
||||
+ assert len(found_refs) == nbref
|
||||
+ ldc.unbind()
|
||||
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
|
||||
index 498f683b1..f86b0b9b6 100644
|
||||
--- a/ldap/servers/slapd/backend.c
|
||||
+++ b/ldap/servers/slapd/backend.c
|
||||
@@ -230,12 +230,17 @@ slapi_exist_referral(Slapi_Backend *be)
|
||||
|
||||
/* search for ("smart") referral entries */
|
||||
search_pb = slapi_pblock_new();
|
||||
- server_ctrls = (LDAPControl **) slapi_ch_calloc(2, sizeof (LDAPControl *));
|
||||
+ server_ctrls = (LDAPControl **) slapi_ch_calloc(3, sizeof (LDAPControl *));
|
||||
server_ctrls[0] = (LDAPControl *) slapi_ch_malloc(sizeof (LDAPControl));
|
||||
server_ctrls[0]->ldctl_oid = slapi_ch_strdup(LDAP_CONTROL_MANAGEDSAIT);
|
||||
server_ctrls[0]->ldctl_value.bv_val = NULL;
|
||||
server_ctrls[0]->ldctl_value.bv_len = 0;
|
||||
server_ctrls[0]->ldctl_iscritical = '\0';
|
||||
+ server_ctrls[1] = (LDAPControl *) slapi_ch_malloc(sizeof (LDAPControl));
|
||||
+ server_ctrls[1]->ldctl_oid = slapi_ch_strdup(MTN_CONTROL_USE_ONE_BACKEND_EXT_OID);
|
||||
+ server_ctrls[1]->ldctl_value.bv_val = NULL;
|
||||
+ server_ctrls[1]->ldctl_value.bv_len = 0;
|
||||
+ server_ctrls[1]->ldctl_iscritical = '\0';
|
||||
slapi_search_internal_set_pb(search_pb, suffix, LDAP_SCOPE_SUBTREE,
|
||||
filter, NULL, 0, server_ctrls, NULL,
|
||||
(void *) plugin_get_default_component_id(), 0);
|
||||
--
|
||||
2.48.0
|
||||
|
||||
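The referral counting used by the new regression test relies on a python-ldap behaviour worth spelling out: with referral chasing disabled, search continuation references come back as (None, [url, ...]) items mixed into the result list, so splitting on the DN separates entries from referrals. A small standalone sketch of that pattern follows; host, credentials, and suffix are assumptions for the example.

```python
import ldap

# Illustrative only: connection details are assumptions.
conn = ldap.initialize("ldap://localhost:389")
conn.set_option(ldap.OPT_REFERRALS, 0)   # do not chase referrals
conn.simple_bind_s("cn=Directory Manager", "password")

result = conn.search_s("dc=parent", ldap.SCOPE_SUBTREE, "(ou=People)")
# python-ldap returns entries as (dn, attrs) and search continuation
# references as (None, [url, ...]); filtering on dn separates the two.
entries = [dn for dn, attrs in result if dn is not None]
referrals = [attrs for dn, attrs in result if dn is None]
print(f"{len(entries)} entries, {len(referrals)} referrals")
conn.unbind_s()
```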
@ -1,32 +0,0 @@
From ab06b3cebbe0287ef557c0307ca2ee86fe8cb761 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Thu, 21 Nov 2024 16:26:02 +0100
Subject: [PATCH] Issue 6224 - Fix merge issue in 389-ds-base-2.1 for
 ds_log_test.py (#6414)

Fix a merge issue during cherry-pick over 389-ds-base-2.1 and 389-ds-base-1.4.3 branches

Issue: #6224

Reviewed by: @mreynolds389

(cherry picked from commit 2b541c64b8317209e4dafa4f82918d714039907c)
---
 dirsrvtests/tests/suites/ds_logs/ds_logs_test.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 84a9c6ec8..812936c62 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,7 +1222,6 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-<<<<<<< HEAD
|
||||
def test_referral_subsuffix(topology_st, request):
|
||||
"""Test the results of an inverted parent suffix definition in the configuration.
|
||||
|
||||
--
|
||||
2.48.0
|
||||
|
||||
@ -1,214 +0,0 @@
From 3fe2cf7cdedcdf5cafb59867e52a1fbe4a643571 Mon Sep 17 00:00:00 2001
From: Masahiro Matsuya <mmatsuya@redhat.com>
Date: Fri, 20 Dec 2024 22:37:15 +0900
Subject: [PATCH] Issue 6224 - Remove test_referral_subsuffix from
 ds_logs_test.py (#6456)

Bug Description:

The test_referral_subsuffix test was removed from the main branch and some other
branches for higher versions, but it was not removed from 389-ds-base-1.4.3
and 389-ds-base-2.1. The test no longer works with the fix for
Issue 6224, because the newly added control limits the internal search
to one backend. The test should be removed.

Fix Description:

Remove the test from ds_logs_test.py.

relates: https://github.com/389ds/389-ds-base/issues/6224
---
 .../tests/suites/ds_logs/ds_logs_test.py | 177 ------------------
 1 file changed, 177 deletions(-)

diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 812936c62..84d721756 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,183 +1222,6 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_referral_subsuffix(topology_st, request):
|
||||
- """Test the results of an inverted parent suffix definition in the configuration.
|
||||
-
|
||||
- For more details see:
|
||||
- https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html
|
||||
-
|
||||
- :id: 4faf210a-4fde-4e4f-8834-865bdc8f4d37
|
||||
- :setup: Standalone instance
|
||||
- :steps:
|
||||
- 1. First create two Backends, without mapping trees.
|
||||
- 2. create the mapping trees for these backends
|
||||
- 3. reduce nsslapd-referral-check-period to accelerate test
|
||||
- 4. Remove error log file
|
||||
- 5. Create a referral entry on parent suffix
|
||||
- 6. Check that the server detected the referral
|
||||
- 7. Delete the referral entry
|
||||
- 8. Check that the server detected the deletion of the referral
|
||||
- 9. Remove error log file
|
||||
- 10. Create a referral entry on child suffix
|
||||
- 11. Check that the server detected the referral on both parent and child suffixes
|
||||
- 12. Delete the referral entry
|
||||
- 13. Check that the server detected the deletion of the referral on both parent and child suffixes
|
||||
- 14. Remove error log file
|
||||
- 15. Create a referral entry on parent suffix
|
||||
- 16. Check that the server detected the referral on both parent and child suffixes
|
||||
- 17. Delete the child referral entry
|
||||
- 18. Check that the server detected the deletion of the referral on child suffix but not on parent suffix
|
||||
- 19. Delete the parent referral entry
|
||||
- 20. Check that the server detected the deletion of the referral parent suffix
|
||||
-
|
||||
- :expectedresults:
|
||||
- all steps succeeds
|
||||
- """
|
||||
- inst = topology_st.standalone
|
||||
- # Step 1 First create two Backends, without mapping trees.
|
||||
- PARENT_SUFFIX='dc=parent,dc=com'
|
||||
- CHILD_SUFFIX='dc=child,%s' % PARENT_SUFFIX
|
||||
- be1 = create_backend(inst, 'Parent', PARENT_SUFFIX)
|
||||
- be2 = create_backend(inst, 'Child', CHILD_SUFFIX)
|
||||
- # Step 2 create the mapping trees for these backends
|
||||
- mts = MappingTrees(inst)
|
||||
- mt1 = mts.create(properties={
|
||||
- 'cn': PARENT_SUFFIX,
|
||||
- 'nsslapd-state': 'backend',
|
||||
- 'nsslapd-backend': 'Parent',
|
||||
- })
|
||||
- mt2 = mts.create(properties={
|
||||
- 'cn': CHILD_SUFFIX,
|
||||
- 'nsslapd-state': 'backend',
|
||||
- 'nsslapd-backend': 'Child',
|
||||
- 'nsslapd-parent-suffix': PARENT_SUFFIX,
|
||||
- })
|
||||
-
|
||||
- dc_ex = Domain(inst, dn=PARENT_SUFFIX)
|
||||
- assert dc_ex.exists()
|
||||
-
|
||||
- dc_st = Domain(inst, dn=CHILD_SUFFIX)
|
||||
- assert dc_st.exists()
|
||||
-
|
||||
- # Step 3 reduce nsslapd-referral-check-period to accelerate test
|
||||
- # requires a restart done on step 4
|
||||
- REFERRAL_CHECK=7
|
||||
- topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK))
|
||||
-
|
||||
- # Check that if we create a referral at parent level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is not detected at child backend
|
||||
-
|
||||
- # Step 3 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 4 Create a referral entry on parent suffix
|
||||
- rs_parent = Referrals(topology_st.standalone, PARENT_SUFFIX)
|
||||
-
|
||||
- referral_entry_parent = rs_parent.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 5 Check that the server detected the referral
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Step 6 Delete the referral entry
|
||||
- referral_entry_parent.delete()
|
||||
-
|
||||
- # Step 7 Check that the server detected the deletion of the referral
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Check that if we create a referral at child level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is detected at child backend
|
||||
-
|
||||
- # Step 8 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 9 Create a referral entry on child suffix
|
||||
- rs_child = Referrals(topology_st.standalone, CHILD_SUFFIX)
|
||||
- referral_entry_child = rs_child.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 10 Check that the server detected the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Step 11 Delete the referral entry
|
||||
- referral_entry_child.delete()
|
||||
-
|
||||
- # Step 12 Check that the server detected the deletion of the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Check that if we create a referral at child level and parent level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is detected at child backend
|
||||
-
|
||||
- # Step 13 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 14 Create a referral entry on parent suffix
|
||||
- # Create a referral entry on child suffix
|
||||
- referral_entry_parent = rs_parent.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
- referral_entry_child = rs_child.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 15 Check that the server detected the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Step 16 Delete the child referral entry
|
||||
- referral_entry_child.delete()
|
||||
-
|
||||
- # Step 17 Check that the server detected the deletion of the referral on child suffix but not on parent suffix
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Step 18 Delete the parent referral entry
|
||||
- referral_entry_parent.delete()
|
||||
-
|
||||
- # Step 19 Check that the server detected the deletion of the referral parent suffix
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- def fin():
|
||||
- log.info('Deleting referral')
|
||||
- try:
|
||||
- referral_entry_parent.delete()
|
||||
- referral.entry_child.delete()
|
||||
- except:
|
||||
- pass
|
||||
-
|
||||
- request.addfinalizer(fin)
|
||||
|
||||
def test_missing_backend_suffix(topology_st, request):
|
||||
"""Test that the server does not crash if a backend has no suffix
|
||||
--
|
||||
2.48.0
|
||||
|
||||
@ -1,90 +0,0 @@
From 4121ffe7a44fbacf513758661e71e483eb11ee3c Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 6 Jan 2025 14:00:39 +0100
Subject: [PATCH] Issue 6417 - (2nd) If an entry RDN is identical to the
 suffix, then Entryrdn gets broken during a reindex (#6460)

Bug description:
The primary fix has a flaw as it assumes that the
suffix ID is '1'.
If the RUV entry is the first entry of the database
the server loops indefinitely.

Fix description:
Read the suffix ID from the entryrdn index.

fixes: #6417

Reviewed by: Pierre Rogier (also reviewed the first fix)
---
 .../suites/replication/regression_m2_test.py |  9 +++++++++
 ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 19 ++++++++++++++++++-
 2 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
index abac46ada..72d4b9f89 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
@@ -1010,6 +1010,15 @@ def test_online_reinit_may_hang(topo_with_sigkill):
|
||||
"""
|
||||
M1 = topo_with_sigkill.ms["supplier1"]
|
||||
M2 = topo_with_sigkill.ms["supplier2"]
|
||||
+
|
||||
+ # The RFE 5367 (when enabled) retrieves the DN
|
||||
+ # from the dncache. This hides an issue
|
||||
+ # with primary fix for 6417.
|
||||
+ # We need to disable the RFE to verify that the primary
|
||||
+ # fix is properly fixed.
|
||||
+ if ds_is_newer('2.3.1'):
|
||||
+ M1.config.replace('nsslapd-return-original-entrydn', 'off')
|
||||
+
|
||||
M1.stop()
|
||||
ldif_file = '%s/supplier1.ldif' % M1.get_ldif_dir()
|
||||
M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 83b041192..1bbb6252a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1115,6 +1115,7 @@ entryrdn_lookup_dn(backend *be,
|
||||
rdn_elem *elem = NULL;
|
||||
int maybesuffix = 0;
|
||||
int db_retry = 0;
|
||||
+ ID suffix_id = 1;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "entryrdn_lookup_dn",
|
||||
"--> entryrdn_lookup_dn\n");
|
||||
@@ -1175,6 +1176,22 @@ entryrdn_lookup_dn(backend *be,
|
||||
/* Setting the bulk fetch buffer */
|
||||
data.flags = DB_DBT_MALLOC;
|
||||
|
||||
+ /* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
+ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
+ dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
|
||||
+ rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
|
||||
+ if (rc) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
+ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
+ slapi_sdn_get_ndn(be->be_suffix),
|
||||
+ suffix_id);
|
||||
+ } else {
|
||||
+ elem = (rdn_elem *)data.data;
|
||||
+ suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ }
|
||||
+ dblayer_value_free(be, &data);
|
||||
+ dblayer_value_free(be, &key);
|
||||
+
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
slapi_ch_free_string(&keybuf);
|
||||
@@ -1224,7 +1241,7 @@ entryrdn_lookup_dn(backend *be,
|
||||
}
|
||||
goto bail;
|
||||
}
|
||||
- if (workid == 1) {
|
||||
+ if (workid == suffix_id) {
|
||||
/* The loop (workid) iterates from the starting 'id'
|
||||
* up to the suffix ID (i.e. '1').
|
||||
* A corner case (#6417) is if an entry, on the path
|
||||
--
|
||||
2.48.0
|
||||
|
||||
@ -1,40 +0,0 @@
From 1ffcc9aa9a397180fe35283ee61b164471d073fb Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Tue, 7 Jan 2025 10:01:51 +0100
Subject: [PATCH] Issue 6417 - (2nd) fix typo

---
 ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 1bbb6252a..e2b8273a2 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1178,8 +1178,10 @@ entryrdn_lookup_dn(backend *be,
|
||||
|
||||
/* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
- dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
|
||||
- rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
|
||||
+ key.data = keybuf;
|
||||
+ key.size = key.ulen = strlen(keybuf) + 1;
|
||||
+ key.flags = DB_DBT_USERMEM;
|
||||
+ rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
"Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
@@ -1189,8 +1191,8 @@ entryrdn_lookup_dn(backend *be,
|
||||
elem = (rdn_elem *)data.data;
|
||||
suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
}
|
||||
- dblayer_value_free(be, &data);
|
||||
- dblayer_value_free(be, &key);
|
||||
+ slapi_ch_free(&data.data);
|
||||
+ slapi_ch_free_string(&keybuf);
|
||||
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
--
|
||||
2.48.0
|
||||
|
||||
@ -1,75 +0,0 @@
From 9e1284122a929fe14633a2aa6e2de4d72891f98f Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Mon, 13 Jan 2025 17:41:18 +0100
Subject: [PATCH] Issue 6417 - (3rd) If an entry RDN is identical to the
 suffix, then Entryrdn gets broken during a reindex (#6480)

Bug description:
The previous fix had a flaw.
In case entryrdn_lookup_dn is called with an undefined suffix,
the lookup of the suffix triggers a crash.
For example it can occur during an internal search of a
non-existing map (view plugin).
The issue exists in all releases but is hidden since 2.3.

Fix description:
Test that the suffix is defined.

fixes: #6417

Reviewed by: Pierre Rogier (Thanks!)
---
 ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 36 +++++++++++---------
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index e2b8273a2..01c77156f 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1176,23 +1176,27 @@ entryrdn_lookup_dn(backend *be,
|
||||
/* Setting the bulk fetch buffer */
|
||||
data.flags = DB_DBT_MALLOC;
|
||||
|
||||
- /* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
- keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
- key.data = keybuf;
|
||||
- key.size = key.ulen = strlen(keybuf) + 1;
|
||||
- key.flags = DB_DBT_USERMEM;
|
||||
- rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
- if (rc) {
|
||||
- slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
- "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
- slapi_sdn_get_ndn(be->be_suffix),
|
||||
- suffix_id);
|
||||
- } else {
|
||||
- elem = (rdn_elem *)data.data;
|
||||
- suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ /* Just in case the suffix ID is not '1' retrieve it from the database
|
||||
+ * if the suffix is not defined suffix_id remains '1'
|
||||
+ */
|
||||
+ if (be->be_suffix) {
|
||||
+ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
+ key.data = keybuf;
|
||||
+ key.size = key.ulen = strlen(keybuf) + 1;
|
||||
+ key.flags = DB_DBT_USERMEM;
|
||||
+ rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
+ if (rc) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
+ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
+ slapi_sdn_get_ndn(be->be_suffix),
|
||||
+ suffix_id);
|
||||
+ } else {
|
||||
+ elem = (rdn_elem *) data.data;
|
||||
+ suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ }
|
||||
+ slapi_ch_free(&data.data);
|
||||
+ slapi_ch_free_string(&keybuf);
|
||||
}
|
||||
- slapi_ch_free(&data.data);
|
||||
- slapi_ch_free_string(&keybuf);
|
||||
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
--
|
||||
2.48.0
|
||||
|
||||
@ -1,297 +0,0 @@
From d2f9dd82e3610ee9b73feea981c680c03bb21394 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 16 Jan 2025 08:42:53 -0500
Subject: [PATCH] Issue 6509 - Race condition with Paged Result searches

Description:

There is a race condition with Paged Result searches when a new operation comes
in while a paged search is finishing. This triggers an invalid time out error
and closes the connection with a T3 code.

The problem is that we do not use the "PagedResult lock" when checking the
connection's paged result data for a timeout event. This causes the paged
result timeout value to change unexpectedly and trigger a false timeout when a
new operation arrives.

Now we check the timeout without the conn lock; if it is expired it could
be a race condition and a false positive. Try the lock again and re-test the
timeout. This also prevents blocking non-paged result searches from
getting held up by the lock when it's not necessary.

This also fixes some memory leaks that occur when an error happens.

Relates: https://github.com/389ds/389-ds-base/issues/6509

Reviewed by: tbordaz & progier (Thanks!!)
---
 ldap/servers/slapd/daemon.c       | 61 ++++++++++++++++++-------------
 ldap/servers/slapd/opshared.c     | 58 ++++++++++++++---------------
 ldap/servers/slapd/pagedresults.c |  9 +++++
 ldap/servers/slapd/slap.h         |  2 +-
 4 files changed, 75 insertions(+), 55 deletions(-)

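The retry-under-lock logic described above is a double-checked pattern: a cheap unlocked timeout test, then a non-blocking trylock and a second test before acting on the result. A hedged Python model of that control flow follows; the class and names are invented for illustration and do not correspond to the server's code.

```python
import threading
import time

class PagedConn:
    """Toy model of the double-checked paged-result timeout test."""
    def __init__(self, timeout_s: float):
        self.pr_lock = threading.Lock()              # per-connection paged-result lock
        self.pr_deadline = time.monotonic() + timeout_s

    def is_timed_out_nolock(self) -> bool:
        # Cheap unlocked check; may race with a worker refreshing the deadline.
        return time.monotonic() > self.pr_deadline

    def poll_for_timeout(self) -> bool:
        """Return True only when the timeout is confirmed under the lock."""
        if not self.is_timed_out_nolock():
            return False
        # Possible race / false positive: re-check under the paged-result lock,
        # but never block the polling thread if a worker currently holds it.
        if not self.pr_lock.acquire(blocking=False):
            return False                              # busy: re-check on the next pass
        try:
            return self.is_timed_out_nolock()         # confirmed only if still expired
        finally:
            self.pr_lock.release()
```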
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index bb80dae36..13dfe250d 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1578,7 +1578,29 @@ setup_pr_read_pds(Connection_Table *ct)
             if (c->c_state == CONN_STATE_FREE) {
                 connection_table_move_connection_out_of_active_list(ct, c);
             } else {
-                /* we try to acquire the connection mutex, if it is already
+                /* Check for a timeout for PAGED RESULTS */
+                if (pagedresults_is_timedout_nolock(c)) {
+                    /*
+                     * There could be a race condition so lets try again with the
+                     * right lock
+                     */
+                    pthread_mutex_t *pr_mutex = pageresult_lock_get_addr(c);
+                    if (pthread_mutex_trylock(pr_mutex) == EBUSY) {
+                        c = next;
+                        continue;
+                    }
+                    if (pagedresults_is_timedout_nolock(c)) {
+                        pthread_mutex_unlock(pr_mutex);
+                        disconnect_server(c, c->c_connid, -1,
+                                          SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
+                                          0);
+                    } else {
+                        pthread_mutex_unlock(pr_mutex);
+                    }
+                }
+
+                /*
+                 * we try to acquire the connection mutex, if it is already
                  * acquired by another thread, don't wait
                  */
                 if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
@@ -1586,35 +1608,24 @@ setup_pr_read_pds(Connection_Table *ct)
                     continue;
                 }
                 if (c->c_flags & CONN_FLAG_CLOSING) {
-                    /* A worker thread has marked that this connection
-                     * should be closed by calling disconnect_server.
-                     * move this connection out of the active list
-                     * the last thread to use the connection will close it
+                    /*
+                     * A worker thread, or paged result timeout, has marked that
+                     * this connection should be closed by calling
+                     * disconnect_server(). Move this connection out of the active
+                     * list then the last thread to use the connection will close
+                     * it.
                      */
                     connection_table_move_connection_out_of_active_list(ct, c);
                 } else if (c->c_sd == SLAPD_INVALID_SOCKET) {
                     connection_table_move_connection_out_of_active_list(ct, c);
                 } else if (c->c_prfd != NULL) {
                     if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
-                        int add_fd = 1;
-                        /* check timeout for PAGED RESULTS */
-                        if (pagedresults_is_timedout_nolock(c)) {
-                            /* Exceeded the paged search timelimit; disconnect the client */
-                            disconnect_server_nomutex(c, c->c_connid, -1,
-                                                      SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
-                                                      0);
-                            connection_table_move_connection_out_of_active_list(ct,
-                                                                                c);
-                            add_fd = 0; /* do not poll on this fd */
-                        }
-                        if (add_fd) {
-                            ct->fd[count].fd = c->c_prfd;
-                            ct->fd[count].in_flags = SLAPD_POLL_FLAGS;
-                            /* slot i of the connection table is mapped to slot
-                             * count of the fds array */
-                            c->c_fdi = count;
-                            count++;
-                        }
+                        ct->fd[listnum][count].fd = c->c_prfd;
+                        ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
+                        /* slot i of the connection table is mapped to slot
+                         * count of the fds array */
+                        c->c_fdi = count;
+                        count++;
                     } else {
                         if (c->c_threadnumber >= c->c_max_threads_per_conn) {
                             c->c_maxthreadsblocked++;
@@ -1675,7 +1686,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused
                 continue;
             }
 
-            /* Try to get connection mutex, if not available just skip the connection and
+            /* Try to get connection mutex, if not available just skip the connection and
              * process other connections events. May generates cpu load for listening thread
              * if connection mutex is held for a long time
              */
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 7ab4117cd..a29eed052 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -250,7 +250,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
     char *errtext = NULL;
     int nentries, pnentries;
     int flag_search_base_found = 0;
-    int flag_no_such_object = 0;
+    bool flag_no_such_object = false;
     int flag_referral = 0;
     int flag_psearch = 0;
     int err_code = LDAP_SUCCESS;
@@ -315,7 +315,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
         rc = -1;
         goto free_and_return_nolock;
     }
-
+
     /* Set the time we actually started the operation */
     slapi_operation_set_time_started(operation);
 
@@ -798,11 +798,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
         }
 
         /* subtree searches :
-         * if the search was started above the backend suffix
-         * - temporarily set the SLAPI_SEARCH_TARGET_SDN to the
-         *   base of the node so that we don't get a NO SUCH OBJECT error
-         * - do not change the scope
-         */
+         * if the search was started above the backend suffix
+         * - temporarily set the SLAPI_SEARCH_TARGET_SDN to the
+         *   base of the node so that we don't get a NO SUCH OBJECT error
+         * - do not change the scope
+         */
         if (scope == LDAP_SCOPE_SUBTREE) {
             if (slapi_sdn_issuffix(be_suffix, basesdn)) {
                 if (free_sdn) {
@@ -825,53 +825,53 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
         switch (rc) {
         case 1:
             /* if the backend returned LDAP_NO_SUCH_OBJECT for a SEARCH request,
-             * it will not have sent back a result - otherwise, it will have
-             * sent a result */
+             * it will not have sent back a result - otherwise, it will have
+             * sent a result */
             rc = SLAPI_FAIL_GENERAL;
             slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
             if (err == LDAP_NO_SUCH_OBJECT) {
                 /* may be the object exist somewhere else
-                 * wait the end of the loop to send back this error
-                 */
-                flag_no_such_object = 1;
+                 * wait the end of the loop to send back this error
+                 */
+                flag_no_such_object = true;
             } else {
                 /* err something other than LDAP_NO_SUCH_OBJECT, so the backend will
-                 * have sent the result -
-                 * Set a flag here so we don't return another result. */
+                 * have sent the result -
+                 * Set a flag here so we don't return another result. */
                 sent_result = 1;
             }
-        /* fall through */
+            /* fall through */
 
         case -1: /* an error occurred */
+            slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
            /* PAGED RESULTS */
            if (op_is_pagedresults(operation)) {
                /* cleanup the slot */
                pthread_mutex_lock(pagedresults_mutex);
+                if (err != LDAP_NO_SUCH_OBJECT && !flag_no_such_object) {
+                    /* Free the results if not "no_such_object" */
+                    void *sr = NULL;
+                    slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
+                    be->be_search_results_release(&sr);
+                }
                pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
                rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1);
                pthread_mutex_unlock(pagedresults_mutex);
            }
-            if (1 == flag_no_such_object) {
-                break;
-            }
-            slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
-            if (err == LDAP_NO_SUCH_OBJECT) {
-                /* may be the object exist somewhere else
-                 * wait the end of the loop to send back this error
-                 */
-                flag_no_such_object = 1;
+
+            if (err == LDAP_NO_SUCH_OBJECT || flag_no_such_object) {
+                /* Maybe the object exists somewhere else, wait to the end
+                 * of the loop to send back this error */
+                flag_no_such_object = true;
                 break;
             } else {
-                /* for error other than LDAP_NO_SUCH_OBJECT
-                 * the error has already been sent
-                 * stop the search here
-                 */
+                /* For error other than LDAP_NO_SUCH_OBJECT the error has
+                 * already been sent stop the search here */
                 cache_return_target_entry(pb, be, operation);
                 goto free_and_return;
             }
 
        /* when rc == SLAPI_FAIL_DISKFULL this case is executed */
-
        case SLAPI_FAIL_DISKFULL:
            operation_out_of_disk_space();
            cache_return_target_entry(pb, be, operation);
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
index db87e486e..4aa1fa3e5 100644
--- a/ldap/servers/slapd/pagedresults.c
+++ b/ldap/servers/slapd/pagedresults.c
@@ -121,12 +121,15 @@ pagedresults_parse_control_value(Slapi_PBlock *pb,
     if (ber_scanf(ber, "{io}", pagesize, &cookie) == LBER_ERROR) {
         slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
                       "<= corrupted control value\n");
+        ber_free(ber, 1);
         return LDAP_PROTOCOL_ERROR;
     }
     if (!maxreqs) {
         slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
                       "Simple paged results requests per conn exceeded the limit: %d\n",
                       maxreqs);
+        ber_free(ber, 1);
+        slapi_ch_free_string(&cookie.bv_val);
         return LDAP_UNWILLING_TO_PERFORM;
     }
 
@@ -376,6 +379,10 @@ pagedresults_free_one_msgid(Connection *conn, ber_int_t msgid, pthread_mutex_t *
             }
             prp->pr_flags |= CONN_FLAG_PAGEDRESULTS_ABANDONED;
             prp->pr_flags &= ~CONN_FLAG_PAGEDRESULTS_PROCESSING;
+            if (conn->c_pagedresults.prl_count > 0) {
+                _pr_cleanup_one_slot(prp);
+                conn->c_pagedresults.prl_count--;
+            }
             rc = 0;
             break;
         }
@@ -940,7 +947,9 @@ pagedresults_is_timedout_nolock(Connection *conn)
             return 1;
         }
     }
+
     slapi_log_err(SLAPI_LOG_TRACE, "<-- pagedresults_is_timedout", "<= false 2\n");
+
     return 0;
 }
 
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 072f6f962..469874fd1 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -74,7 +74,7 @@ static char ptokPBE[34] = "Internal (Software) Token ";
 #include <sys/stat.h>
 #include <sys/socket.h>
 #include <netinet/in.h>
-
+#include <stdbool.h>
 #include <time.h> /* For timespec definitions */
 
 /* Provides our int types and platform specific requirements. */
-- 
2.48.0

@ -1,29 +0,0 @@
From 27cd055197bc3cae458a1f86621aa5410c66dd2c Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 20 Jan 2025 15:51:24 -0500
Subject: [PATCH] Issue 6509 - Fix cherry pick issue (race condition in Paged
 results)

Relates: https://github.com/389ds/389-ds-base/issues/6509
---
 ldap/servers/slapd/daemon.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 13dfe250d..57e07e5f5 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1620,8 +1620,8 @@ setup_pr_read_pds(Connection_Table *ct)
                     connection_table_move_connection_out_of_active_list(ct, c);
                 } else if (c->c_prfd != NULL) {
                     if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
-                        ct->fd[listnum][count].fd = c->c_prfd;
-                        ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
+                        ct->fd[count].fd = c->c_prfd;
+                        ct->fd[count].in_flags = SLAPD_POLL_FLAGS;
                         /* slot i of the connection table is mapped to slot
                          * count of the fds array */
                         c->c_fdi = count;
-- 
2.48.0

File diff suppressed because it is too large
@ -1,651 +0,0 @@
From dba27e56161943fbcf54ecbc28337e2c81b07979 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 13 Jan 2025 18:03:07 +0100
Subject: [PATCH] Issue 6494 - Various errors when using extended matching rule
 on vlv sort filter (#6495)

* Issue 6494 - Various errors when using extended matching rule on vlv sort filter

Various issues when configuring and using an extended matching rule within a vlv sort filter:

A race condition around the key storage while indexing, leading to various heap and data corruptions. (lmdb only)
A crash while indexing if vlv is misconfigured, because a NULL key is not checked.
A read past the end of a buffer because of a data type mismatch between SlapiValue and berval.
Memory leaks.

Solution:

Serialize the vlv index key generation if the vlv filter has an extended matching rule.
Check for NULL keys.
Always provide SlapiValue even if we want to get keys as bervals.
Properly free the resources.

Issue: #6494

Reviewed by: @mreynolds389 (Thanks!)

(cherry picked from commit 4bd27ecc4e1d21c8af5ab8cad795d70477179a98)
(cherry picked from commit 223a20250cbf29a546dcb398cfc76024d2f91347)
(cherry picked from commit 280043740a525eaf0438129fd8b99ca251c62366)
---
 .../tests/suites/indexes/regression_test.py   |  29 +++
 .../tests/suites/vlv/regression_test.py       | 183 ++++++++++++++++++
 ldap/servers/slapd/back-ldbm/cleanup.c        |   8 +
 ldap/servers/slapd/back-ldbm/dblayer.c        |  22 ++-
 ldap/servers/slapd/back-ldbm/ldbm_attr.c      |   2 +-
 ldap/servers/slapd/back-ldbm/matchrule.c      |   8 +-
 .../servers/slapd/back-ldbm/proto-back-ldbm.h |   3 +-
 ldap/servers/slapd/back-ldbm/sort.c           |  37 ++--
 ldap/servers/slapd/back-ldbm/vlv.c            |  26 +--
 ldap/servers/slapd/back-ldbm/vlv_srch.c       |   4 +-
 ldap/servers/slapd/generation.c               |   5 +
 ldap/servers/slapd/plugin_mr.c                |  12 +-
 src/lib389/lib389/backend.py                  |  10 +
 13 files changed, 292 insertions(+), 57 deletions(-)

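The configuration these new tests exercise can be reproduced with a few lib389 calls. The following is only a condensed sketch of the same steps the test code below performs; topo, SUFFIX2, TASK_WAIT and the second backend are assumed to be defined exactly as in the test module, and it is not part of the patch itself.

from lib389.backend import Backends
from lib389.tasks import Tasks

# Look the backend up by suffix (uses the new Backends.get_backend() helper).
be = Backends(topo.standalone).get_backend(SUFFIX2)

# Put the same 2.5.13.2 matching rule OID the tests use on existing indexes ...
for attr in ('uid', 'cn'):
    index = be.get_index(attr)
    index.replace('nsMatchingRule', '2.5.13.2')

# ... then reindex the backend and wait for the task to finish.
tasks = Tasks(topo.standalone)
assert tasks.reindex(suffix=SUFFIX2, args={TASK_WAIT: True}) == 0

Before this fix, reindexing with such a rule configured on a vlv sort could, per the description above, generate corrupted or missing keys.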
diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
index fc6db727f..2196fb2ed 100644
--- a/dirsrvtests/tests/suites/indexes/regression_test.py
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
@@ -227,6 +227,35 @@ def test_reject_virtual_attr_for_indexing(topo):
         break
 
 
+def test_reindex_extended_matching_rule(topo, add_backend_and_ldif_50K_users):
+    """Check that index with extended matching rule are reindexed properly.
+
+    :id: 8a3198e8-cc5a-11ef-a3e7-482ae39447e5
+    :setup: Standalone instance + a second backend with 50K users
+    :steps:
+        1. Configure uid with 2.5.13.2 matching rule
+        1. Configure cn with 2.5.13.2 matching rule
+        2. Reindex
+    :expectedresults:
+        1. Success
+        2. Success
+    """
+
+    inst = topo.standalone
+    tasks = Tasks(inst)
+    be2 = Backends(topo.standalone).get_backend(SUFFIX2)
+    index = be2.get_index('uid')
+    index.replace('nsMatchingRule', '2.5.13.2')
+    index = be2.get_index('cn')
+    index.replace('nsMatchingRule', '2.5.13.2')
+
+    assert tasks.reindex(
+        suffix=SUFFIX2,
+        args={TASK_WAIT: True}
+    ) == 0
+
+
+
 if __name__ == "__main__":
     # Run isolated
     # -s for DEBUG mode
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
index 3b66de8b5..6ab709bd3 100644
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
@@ -22,6 +22,146 @@ logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
 
+class BackendHandler:
+    def __init__(self, inst, bedict, scope=ldap.SCOPE_ONELEVEL):
+        self.inst = inst
+        self.bedict = bedict
+        self.bes = Backends(inst)
+        self.scope = scope
+        self.data = {}
+
+    def find_backend(self, bename):
+        for be in self.bes.list():
+            if be.get_attr_val_utf8_l('cn') == bename:
+                return be
+        return None
+
+    def cleanup(self):
+        benames = list(self.bedict.keys())
+        benames.reverse()
+        for bename in benames:
+            be = self.find_backend(bename)
+            if be:
+                be.delete()
+
+    def setup(self):
+        # Create backends, add vlv index and populate the backends.
+        for bename,suffix in self.bedict.items():
+            be = self.bes.create(properties={
+                'cn': bename,
+                'nsslapd-suffix': suffix,
+            })
+            # Add suffix entry
+            Organization(self.inst, dn=suffix).create(properties={ 'o': bename, })
+            # Configure vlv
+            vlv_search, vlv_index = create_vlv_search_and_index(
+                self.inst, basedn=suffix,
+                bename=bename, scope=self.scope,
+                prefix=f'vlv_1lvl_{bename}')
+            # Reindex
+            reindex_task = Tasks(self.inst)
+            assert reindex_task.reindex(
+                suffix=suffix,
+                attrname=vlv_index.rdn,
+                args={TASK_WAIT: True},
+                vlv=True
+            ) == 0
+            # Add ou=People entry
+            OrganizationalUnits(self.inst, suffix).create(properties={'ou': 'People'})
+            # Add another ou that will be deleted before the export
+            # so that import will change the vlv search basedn entryid
+            ou2 = OrganizationalUnits(self.inst, suffix).create(properties={'ou': 'dummy ou'})
+            # Add a demo user so that vlv_check is happy
+            dn = f'uid=demo_user,ou=people,{suffix}'
+            UserAccount(self.inst, dn=dn).create( properties= {
+                'uid': 'demo_user',
+                'cn': 'Demo user',
+                'sn': 'Demo user',
+                'uidNumber': '99998',
+                'gidNumber': '99998',
+                'homeDirectory': '/var/empty',
+                'loginShell': '/bin/false',
+                'userpassword': DEMO_PW })
+            # Add regular user
+            add_users(self.inst, 10, suffix=suffix)
+            # Removing ou2
+            ou2.delete()
+            # And export
+            tasks = Tasks(self.inst)
+            ldif = f'{self.inst.get_ldif_dir()}/db-{bename}.ldif'
+            assert tasks.exportLDIF(suffix=suffix,
+                                    output_file=ldif,
+                                    args={TASK_WAIT: True}) == 0
+            # Add the various parameters in topology_st.belist
+            self.data[bename] = { 'be': be,
+                                  'suffix': suffix,
+                                  'ldif': ldif,
+                                  'vlv_search' : vlv_search,
+                                  'vlv_index' : vlv_index,
+                                  'dn' : dn}
+
+
+def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
+                                scope=ldap.SCOPE_SUBTREE, prefix="vlv", vlvsort="cn"):
+    vlv_searches = VLVSearch(inst)
+    vlv_search_properties = {
+        "objectclass": ["top", "vlvSearch"],
+        "cn": f"{prefix}Srch",
+        "vlvbase": basedn,
+        "vlvfilter": "(uid=*)",
+        "vlvscope": str(scope),
+    }
+    vlv_searches.create(
+        basedn=f"cn={bename},cn=ldbm database,cn=plugins,cn=config",
+        properties=vlv_search_properties
+    )
+
+    vlv_index = VLVIndex(inst)
+    vlv_index_properties = {
+        "objectclass": ["top", "vlvIndex"],
+        "cn": f"{prefix}Idx",
+        "vlvsort": vlvsort,
+    }
+    vlv_index.create(
+        basedn=f"cn={prefix}Srch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
+        properties=vlv_index_properties
+    )
+    return vlv_searches, vlv_index
+
+
+@pytest.fixture
+def vlv_setup_with_uid_mr(topology_st, request):
+    inst = topology_st.standalone
+    bename = 'be1'
+    besuffix = f'o={bename}'
+    beh = BackendHandler(inst, { bename: besuffix })
+
+    def fin():
+        # Cleanup function
+        if not DEBUGGING and inst.exists() and inst.status():
+            beh.cleanup()
+
+    request.addfinalizer(fin)
+
+    # Make sure that our backend are not already present.
+    beh.cleanup()
+
+    # Then add the new backend
+    beh.setup()
+
+    index = Index(inst, f'cn=uid,cn=index,cn={bename},cn=ldbm database,cn=plugins,cn=config')
+    index.add('nsMatchingRule', '2.5.13.2')
+    reindex_task = Tasks(inst)
+    assert reindex_task.reindex(
+        suffix=besuffix,
+        attrname='uid',
+        args={TASK_WAIT: True}
+    ) == 0
+
+    topology_st.beh = beh
+    return topology_st
+
+
 @pytest.mark.DS47966
 def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
     """
@@ -105,6 +245,49 @@ def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
     entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")
 
 
+def test_vlv_with_mr(vlv_setup_with_uid_mr):
+    """
+    Testing vlv having specific matching rule
+
+    :id: 5e04afe2-beec-11ef-aa84-482ae39447e5
+    :setup: Standalone with uid have a matching rule index
+    :steps:
+        1. Append vlvIndex entries then vlvSearch entry in the dse.ldif
+        2. Restart the server
+    :expectedresults:
+        1. Should Success.
+        2. Should Success.
+    """
+    inst = vlv_setup_with_uid_mr.standalone
+    beh = vlv_setup_with_uid_mr.beh
+    bename, besuffix = next(iter(beh.bedict.items()))
+    vlv_searches, vlv_index = create_vlv_search_and_index(
+        inst, basedn=besuffix, bename=bename,
+        vlvsort="uid:2.5.13.2")
+    # Reindex the vlv
+    reindex_task = Tasks(inst)
+    assert reindex_task.reindex(
+        suffix=besuffix,
+        attrname=vlv_index.rdn,
+        args={TASK_WAIT: True},
+        vlv=True
+    ) == 0
+
+    inst.restart()
+    users = UserAccounts(inst, besuffix)
+    user_properties = {
+        'uid': f'a new testuser',
+        'cn': f'a new testuser',
+        'sn': 'user',
+        'uidNumber': '0',
+        'gidNumber': '0',
+        'homeDirectory': 'foo'
+    }
+    user = users.create(properties=user_properties)
+    user.delete()
+    assert inst.status()
+
+
 if __name__ == "__main__":
     # Run isolated
     # -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/cleanup.c b/ldap/servers/slapd/back-ldbm/cleanup.c
index 6b2e9faef..939d8bc4f 100644
--- a/ldap/servers/slapd/back-ldbm/cleanup.c
+++ b/ldap/servers/slapd/back-ldbm/cleanup.c
@@ -15,12 +15,14 @@
 
 #include "back-ldbm.h"
 #include "dblayer.h"
+#include "vlv_srch.h"
 
 int
 ldbm_back_cleanup(Slapi_PBlock *pb)
 {
     struct ldbminfo *li;
     Slapi_Backend *be;
+    struct vlvSearch *nextp;
 
     slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_cleanup", "ldbm backend cleaning up\n");
     slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
@@ -45,6 +47,12 @@ ldbm_back_cleanup(Slapi_PBlock *pb)
         return 0;
     }
 
+    /* Release the vlv list */
+    for (struct vlvSearch *p=be->vlvSearchList; p; p=nextp) {
+        nextp = p->vlv_next;
+        vlvSearch_delete(&p);
+    }
+
     /*
      * We check if li is NULL. Because of an issue in how we create backends
      * we share the li and plugin info between many unique backends. This causes
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 05cc5b891..6b8ce0016 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -494,8 +494,12 @@ int
 dblayer_close(struct ldbminfo *li, int dbmode)
 {
     dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
-
-    return priv->dblayer_close_fn(li, dbmode);
+    int rc = priv->dblayer_close_fn(li, dbmode);
+    if (rc == 0) {
+        /* Clean thread specific data */
+        dblayer_destroy_txn_stack();
+    }
+    return rc;
 }
 
 /* Routines for opening and closing random files in the DB_ENV.
@@ -621,6 +625,9 @@ dblayer_erase_index_file(backend *be, struct attrinfo *a, PRBool use_lock, int n
         return 0;
     }
     struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
+    if (NULL == li) {
+        return 0;
+    }
     dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
 
     return priv->dblayer_rm_db_file_fn(be, a, use_lock, no_force_chkpt);
@@ -1382,3 +1389,14 @@ dblayer_pop_pvt_txn(void)
     }
     return;
 }
+
+void
+dblayer_destroy_txn_stack(void)
+{
+    /*
+     * Cleanup for the main thread to avoid false/positive leaks from libasan
+     * Note: data is freed because PR_SetThreadPrivate calls the
+     * dblayer_cleanup_txn_stack callback
+     */
+    PR_SetThreadPrivate(thread_private_txn_stack, NULL);
+}
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
index 708756d3e..70700ca1d 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
@@ -54,7 +54,7 @@ attrinfo_delete(struct attrinfo **pp)
     idl_release_private(*pp);
     (*pp)->ai_key_cmp_fn = NULL;
     slapi_ch_free((void **)&((*pp)->ai_type));
-    slapi_ch_free((void **)(*pp)->ai_index_rules);
+    charray_free((*pp)->ai_index_rules);
     slapi_ch_free((void **)&((*pp)->ai_attrcrypt));
     attr_done(&((*pp)->ai_sattr));
     attrinfo_delete_idlistinfo(&(*pp)->ai_idlistinfo);
diff --git a/ldap/servers/slapd/back-ldbm/matchrule.c b/ldap/servers/slapd/back-ldbm/matchrule.c
index 5d516b9f8..5365e8acf 100644
--- a/ldap/servers/slapd/back-ldbm/matchrule.c
+++ b/ldap/servers/slapd/back-ldbm/matchrule.c
@@ -107,7 +107,7 @@ destroy_matchrule_indexer(Slapi_PBlock *pb)
  * is destroyed
  */
 int
-matchrule_values_to_keys(Slapi_PBlock *pb, struct berval **input_values, struct berval ***output_values)
+matchrule_values_to_keys(Slapi_PBlock *pb, Slapi_Value **input_values, struct berval ***output_values)
 {
     IFP mrINDEX = NULL;
 
@@ -135,10 +135,8 @@ matchrule_values_to_keys_sv(Slapi_PBlock *pb, Slapi_Value **input_values, Slapi_
     slapi_pblock_get(pb, SLAPI_PLUGIN_MR_INDEX_SV_FN, &mrINDEX);
     if (NULL == mrINDEX) { /* old school - does not have SV function */
         int rc;
-        struct berval **bvi = NULL, **bvo = NULL;
-        valuearray_get_bervalarray(input_values, &bvi);
-        rc = matchrule_values_to_keys(pb, bvi, &bvo);
-        ber_bvecfree(bvi);
+        struct berval **bvo = NULL;
+        rc = matchrule_values_to_keys(pb, input_values, &bvo);
         /* note - the indexer owns bvo and will free it when destroyed */
         valuearray_init_bervalarray(bvo, output_values);
         /* store output values in SV form - caller expects SLAPI_PLUGIN_MR_KEYS is Slapi_Value** */
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index d93ff9239..157788fa4 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -84,6 +84,7 @@ int dblayer_release_index_file(backend *be, struct attrinfo *a, DB *pDB);
 int dblayer_erase_index_file(backend *be, struct attrinfo *a, PRBool use_lock, int no_force_chkpt);
 int dblayer_get_id2entry(backend *be, DB **ppDB);
 int dblayer_release_id2entry(backend *be, DB *pDB);
+void dblayer_destroy_txn_stack(void);
 int dblayer_txn_init(struct ldbminfo *li, back_txn *txn);
 int dblayer_txn_begin(backend *be, back_txnid parent_txn, back_txn *txn);
 int dblayer_txn_begin_ext(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock);
@@ -560,7 +561,7 @@ int compute_allids_limit(Slapi_PBlock *pb, struct ldbminfo *li);
  */
 int create_matchrule_indexer(Slapi_PBlock **pb, char *matchrule, char *type);
 int destroy_matchrule_indexer(Slapi_PBlock *pb);
-int matchrule_values_to_keys(Slapi_PBlock *pb, struct berval **input_values, struct berval ***output_values);
+int matchrule_values_to_keys(Slapi_PBlock *pb, Slapi_Value **input_values, struct berval ***output_values);
 int matchrule_values_to_keys_sv(Slapi_PBlock *pb, Slapi_Value **input_values, Slapi_Value ***output_values);
 
 /*
diff --git a/ldap/servers/slapd/back-ldbm/sort.c b/ldap/servers/slapd/back-ldbm/sort.c
index 70ac60803..196af753f 100644
--- a/ldap/servers/slapd/back-ldbm/sort.c
+++ b/ldap/servers/slapd/back-ldbm/sort.c
@@ -536,30 +536,18 @@ compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s, baggage_carrier *bc, int *e
             valuearray_get_bervalarray(valueset_get_valuearray(&attr_b->a_present_values), &value_b);
         } else {
             /* Match rule case */
-            struct berval **actual_value_a = NULL;
-            struct berval **actual_value_b = NULL;
-            struct berval **temp_value = NULL;
-
-            valuearray_get_bervalarray(valueset_get_valuearray(&attr_a->a_present_values), &actual_value_a);
-            valuearray_get_bervalarray(valueset_get_valuearray(&attr_b->a_present_values), &actual_value_b);
-            matchrule_values_to_keys(this_one->mr_pb, actual_value_a, &temp_value);
-            /* Now copy it, so the second call doesn't crap on it */
-            value_a = slapi_ch_bvecdup(temp_value); /* Really, we'd prefer to not call the chXXX variant...*/
-            matchrule_values_to_keys(this_one->mr_pb, actual_value_b, &value_b);
-
-            if ((actual_value_a && !value_a) ||
-                (actual_value_b && !value_b)) {
-                ber_bvecfree(actual_value_a);
-                ber_bvecfree(actual_value_b);
-                CACHE_RETURN(&inst->inst_cache, &a);
-                CACHE_RETURN(&inst->inst_cache, &b);
-                *error = 1;
-                return 0;
+            Slapi_Value **va_a = valueset_get_valuearray(&attr_a->a_present_values);
+            Slapi_Value **va_b = valueset_get_valuearray(&attr_b->a_present_values);
+
+            matchrule_values_to_keys(this_one->mr_pb, va_a, &value_a);
+            /* Plugin owns the memory ==> duplicate the key before next call garble it */
+            value_a = slapi_ch_bvecdup(value_a);
+            matchrule_values_to_keys(this_one->mr_pb, va_b, &value_b);
+
+            if ((va_a && !value_a) || (va_b && !value_b)) {
+                result = 0;
+                goto bail;
             }
-            if (actual_value_a)
-                ber_bvecfree(actual_value_a);
-            if (actual_value_b)
-                ber_bvecfree(actual_value_b);
         }
         /* Compare them */
         if (!order) {
@@ -582,9 +570,10 @@ compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s, baggage_carrier *bc, int *e
         }
         /* If so, proceed to the next attribute for comparison */
     }
+    *error = 0;
+bail:
     CACHE_RETURN(&inst->inst_cache, &a);
     CACHE_RETURN(&inst->inst_cache, &b);
-    *error = 0;
     return result;
 }
 
diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c
index 121fb3667..70e0bac85 100644
--- a/ldap/servers/slapd/back-ldbm/vlv.c
+++ b/ldap/servers/slapd/back-ldbm/vlv.c
@@ -605,7 +605,7 @@ vlv_getindices(IFP callback_fn, void *param, backend *be)
  * generate the same composite key, so we append the EntryID
  * to ensure the uniqueness of the key.
  *
- * Always creates a key. Never returns NULL.
+ * May return NULL in case of errors (typically in some configuration error cases)
  */
 static struct vlv_key *
 vlv_create_key(struct vlvIndex *p, struct backentry *e)
@@ -659,10 +659,8 @@ vlv_create_key(struct vlvIndex *p, struct backentry *e)
                 /* Matching rule. Do the magic mangling. Plugin owns the memory. */
                 if (p->vlv_mrpb[sortattr] != NULL) {
                     /* xxxPINAKI */
-                    struct berval **bval = NULL;
                     Slapi_Value **va = valueset_get_valuearray(&attr->a_present_values);
-                    valuearray_get_bervalarray(va, &bval);
-                    matchrule_values_to_keys(p->vlv_mrpb[sortattr], bval, &value);
+                    matchrule_values_to_keys(p->vlv_mrpb[sortattr], va, &value);
                 }
             }
 
@@ -779,6 +777,13 @@ do_vlv_update_index(back_txn *txn, struct ldbminfo *li __attribute__((unused)),
     }
 
     key = vlv_create_key(pIndex, entry);
+    if (key == NULL) {
+        slapi_log_err(SLAPI_LOG_ERR, "vlv_create_key", "Unable to generate vlv %s index key."
+                      " There may be a configuration issue.\n", pIndex->vlv_name);
+        dblayer_release_index_file(be, pIndex->vlv_attrinfo, db);
+        return rc;
+    }
+
     if (NULL != txn) {
         db_txn = txn->back_txn_txn;
     } else {
@@ -949,11 +954,11 @@ vlv_create_matching_rule_value(Slapi_PBlock *pb, struct berval *original_value)
     struct berval **value = NULL;
     if (pb != NULL) {
         struct berval **outvalue = NULL;
-        struct berval *invalue[2];
-        invalue[0] = original_value; /* jcm: cast away const */
-        invalue[1] = NULL;
+        Slapi_Value v_in = {0};
+        Slapi_Value *va_in[2] = { &v_in, NULL };
+        slapi_value_init_berval(&v_in, original_value);
         /* The plugin owns the memory it returns in outvalue */
-        matchrule_values_to_keys(pb, invalue, &outvalue);
+        matchrule_values_to_keys(pb, va_in, &outvalue);
         if (outvalue != NULL) {
             value = slapi_ch_bvecdup(outvalue);
         }
@@ -1610,11 +1615,8 @@ retry:
             PRBool needFree = PR_FALSE;
 
             if (sort_control->mr_pb != NULL) {
-                struct berval **tmp_entry_value = NULL;
-
-                valuearray_get_bervalarray(csn_value, &tmp_entry_value);
                 /* Matching rule. Do the magic mangling. Plugin owns the memory. */
-                matchrule_values_to_keys(sort_control->mr_pb, /* xxxPINAKI needs modification attr->a_vals */ tmp_entry_value, &entry_value);
+                matchrule_values_to_keys(sort_control->mr_pb, csn_value, &entry_value);
             } else {
                 valuearray_get_bervalarray(csn_value, &entry_value);
                 needFree = PR_TRUE; /* entry_value is a copy */
diff --git a/ldap/servers/slapd/back-ldbm/vlv_srch.c b/ldap/servers/slapd/back-ldbm/vlv_srch.c
index fe1208d59..11d1c715b 100644
--- a/ldap/servers/slapd/back-ldbm/vlv_srch.c
+++ b/ldap/servers/slapd/back-ldbm/vlv_srch.c
@@ -203,6 +203,9 @@ vlvSearch_delete(struct vlvSearch **ppvs)
 {
     if (ppvs != NULL && *ppvs != NULL) {
         struct vlvIndex *pi, *ni;
+        if ((*ppvs)->vlv_e) {
+            slapi_entry_free((struct slapi_entry *)((*ppvs)->vlv_e));
+        }
         slapi_sdn_free(&((*ppvs)->vlv_dn));
         slapi_ch_free((void **)&((*ppvs)->vlv_name));
         slapi_sdn_free(&((*ppvs)->vlv_base));
@@ -217,7 +220,6 @@ vlvSearch_delete(struct vlvSearch **ppvs)
             pi = ni;
         }
         slapi_ch_free((void **)ppvs);
-        *ppvs = NULL;
     }
 }
 
diff --git a/ldap/servers/slapd/generation.c b/ldap/servers/slapd/generation.c
index c4f20f793..89f097322 100644
--- a/ldap/servers/slapd/generation.c
+++ b/ldap/servers/slapd/generation.c
@@ -93,9 +93,13 @@ get_server_dataversion()
     lenstr *l = NULL;
     Slapi_Backend *be;
     char *cookie;
+    static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
 
+    /* Serialize to avoid race condition */
+    pthread_mutex_lock(&mutex);
     /* we already cached the copy - just return it */
     if (server_dataversion_id != NULL) {
+        pthread_mutex_unlock(&mutex);
         return server_dataversion_id;
     }
 
@@ -130,5 +134,6 @@ get_server_dataversion()
         server_dataversion_id = slapi_ch_strdup(l->ls_buf);
     }
     lenstr_free(&l);
+    pthread_mutex_unlock(&mutex);
     return server_dataversion_id;
 }
diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c
index 13f76fe52..6cf88b7de 100644
--- a/ldap/servers/slapd/plugin_mr.c
+++ b/ldap/servers/slapd/plugin_mr.c
@@ -391,28 +391,18 @@ mr_wrap_mr_index_sv_fn(Slapi_PBlock *pb)
     return rc;
 }
 
-/* this function takes SLAPI_PLUGIN_MR_VALUES as struct berval ** and
+/* this function takes SLAPI_PLUGIN_MR_VALUES as Slapi_Value ** and
    returns SLAPI_PLUGIN_MR_KEYS as struct berval **
 */
 static int
 mr_wrap_mr_index_fn(Slapi_PBlock *pb)
 {
     int rc = -1;
-    struct berval **in_vals = NULL;
     struct berval **out_vals = NULL;
     struct mr_private *mrpriv = NULL;
-    Slapi_Value **in_vals_sv = NULL;
     Slapi_Value **out_vals_sv = NULL;
 
-    slapi_pblock_get(pb, SLAPI_PLUGIN_MR_VALUES, &in_vals); /* get bervals */
-    /* convert bervals to sv ary */
-    valuearray_init_bervalarray(in_vals, &in_vals_sv);
-    slapi_pblock_set(pb, SLAPI_PLUGIN_MR_VALUES, in_vals_sv); /* use sv */
     rc = mr_wrap_mr_index_sv_fn(pb);
-    /* clean up in_vals_sv */
-    valuearray_free(&in_vals_sv);
-    /* restore old in_vals */
-    slapi_pblock_set(pb, SLAPI_PLUGIN_MR_VALUES, in_vals);
     /* get result sv keys */
     slapi_pblock_get(pb, SLAPI_PLUGIN_MR_KEYS, &out_vals_sv);
     /* convert to bvec */
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index 9acced205..cee073ea7 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -1029,6 +1029,16 @@ class Backends(DSLdapObjects):
         for be in sorted(self.list(), key=lambda be: len(be.get_suffix()), reverse=True):
             be.delete()
 
+    def get_backend(self, suffix):
+        """
+        Return the backend associated with the provided suffix.
+        """
+        suffix_l = suffix.lower()
+        for be in self.list():
+            if be.get_attr_val_utf8_l('nsslapd-suffix') == suffix_l:
+                return be
+        return None
+
 
 class DatabaseConfig(DSLdapObject):
     """Backend Database configuration
-- 
2.48.1

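Once this patch is applied, the new Backends.get_backend() helper shown above can be used to look a backend up by suffix. A short usage sketch, with a hypothetical instance and suffix name mirroring the regression tests:

from lib389.backend import Backends

# 'inst' is an existing, connected DirSrv instance and 'o=be1' an existing suffix.
be = Backends(inst).get_backend('o=be1')
if be is not None:
    print(be.get_attr_val_utf8_l('nsslapd-suffix'))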
Some files were not shown because too many files have changed in this diff.