From 6a59456f240d339422d08954b7af4b026742e5d5 Mon Sep 17 00:00:00 2001 From: Viktor Ashirov Date: Wed, 12 Feb 2025 16:13:44 +0100 Subject: [PATCH] Bump version to 3.0.6-3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Resolves: RHEL-5141 - [RFE] For each request, a ldap client can assign an identifier to be added in the logs - Resolves: RHEL-77948 - ns-slapd crashes with data directory ≥ 2 days old [rhel-10] - Resolves: RHEL-78342 - During import of entries without nsUniqueId, a supplier generates duplicate nsUniqueId (LMDB only) --- ...reindex-is-broken-if-index-type-is-s.patch | 237 -- ...nv.py-python3-magic-conflicts-with-p.patch | 53 + ...pd-mdb-max-dbs-autotuning-doesn-t-wo.patch | 311 +++ ...ix-dbscan-options-and-man-pages-6315.patch | 72 + ...-log-rotation-refresh-the-FD-pointer.patch | 146 ++ ...n-a-large-group-slow-if-substring-in.patch | 236 ++ ...ugin-failure-to-handle-a-modrdn-for-.patch | 70 + ...ate-race-condition-in-paged_results_.patch | 43 + ...-an-initial-failure-subsequent-onlin.patch | 566 +++++ ...g-import-of-entries-without-nsUnique.patch | 165 ++ ...-6596-BUG-Compilation-Regresion-6597.patch | 77 + ...upport-of-Session-Tracking-Control-i.patch | 2106 +++++++++++++++++ 389-ds-base.spec | 12 + main.fmf | 2 +- 14 files changed, 3858 insertions(+), 238 deletions(-) delete mode 100644 0001-Issue-6316-lmdb-reindex-is-broken-if-index-type-is-s.patch create mode 100644 0001-Issue-6544-logconv.py-python3-magic-conflicts-with-p.patch create mode 100644 0002-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch create mode 100644 0003-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch create mode 100644 0004-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch create mode 100644 0005-Issue-6436-MOD-on-a-large-group-slow-if-substring-in.patch create mode 100644 0006-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch create mode 100644 0007-Issue-6258-Mitigate-race-condition-in-paged_results_.patch create mode 100644 0008-Issue-6229-After-an-initial-failure-subsequent-onlin.patch create mode 100644 0009-Issue-6554-During-import-of-entries-without-nsUnique.patch create mode 100644 0010-Issue-6596-BUG-Compilation-Regresion-6597.patch create mode 100644 0011-Issue-6367-RFE-support-of-Session-Tracking-Control-i.patch diff --git a/0001-Issue-6316-lmdb-reindex-is-broken-if-index-type-is-s.patch b/0001-Issue-6316-lmdb-reindex-is-broken-if-index-type-is-s.patch deleted file mode 100644 index 939065b..0000000 --- a/0001-Issue-6316-lmdb-reindex-is-broken-if-index-type-is-s.patch +++ /dev/null @@ -1,237 +0,0 @@ -From a251914c8defce11c3f8496406af8dec6cf50c4b Mon Sep 17 00:00:00 2001 -From: progier389 -Date: Fri, 6 Sep 2024 18:07:17 +0200 -Subject: [PATCH] Issue 6316 - lmdb reindex is broken if index type is - specified (#6318) - -While reindexing using task or offline reindex, if the attribute name contains the index type (for example :eq,pres) -Then the attribute is not reindexed. Problem occurs when lmdb is used, things are working fine with bdb. -Solution: strip the index type in reindex as it is done in bdb case. -Anyway the reindex design requires that for a given attribute all the configured index types must be rebuild. - -Issue: #6316 - -Reviewed by: @tbordaz, @droideck (Thanks!) 
---- - .../tests/suites/indexes/regression_test.py | 141 +++++++++++++++++- - .../slapd/back-ldbm/db-mdb/mdb_import.c | 10 +- - 2 files changed, 147 insertions(+), 4 deletions(-) - -diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py -index 51f88017d..e98ca6172 100644 ---- a/dirsrvtests/tests/suites/indexes/regression_test.py -+++ b/dirsrvtests/tests/suites/indexes/regression_test.py -@@ -10,6 +10,9 @@ import time - import os - import pytest - import ldap -+import logging -+import glob -+import re - from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX - from lib389.backend import Backend, Backends, DatabaseConfig - from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate -@@ -31,6 +34,8 @@ SUFFIX2 = 'dc=example2,dc=com' - BENAME2 = 'be2' - - DEBUGGING = os.getenv("DEBUGGING", default=False) -+logging.getLogger(__name__).setLevel(logging.INFO) -+log = logging.getLogger(__name__) - - - @pytest.fixture(scope="function") -@@ -83,6 +88,7 @@ def add_a_group_with_users(request, topo): - 'cn': USER_NAME, - 'uidNumber': f'{num}', - 'gidNumber': f'{num}', -+ 'description': f'Description for {USER_NAME}', - 'homeDirectory': f'/home/{USER_NAME}' - }) - users_list.append(user) -@@ -95,9 +101,10 @@ def add_a_group_with_users(request, topo): - # If the server crashed, start it again to do the cleanup - if not topo.standalone.status(): - topo.standalone.start() -- for user in users_list: -- user.delete() -- group.delete() -+ if not DEBUGGING: -+ for user in users_list: -+ user.delete() -+ group.delete() - - request.addfinalizer(fin) - -@@ -124,6 +131,38 @@ def set_small_idlistscanlimit(request, topo): - - request.addfinalizer(fin) - -+ -+@pytest.fixture(scope="function") -+def set_description_index(request, topo, add_a_group_with_users): -+ """ -+ Set some description values and description index without reindexing. -+ """ -+ inst = topo.standalone -+ backends = Backends(inst) -+ backend = backends.get(DEFAULT_BENAME) -+ indexes = backend.get_indexes() -+ attr = 'description' -+ -+ def fin(always=False): -+ if always or not DEBUGGING: -+ try: -+ idx = indexes.get(attr) -+ idx.delete() -+ except ldap.NO_SUCH_OBJECT: -+ pass -+ -+ request.addfinalizer(fin) -+ fin(always=True) -+ index = indexes.create(properties={ -+ 'cn': attr, -+ 'nsSystemIndex': 'false', -+ 'nsIndexType': ['eq', 'pres', 'sub'] -+ }) -+ # Restart needed with lmdb (to open the dbi handle) -+ inst.restart() -+ return (indexes, attr) -+ -+ - #unstable or unstatus tests, skipped for now - @pytest.mark.flaky(max_runs=2, min_passes=1) - @pytest.mark.skipif(ds_is_older("1.4.4.4"), reason="Not implemented") -@@ -346,6 +385,102 @@ def test_task_status(topo): - assert reindex_task.get_exit_code() == 0 - - -+def count_keys(inst, bename, attr, prefix=''): -+ indexfile = os.path.join(inst.dbdir, bename, attr + '.db') -+ # (bdb - we should also accept a version number for .db suffix) -+ for f in glob.glob(f'{indexfile}*'): -+ indexfile = f -+ -+ inst.stop() -+ output = inst.dbscan(None, None, args=['-f', indexfile, '-A'], stopping=False).decode() -+ inst.start() -+ count = 0 -+ regexp = f'^KEY: {re.escape(prefix)}' -+ for match in re.finditer(regexp, output, flags=re.MULTILINE): -+ count += 1 -+ log.info(f"count_keys found {count} keys starting with '{prefix}' in {indexfile}") -+ return count -+ -+ -+def test_reindex_task_with_type(topo, set_description_index): -+ """Check that reindex task works as expected when index type is specified. 
-+ -+ :id: 0c7f2fda-69f6-11ef-9eb8-083a88554478 -+ :setup: Standalone instance -+ - with 100 users having description attribute -+ - with description:eq,pres,sub index entry but not yet reindexed -+ :steps: -+ 1. Set description in suffix entry -+ 2. Count number of equality keys in description index -+ 3. Start a Reindex task on description:eq,pres and wait for completion -+ 4. Check the task status and exit code -+ 5. Count the equality, presence and substring keys in description index -+ 6. Start a Reindex task on description and wait for completion -+ 7. Check the task status and exit code -+ 8. Count the equality, presence and substring keys in description index -+ -+ :expectedresults: -+ 1. Success -+ 2. Should be either no key (bdb) or a single one (lmdb) -+ 3. Success -+ 4. Success -+ 5. Should have: more equality keys than in step 2 -+ one presence key -+ some substrings keys -+ 6. Success -+ 7. Success -+ 8. Should have same counts than in step 5 -+ """ -+ (indexes, attr) = set_description_index -+ inst = topo.standalone -+ if not inst.is_dbi_supported(): -+ pytest.skip('This test requires that dbscan supports -A option') -+ # modify indexed value -+ Domain(inst, DEFAULT_SUFFIX).replace(attr, f'test_before_reindex') -+ -+ keys1 = count_keys(inst, DEFAULT_BENAME, attr, prefix='=') -+ assert keys1 <= 1 -+ -+ tasks = Tasks(topo.standalone) -+ # completed reindex tasks MUST have a status because freeipa check it. -+ -+ # Reindex attr with eq,pres types -+ log.info(f'Reindex {attr} with eq,pres types') -+ tasks.reindex( -+ suffix=DEFAULT_SUFFIX, -+ attrname=f'{attr}:eq,pres', -+ args={TASK_WAIT: True} -+ ) -+ reindex_task = Task(topo.standalone, tasks.dn) -+ assert reindex_task.status() -+ assert reindex_task.get_exit_code() == 0 -+ -+ keys2e = count_keys(inst, DEFAULT_BENAME, attr, prefix='=') -+ keys2p = count_keys(inst, DEFAULT_BENAME, attr, prefix='+') -+ keys2s = count_keys(inst, DEFAULT_BENAME, attr, prefix='*') -+ assert keys2e > keys1 -+ assert keys2p > 0 -+ assert keys2s > 0 -+ -+ # Reindex attr without types -+ log.info(f'Reindex {attr} without types') -+ tasks.reindex( -+ suffix=DEFAULT_SUFFIX, -+ attrname=attr, -+ args={TASK_WAIT: True} -+ ) -+ reindex_task = Task(topo.standalone, tasks.dn) -+ assert reindex_task.status() -+ assert reindex_task.get_exit_code() == 0 -+ -+ keys3e = count_keys(inst, DEFAULT_BENAME, attr, prefix='=') -+ keys3p = count_keys(inst, DEFAULT_BENAME, attr, prefix='+') -+ keys3s = count_keys(inst, DEFAULT_BENAME, attr, prefix='*') -+ assert keys3e == keys2e -+ assert keys3p == keys2p -+ assert keys3s == keys2s -+ -+ - def test_task_and_be(topo, add_backend_and_ldif_50K_users): - """Check that backend is writable after finishing a tasks - -diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c -index cfd9de268..5f8e36cdc 100644 ---- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c -+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c -@@ -1150,6 +1150,8 @@ process_db2index_attrs(Slapi_PBlock *pb, ImportCtx_t *ctx) - * TBD - */ - char **attrs = NULL; -+ char *attrname = NULL; -+ char *pt = NULL; - int i; - - slapi_pblock_get(pb, SLAPI_DB2INDEX_ATTRS, &attrs); -@@ -1157,7 +1159,13 @@ process_db2index_attrs(Slapi_PBlock *pb, ImportCtx_t *ctx) - for (i = 0; attrs && attrs[i]; i++) { - switch (attrs[i][0]) { - case 't': /* attribute type to index */ -- slapi_ch_array_add(&ctx->indexAttrs, slapi_ch_strdup(attrs[i] + 1)); -+ attrname = slapi_ch_strdup(attrs[i] + 1); -+ /* Strip index type */ -+ pt 
= strchr(attrname, ':'); -+ if (pt != NULL) { -+ *pt = '\0'; -+ } -+ slapi_ch_array_add(&ctx->indexAttrs, attrname); - break; - case 'T': /* VLV Search to index */ - slapi_ch_array_add(&ctx->indexVlvs, get_vlv_dbname(attrs[i] + 1)); --- -2.46.0 - diff --git a/0001-Issue-6544-logconv.py-python3-magic-conflicts-with-p.patch b/0001-Issue-6544-logconv.py-python3-magic-conflicts-with-p.patch new file mode 100644 index 0000000..6657c79 --- /dev/null +++ b/0001-Issue-6544-logconv.py-python3-magic-conflicts-with-p.patch @@ -0,0 +1,53 @@ +From fc7f5aa01e245c7c2e35b01d171dbd5a6dc75db4 Mon Sep 17 00:00:00 2001 +From: Viktor Ashirov +Date: Sat, 25 Jan 2025 13:54:33 +0100 +Subject: [PATCH] Issue 6544 - logconv.py: python3-magic conflicts with + python3-file-magic + +Bug Description: +python3-magic and python3-file-magic can't be installed simultaneously, +python3-magic is not packaged for EL10. + +Fix Description: +Use python3-file-magic instead. + +Issue identified and fix suggested by Adam Williamson. + +Fixes: https://github.com/389ds/389-ds-base/issues/6544 + +Reviewed by: @mreynolds389 (Thanks!) +--- + ldap/admin/src/logconv.py | 3 +-- + rpm/389-ds-base.spec.in | 2 +- + 2 files changed, 2 insertions(+), 3 deletions(-) + +diff --git a/ldap/admin/src/logconv.py b/ldap/admin/src/logconv.py +index 566f9af38..2fb5bb8c1 100755 +--- a/ldap/admin/src/logconv.py ++++ b/ldap/admin/src/logconv.py +@@ -1798,8 +1798,7 @@ class logAnalyser: + return None + + try: +- mime = magic.Magic(mime=True) +- filetype = mime.from_file(filepath) ++ filetype = magic.detect_from_filename(filepath).mime_type + + # List of supported compression types + compressed_mime_types = [ +diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in +index 3146b9186..3c6e95938 100644 +--- a/rpm/389-ds-base.spec.in ++++ b/rpm/389-ds-base.spec.in +@@ -298,7 +298,7 @@ Requires: json-c + # Log compression + Requires: zlib-devel + # logconv.py, MIME type +-Requires: python-magic ++Requires: python3-file-magic + # Picks up our systemd deps. + %{?systemd_requires} + +-- +2.48.0 + diff --git a/0002-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch b/0002-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch new file mode 100644 index 0000000..da75320 --- /dev/null +++ b/0002-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch @@ -0,0 +1,311 @@ +From 1aabba9b17f99eb1a460be3305aad4b7099b9fe6 Mon Sep 17 00:00:00 2001 +From: progier389 +Date: Wed, 13 Nov 2024 15:31:35 +0100 +Subject: [PATCH] Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work + properly (#6400) + +* Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work properly + +Several issues: + +After restarting the server nsslapd-mdb-max-dbs may not be high enough to add a new backend +because the value computation is wrong. +dbscan fails to open the database if nsslapd-mdb-max-dbs has been increased. +dbscan crashes when closing the database (typically when using -S) +When starting the instance the nsslapd-mdb-max-dbs parameter is increased to ensure that a new backend may be added. +When dse.ldif path is not specified, the db environment is now open using the INFO.mdb data instead of using the default values. +synchronization between thread closure and database context destruction is hardened +Issue: #6374 + +Reviewed by: @tbordaz , @vashirov (Thanks!) 
+ +(cherry picked from commit 56cd3389da608a3f6eeee58d20dffbcd286a8033) +--- + .../tests/suites/config/config_test.py | 86 +++++++++++++++++++ + ldap/servers/slapd/back-ldbm/back-ldbm.h | 2 + + .../slapd/back-ldbm/db-mdb/mdb_config.c | 17 ++-- + .../back-ldbm/db-mdb/mdb_import_threads.c | 9 +- + .../slapd/back-ldbm/db-mdb/mdb_instance.c | 8 ++ + ldap/servers/slapd/back-ldbm/dbimpl.c | 2 +- + ldap/servers/slapd/back-ldbm/import.c | 14 ++- + 7 files changed, 128 insertions(+), 10 deletions(-) + +diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py +index c3e26eed4..08544594f 100644 +--- a/dirsrvtests/tests/suites/config/config_test.py ++++ b/dirsrvtests/tests/suites/config/config_test.py +@@ -17,6 +17,7 @@ from lib389.topologies import topology_m2, topology_st as topo + from lib389.utils import * + from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX, DEFAULT_BENAME + from lib389._mapped_object import DSLdapObjects ++from lib389.agreement import Agreements + from lib389.cli_base import FakeArgs + from lib389.cli_conf.backend import db_config_set + from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +@@ -27,6 +28,8 @@ from lib389.cos import CosPointerDefinitions, CosTemplates + from lib389.backend import Backends, DatabaseConfig + from lib389.monitor import MonitorLDBM, Monitor + from lib389.plugins import ReferentialIntegrityPlugin ++from lib389.replica import BootstrapReplicationManager, Replicas ++from lib389.passwd import password_generate + + pytestmark = pytest.mark.tier0 + +@@ -36,6 +39,8 @@ PSTACK_CMD = '/usr/bin/pstack' + logging.getLogger(__name__).setLevel(logging.INFO) + log = logging.getLogger(__name__) + ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++ + @pytest.fixture(scope="module") + def big_file(): + TEMP_BIG_FILE = '' +@@ -813,6 +818,87 @@ def test_numlisteners_limit(topo): + assert numlisteners[0] == '4' + + ++def bootstrap_replication(inst_from, inst_to, creds): ++ manager = BootstrapReplicationManager(inst_to) ++ rdn_val = 'replication manager' ++ if manager.exists(): ++ manager.delete() ++ manager.create(properties={ ++ 'cn': rdn_val, ++ 'uid': rdn_val, ++ 'userPassword': creds ++ }) ++ for replica in Replicas(inst_to).list(): ++ replica.remove_all('nsDS5ReplicaBindDNGroup') ++ replica.replace('nsDS5ReplicaBindDN', manager.dn) ++ for agmt in Agreements(inst_from).list(): ++ agmt.replace('nsDS5ReplicaBindDN', manager.dn) ++ agmt.replace('nsDS5ReplicaCredentials', creds) ++ ++ ++@pytest.mark.skipif(get_default_db_lib() != "mdb", reason="This test requires lmdb") ++def test_lmdb_autotuned_maxdbs(topology_m2, request): ++ """Verify that after restart, nsslapd-mdb-max-dbs is large enough to add a new backend. ++ ++ :id: 0272d432-9080-11ef-8f40-482ae39447e5 ++ :setup: Two suppliers configuration ++ :steps: ++ 1. loop 20 times ++ 3. In 1 loop: restart instance ++ 3. In 1 loop: add a new backend ++ 4. In 1 loop: check that instance is still alive ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ """ ++ ++ s1 = topology_m2.ms["supplier1"] ++ s2 = topology_m2.ms["supplier2"] ++ ++ backends = Backends(s1) ++ db_config = DatabaseConfig(s1) ++ # Generate the teardown finalizer ++ belist = [] ++ creds=password_generate() ++ bootstrap_replication(s2, s1, creds) ++ bootstrap_replication(s1, s2, creds) ++ ++ def fin(): ++ s1.start() ++ for be in belist: ++ be.delete() ++ ++ if not DEBUGGING: ++ request.addfinalizer(fin) ++ ++ # 1. 
Set autotuning (off-line to be able to decrease the value) ++ s1.stop() ++ dse_ldif = DSEldif(s1) ++ dse_ldif.replace(db_config.dn, 'nsslapd-mdb-max-dbs', '0') ++ os.remove(f'{s1.dbdir}/data.mdb') ++ s1.start() ++ ++ # 2. Reinitialize the db: ++ log.info("Bulk import...") ++ agmt = Agreements(s2).list()[0] ++ agmt.begin_reinit() ++ (done, error) = agmt.wait_reinit() ++ log.info(f'Bulk importresult is ({done}, {error})') ++ assert done is True ++ assert error is False ++ ++ # 3. loop 20 times ++ for idx in range(20): ++ s1.restart() ++ log.info(f'Adding backend test{idx}') ++ belist.append(backends.create(properties={'cn': f'test{idx}', ++ 'nsslapd-suffix': f'dc=test{idx}'})) ++ assert s1.status() ++ ++ ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h +index 8fea63e35..35d0ece04 100644 +--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h ++++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h +@@ -896,4 +896,6 @@ typedef struct _back_search_result_set + ((L)->size == (R)->size && !memcmp((L)->data, (R)->data, (L)->size)) + + typedef int backend_implement_init_fn(struct ldbminfo *li, config_info *config_array); ++ ++pthread_mutex_t *get_import_ctx_mutex(); + #endif /* _back_ldbm_h_ */ +diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c +index 351f54037..1f7b71442 100644 +--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c ++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c +@@ -83,7 +83,7 @@ dbmdb_compute_limits(struct ldbminfo *li) + uint64_t total_space = 0; + uint64_t avail_space = 0; + uint64_t cur_dbsize = 0; +- int nbchangelogs = 0; ++ int nbvlvs = 0; + int nbsuffixes = 0; + int nbindexes = 0; + int nbagmt = 0; +@@ -99,8 +99,8 @@ dbmdb_compute_limits(struct ldbminfo *li) + * But some tunable may be autotuned. 
+ */ + if (dbmdb_count_config_entries("(objectClass=nsMappingTree)", &nbsuffixes) || +- dbmdb_count_config_entries("(objectClass=nsIndex)", &nbsuffixes) || +- dbmdb_count_config_entries("(&(objectClass=nsds5Replica)(nsDS5Flags=1))", &nbchangelogs) || ++ dbmdb_count_config_entries("(objectClass=nsIndex)", &nbindexes) || ++ dbmdb_count_config_entries("(objectClass=vlvIndex)", &nbvlvs) || + dbmdb_count_config_entries("(objectClass=nsds5replicationagreement)", &nbagmt)) { + /* error message is already logged */ + return 1; +@@ -120,8 +120,15 @@ dbmdb_compute_limits(struct ldbminfo *li) + + info->pagesize = sysconf(_SC_PAGE_SIZE); + limits->min_readers = config_get_threadnumber() + nbagmt + DBMDB_READERS_MARGIN; +- /* Default indexes are counted in "nbindexes" so we should always have enough resource to add 1 new suffix */ +- limits->min_dbs = nbsuffixes + nbindexes + nbchangelogs + DBMDB_DBS_MARGIN; ++ /* ++ * For each suffix there are 4 databases instances: ++ * long-entryrdn, replication_changelog, id2entry and ancestorid ++ * then the indexes and the vlv and vlv cache ++ * ++ * Default indexes are counted in "nbindexes" so we should always have enough ++ * resource to add 1 new suffix ++ */ ++ limits->min_dbs = 4*nbsuffixes + nbindexes + 2*nbvlvs + DBMDB_DBS_MARGIN; + + total_space = ((uint64_t)(buf.f_blocks)) * ((uint64_t)(buf.f_bsize)); + avail_space = ((uint64_t)(buf.f_bavail)) * ((uint64_t)(buf.f_bsize)); +diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c +index 8c879da31..707a110c5 100644 +--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c ++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c +@@ -4312,9 +4312,12 @@ dbmdb_import_init_writer(ImportJob *job, ImportRole_t role) + void + dbmdb_free_import_ctx(ImportJob *job) + { +- if (job->writer_ctx) { +- ImportCtx_t *ctx = job->writer_ctx; +- job->writer_ctx = NULL; ++ ImportCtx_t *ctx = NULL; ++ pthread_mutex_lock(get_import_ctx_mutex()); ++ ctx = job->writer_ctx; ++ job->writer_ctx = NULL; ++ pthread_mutex_unlock(get_import_ctx_mutex()); ++ if (ctx) { + pthread_mutex_destroy(&ctx->workerq.mutex); + pthread_cond_destroy(&ctx->workerq.cv); + slapi_ch_free((void**)&ctx->workerq.slots); +diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c +index 6386ecf06..05f1e348d 100644 +--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c ++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c +@@ -287,6 +287,13 @@ int add_dbi(dbi_open_ctx_t *octx, backend *be, const char *fname, int flags) + slapi_ch_free((void**)&treekey.dbname); + return octx->rc; + } ++ if (treekey.dbi >= ctx->dsecfg.max_dbs) { ++ octx->rc = MDB_DBS_FULL; ++ slapi_log_err(SLAPI_LOG_ERR, "add_dbi", "Failed to open database instance %s slots: %d/%d. 
Error is %d: %s.\n", ++ treekey.dbname, treekey.dbi, ctx->dsecfg.max_dbs, octx->rc, mdb_strerror(octx->rc)); ++ slapi_ch_free((void**)&treekey.dbname); ++ return octx->rc; ++ } + if (octx->ai && octx->ai->ai_key_cmp_fn) { + octx->rc = dbmdb_update_dbi_cmp_fn(ctx, &treekey, octx->ai->ai_key_cmp_fn, octx->txn); + if (octx->rc) { +@@ -689,6 +696,7 @@ int dbmdb_make_env(dbmdb_ctx_t *ctx, int readOnly, mdb_mode_t mode) + rc = dbmdb_write_infofile(ctx); + } else { + /* No Config ==> read it from info file */ ++ ctx->dsecfg = ctx->startcfg; + } + if (rc) { + return rc; +diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c +index 86df986bd..f3bf68a9f 100644 +--- a/ldap/servers/slapd/back-ldbm/dbimpl.c ++++ b/ldap/servers/slapd/back-ldbm/dbimpl.c +@@ -505,7 +505,7 @@ int dblayer_show_statistics(const char *dbimpl_name, const char *dbhome, FILE *f + li->li_plugin = be->be_database; + li->li_plugin->plg_name = (char*) "back-ldbm-dbimpl"; + li->li_plugin->plg_libpath = (char*) "libback-ldbm"; +- li->li_directory = (char*)dbhome; ++ li->li_directory = get_li_directory(dbhome); + + /* Initialize database plugin */ + rc = dbimpl_setup(li, dbimpl_name); +diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c +index 2bb8cb581..30ec462fa 100644 +--- a/ldap/servers/slapd/back-ldbm/import.c ++++ b/ldap/servers/slapd/back-ldbm/import.c +@@ -27,6 +27,9 @@ + #define NEED_DN_NORM_SP -25 + #define NEED_DN_NORM_BT -26 + ++/* Protect against import context destruction */ ++static pthread_mutex_t import_ctx_mutex = PTHREAD_MUTEX_INITIALIZER; ++ + + /********** routines to manipulate the entry fifo **********/ + +@@ -143,6 +146,14 @@ ldbm_back_wire_import(Slapi_PBlock *pb) + + /* Threads management */ + ++/* Return the mutex that protects against import context destruction */ ++pthread_mutex_t * ++get_import_ctx_mutex() ++{ ++ return &import_ctx_mutex; ++} ++ ++ + /* tell all the threads to abort */ + void + import_abort_all(ImportJob *job, int wait_for_them) +@@ -151,7 +162,7 @@ import_abort_all(ImportJob *job, int wait_for_them) + + /* tell all the worker threads to abort */ + job->flags |= FLAG_ABORT; +- ++ pthread_mutex_lock(&import_ctx_mutex); + for (worker = job->worker_list; worker; worker = worker->next) + worker->command = ABORT; + +@@ -167,6 +178,7 @@ import_abort_all(ImportJob *job, int wait_for_them) + } + } + } ++ pthread_mutex_unlock(&import_ctx_mutex); + } + + +-- +2.48.0 + diff --git a/0003-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch b/0003-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch new file mode 100644 index 0000000..db5eb9a --- /dev/null +++ b/0003-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch @@ -0,0 +1,72 @@ +From 6b80ba631161219093267e8e4c885bfc392d3d61 Mon Sep 17 00:00:00 2001 +From: progier389 +Date: Fri, 6 Sep 2024 14:45:06 +0200 +Subject: [PATCH] Issue 6090 - Fix dbscan options and man pages (#6315) + +* Issue 6090 - Fix dbscan options and man pages + +dbscan -d option is dangerously confusing as it removes a database instance while in db_stat it identify the database +(cf issue #5609 ). +This fix implements long options in dbscan, rename -d in --remove, and requires a new --do-it option for action that change the database content. +The fix should also align both the usage and the dbscan man page with the new set of options + +Issue: #6090 + +Reviewed by: @tbordaz, @droideck (Thanks!) 
+ +(cherry picked from commit 25e1d16887ebd299dfe0088080b9ee0deec1e41f) +--- + ldap/servers/slapd/back-ldbm/dbimpl.c | 5 ++++- + src/lib389/lib389/cli_ctl/dblib.py | 13 ++++++++++++- + 2 files changed, 16 insertions(+), 2 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c +index f3bf68a9f..83662df8c 100644 +--- a/ldap/servers/slapd/back-ldbm/dbimpl.c ++++ b/ldap/servers/slapd/back-ldbm/dbimpl.c +@@ -481,7 +481,10 @@ int dblayer_private_close(Slapi_Backend **be, dbi_env_t **env, dbi_db_t **db) + slapi_ch_free_string(&li->li_directory); + slapi_ch_free((void**)&li->li_dblayer_private); + slapi_ch_free((void**)&li->li_dblayer_config); +- ldbm_config_destroy(li); ++ if (dblayer_is_lmdb(*be)) { ++ /* Generate use after free and double free in bdb case */ ++ ldbm_config_destroy(li); ++ } + slapi_ch_free((void**)&(*be)->be_database); + slapi_ch_free((void**)&(*be)->be_instance_info); + slapi_ch_free((void**)be); +diff --git a/src/lib389/lib389/cli_ctl/dblib.py b/src/lib389/lib389/cli_ctl/dblib.py +index 053a72d61..318ae5ae9 100644 +--- a/src/lib389/lib389/cli_ctl/dblib.py ++++ b/src/lib389/lib389/cli_ctl/dblib.py +@@ -199,6 +199,14 @@ def run_dbscan(args): + return output + + ++def does_dbscan_need_do_it(): ++ prefix = os.environ.get('PREFIX', "") ++ prog = f'{prefix}/bin/dbscan' ++ args = [ prog, '-h' ] ++ output = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.STDOUT) ++ return '--do-it' in output.stdout ++ ++ + def export_changelog(be, dblib): + # Export backend changelog + if not be['has_changelog']: +@@ -217,7 +225,10 @@ def import_changelog(be, dblib): + try: + cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname'] + _log.info(f"Importing changelog {cl5dbname} from {be['cl5name']}") +- run_dbscan(['-D', dblib, '-f', cl5dbname, '--import', be['cl5name'], '--do-it']) ++ if does_dbscan_need_do_it(): ++ run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name'], '--do-it']) ++ else: ++ run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name']]) + return True + except subprocess.CalledProcessError as e: + return False +-- +2.48.0 + diff --git a/0004-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch b/0004-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch new file mode 100644 index 0000000..61c21ff --- /dev/null +++ b/0004-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch @@ -0,0 +1,146 @@ +From dc8032856d51c382e266eea72f66284e70a0e40c Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 31 Jan 2025 08:54:27 -0500 +Subject: [PATCH] Issue 6489 - After log rotation refresh the FD pointer + +Description: + +When flushing a log buffer we get a FD for log prior to checking if the +log should be rotated. If the log is rotated that FD reference is now +invalid, and it needs to be refrehed before proceeding + +Relates: https://github.com/389ds/389-ds-base/issues/6489 + +Reviewed by: tbordaz(Thanks!) 
+--- + .../suites/logging/log_flush_rotation_test.py | 81 +++++++++++++++++++ + ldap/servers/slapd/log.c | 18 +++++ + 2 files changed, 99 insertions(+) + create mode 100644 dirsrvtests/tests/suites/logging/log_flush_rotation_test.py + +diff --git a/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py b/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py +new file mode 100644 +index 000000000..b33a622e1 +--- /dev/null ++++ b/dirsrvtests/tests/suites/logging/log_flush_rotation_test.py +@@ -0,0 +1,81 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2025 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++# ++import os ++import logging ++import time ++import pytest ++from lib389._constants import DEFAULT_SUFFIX, PW_DM ++from lib389.tasks import ImportTask ++from lib389.idm.user import UserAccounts ++from lib389.topologies import topology_st as topo ++ ++ ++log = logging.getLogger(__name__) ++ ++ ++def test_log_flush_and_rotation_crash(topo): ++ """Make sure server does not crash whening flushing a buffer and rotating ++ the log at the same time ++ ++ :id: d4b0af2f-48b2-45f5-ae8b-f06f692c3133 ++ :setup: Standalone Instance ++ :steps: ++ 1. Enable all logs ++ 2. Enable log buffering for all logs ++ 3. Set rotation time unit to 1 minute ++ 4. Make sure server is still running after 1 minute ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ """ ++ ++ inst = topo.standalone ++ ++ # Enable logging and buffering ++ inst.config.set("nsslapd-auditlog-logging-enabled", "on") ++ inst.config.set("nsslapd-accesslog-logbuffering", "on") ++ inst.config.set("nsslapd-auditlog-logbuffering", "on") ++ inst.config.set("nsslapd-errorlog-logbuffering", "on") ++ inst.config.set("nsslapd-securitylog-logbuffering", "on") ++ ++ # Set rotation policy to trigger rotation asap ++ inst.config.set("nsslapd-accesslog-logrotationtimeunit", "minute") ++ inst.config.set("nsslapd-auditlog-logrotationtimeunit", "minute") ++ inst.config.set("nsslapd-errorlog-logrotationtimeunit", "minute") ++ inst.config.set("nsslapd-securitylog-logrotationtimeunit", "minute") ++ ++ # ++ # Performs ops to populate all the logs ++ # ++ # Access & audit log ++ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) ++ user = users.create_test_user() ++ user.set("userPassword", PW_DM) ++ # Security log ++ user.bind(PW_DM) ++ # Error log ++ import_task = ImportTask(inst) ++ import_task.import_suffix_from_ldif(ldiffile="/not/here", ++ suffix=DEFAULT_SUFFIX) ++ ++ # Wait a minute and make sure the server did not crash ++ log.info("Sleep until logs are flushed and rotated") ++ time.sleep(61) ++ ++ assert inst.status() ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main(["-s", CURRENT_FILE]) ++ +diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c +index 76f2b6768..7e2c980a4 100644 +--- a/ldap/servers/slapd/log.c ++++ b/ldap/servers/slapd/log.c +@@ -6746,6 +6746,23 @@ log_refresh_state(int32_t log_type) + return 0; + } + } ++static LOGFD ++log_refresh_fd(int32_t log_type) ++{ ++ switch (log_type) { ++ case SLAPD_ACCESS_LOG: ++ return loginfo.log_access_fdes; ++ case SLAPD_SECURITY_LOG: ++ return loginfo.log_security_fdes; ++ case SLAPD_AUDIT_LOG: ++ return loginfo.log_audit_fdes; ++ case SLAPD_AUDITFAIL_LOG: ++ return loginfo.log_auditfail_fdes; ++ case SLAPD_ERROR_LOG: ++ return loginfo.log_error_fdes; ++ } 
++ return NULL; ++} + + /* this function assumes the lock is already acquired */ + /* if sync_now is non-zero, data is flushed to physical storage */ +@@ -6857,6 +6874,7 @@ log_flush_buffer(LogBufferInfo *lbi, int log_type, int sync_now, int locked) + rotationtime_secs); + } + log_state = log_refresh_state(log_type); ++ fd = log_refresh_fd(log_type); + } + + if (log_state & LOGGING_NEED_TITLE) { +-- +2.48.0 + diff --git a/0005-Issue-6436-MOD-on-a-large-group-slow-if-substring-in.patch b/0005-Issue-6436-MOD-on-a-large-group-slow-if-substring-in.patch new file mode 100644 index 0000000..067a418 --- /dev/null +++ b/0005-Issue-6436-MOD-on-a-large-group-slow-if-substring-in.patch @@ -0,0 +1,236 @@ +From 90460bfa66fb77118967927963572f69e097c4eb Mon Sep 17 00:00:00 2001 +From: James Chapman +Date: Wed, 29 Jan 2025 17:41:55 +0000 +Subject: [PATCH] Issue 6436 - MOD on a large group slow if substring index is + present (#6437) + +Bug Description: If the substring index is configured for the group +membership attribute ( member or uniqueMember ), the removal of a +member from a large static group is pretty slow. + +Fix Description: A solution to this issue would be to introduce +a new index to track a membership atttribute index. In the interm, +we add a check to healthcheck to inform the user of the implications +of this configuration. + +Fixes: https://github.com/389ds/389-ds-base/issues/6436 + +Reviewed by: @Firstyear, @tbordaz, @droideck (Thanks) +--- + .../suites/healthcheck/health_config_test.py | 89 ++++++++++++++++++- + src/lib389/lib389/lint.py | 15 ++++ + src/lib389/lib389/plugins.py | 37 +++++++- + 3 files changed, 137 insertions(+), 4 deletions(-) + +diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py +index e1e5398ab..f09bc8bb8 100644 +--- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py ++++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py +@@ -167,6 +167,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st): + MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config' + + standalone = topology_st.standalone ++ standalone.config.set("nsslapd-accesslog-logbuffering", "on") + + log.info('Enable RI plugin') + plugin = ReferentialIntegrityPlugin(standalone) +@@ -188,7 +189,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st): + + + def test_healthcheck_MO_plugin_missing_indexes(topology_st): +- """Check if HealthCheck returns DSMOLE0002 code ++ """Check if HealthCheck returns DSMOLE0001 code + + :id: 236b0ec2-13da-48fb-b65a-db7406d56d5d + :setup: Standalone instance +@@ -203,8 +204,8 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st): + :expectedresults: + 1. Success + 2. Success +- 3. Healthcheck reports DSMOLE0002 code and related details +- 4. Healthcheck reports DSMOLE0002 code and related details ++ 3. Healthcheck reports DSMOLE0001 code and related details ++ 4. Healthcheck reports DSMOLE0001 code and related details + 5. Success + 6. Healthcheck reports no issue found + 7. 
Healthcheck reports no issue found +@@ -214,6 +215,7 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st): + MO_GROUP_ATTR = 'creatorsname' + + standalone = topology_st.standalone ++ standalone.config.set("nsslapd-accesslog-logbuffering", "on") + + log.info('Enable MO plugin') + plugin = MemberOfPlugin(standalone) +@@ -236,6 +238,87 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st): + standalone.restart() + + ++def test_healthcheck_MO_plugin_substring_index(topology_st): ++ """Check if HealthCheck returns DSMOLE0002 code when the ++ member, uniquemember attribute contains a substring index type ++ ++ :id: 10954811-24ac-4886-8183-e30892f8e02d ++ :setup: Standalone instance ++ :steps: ++ 1. Create DS instance ++ 2. Configure the instance with MO Plugin ++ 3. Change index type to substring for member attribute ++ 4. Use HealthCheck without --json option ++ 5. Use HealthCheck with --json option ++ 6. Change index type back to equality for member attribute ++ 7. Use HealthCheck without --json option ++ 8. Use HealthCheck with --json option ++ 9. Change index type to substring for uniquemember attribute ++ 10. Use HealthCheck without --json option ++ 11. Use HealthCheck with --json option ++ 12. Change index type back to equality for uniquemember attribute ++ 13. Use HealthCheck without --json option ++ 14. Use HealthCheck with --json option ++ ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Healthcheck reports DSMOLE0002 code and related details ++ 5. Healthcheck reports DSMOLE0002 code and related details ++ 6. Success ++ 7. Healthcheck reports no issue found ++ 8. Healthcheck reports no issue found ++ 9. Success ++ 10. Healthcheck reports DSMOLE0002 code and related details ++ 11. Healthcheck reports DSMOLE0002 code and related details ++ 12. Success ++ 13. Healthcheck reports no issue found ++ 14. 
Healthcheck reports no issue found ++ """ ++ ++ RET_CODE = 'DSMOLE0002' ++ MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config' ++ UNIQUE_MEMBER_DN = 'cn=uniquemember,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config' ++ ++ standalone = topology_st.standalone ++ standalone.config.set("nsslapd-accesslog-logbuffering", "on") ++ ++ log.info('Enable MO plugin') ++ plugin = MemberOfPlugin(standalone) ++ plugin.disable() ++ plugin.enable() ++ ++ log.info('Change the index type of the member attribute index to substring') ++ index = Index(topology_st.standalone, MEMBER_DN) ++ index.replace('nsIndexType', 'sub') ++ ++ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) ++ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) ++ ++ log.info('Set the index type of the member attribute index back to eq') ++ index.replace('nsIndexType', 'eq') ++ ++ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) ++ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) ++ ++ log.info('Change the index type of the uniquemember attribute index to substring') ++ index = Index(topology_st.standalone, UNIQUE_MEMBER_DN) ++ index.replace('nsIndexType', 'sub') ++ ++ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) ++ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) ++ ++ log.info('Set the index type of the uniquemember attribute index back to eq') ++ index.replace('nsIndexType', 'eq') ++ ++ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) ++ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) ++ ++ # Restart the instance after changing the plugin to avoid breaking the other tests ++ standalone.restart() ++ ++ + @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") + def test_healthcheck_virtual_attr_incorrectly_indexed(topology_st): + """Check if HealthCheck returns DSVIRTLE0001 code +diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py +index d0747f0f4..460bf64fc 100644 +--- a/src/lib389/lib389/lint.py ++++ b/src/lib389/lib389/lint.py +@@ -270,6 +270,21 @@ database after adding the missing index type. Here is an example using dsconf: + """ + } + ++DSMOLE0002 = { ++ 'dsle': 'DSMOLE0002', ++ 'severity': 'LOW', ++ 'description': 'Removal of a member can be slow ', ++ 'items': ['cn=memberof plugin,cn=plugins,cn=config', ], ++ 'detail': """If the substring index is configured for a membership attribute. The removal of a member ++from the large group can be slow. ++ ++""", ++ 'fix': """If not required, you can remove the substring index type using dsconf: ++ ++ # dsconf slapd-YOUR_INSTANCE backend index set --attr=ATTR BACKEND --del-type=sub ++""" ++} ++ + # Disk Space check. 
Note - PARTITION is replaced by the calling function + DSDSLE0001 = { + 'dsle': 'DSDSLE0001', +diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py +index 67af93a14..31bbfa502 100644 +--- a/src/lib389/lib389/plugins.py ++++ b/src/lib389/lib389/plugins.py +@@ -12,7 +12,7 @@ import copy + import os.path + from lib389 import tasks + from lib389._mapped_object import DSLdapObjects, DSLdapObject +-from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001 ++from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001, DSMOLE0002 + from lib389.utils import ensure_str, ensure_list_bytes + from lib389.schema import Schema + from lib389._constants import ( +@@ -827,6 +827,41 @@ class MemberOfPlugin(Plugin): + report['check'] = f'memberof:attr_indexes' + yield report + ++ def _lint_member_substring_index(self): ++ if self.status(): ++ from lib389.backend import Backends ++ backends = Backends(self._instance).list() ++ membership_attrs = ['member', 'uniquemember'] ++ container = self.get_attr_val_utf8_l("nsslapd-plugincontainerscope") ++ for backend in backends: ++ suffix = backend.get_attr_val_utf8_l('nsslapd-suffix') ++ if suffix == "cn=changelog": ++ # Always skip retro changelog ++ continue ++ if container is not None: ++ # Check if this backend is in the scope ++ if not container.endswith(suffix): ++ # skip this backend that is not in the scope ++ continue ++ indexes = backend.get_indexes() ++ for attr in membership_attrs: ++ report = copy.deepcopy(DSMOLE0002) ++ try: ++ index = indexes.get(attr) ++ types = index.get_attr_vals_utf8_l("nsIndexType") ++ if "sub" in types: ++ report['detail'] = report['detail'].replace('ATTR', attr) ++ report['detail'] = report['detail'].replace('BACKEND', suffix) ++ report['fix'] = report['fix'].replace('ATTR', attr) ++ report['fix'] = report['fix'].replace('BACKEND', suffix) ++ report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid) ++ report['items'].append(suffix) ++ report['items'].append(attr) ++ report['check'] = f'attr:substring_index' ++ yield report ++ except KeyError: ++ continue ++ + def get_attr(self): + """Get memberofattr attribute""" + +-- +2.48.0 + diff --git a/0006-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch b/0006-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch new file mode 100644 index 0000000..5173452 --- /dev/null +++ b/0006-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch @@ -0,0 +1,70 @@ +From dcb6298db5bfef4b2541f7c52682d153b424bfa7 Mon Sep 17 00:00:00 2001 +From: James Chapman +Date: Tue, 4 Feb 2025 15:40:16 +0000 +Subject: [PATCH] Issue 6566 - RI plugin failure to handle a modrdn for rename + of member of multiple groups (#6567) + +Bug description: +With AM and RI plugins enabled, the rename of a user that is part of multiple groups +fails with a "value exists" error. + +Fix description: +For a modrdn the RI plugin creates a new DN, before a modify is attempted check +if the new DN already exists in the attr being updated. 
+ +Fixes: https://github.com/389ds/389-ds-base/issues/6566 + +Reviewed by: @progier389 , @tbordaz (Thank you) +--- + ldap/servers/plugins/referint/referint.c | 15 ++++++++++++--- + 1 file changed, 12 insertions(+), 3 deletions(-) + +diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c +index 468fdc239..218863ea5 100644 +--- a/ldap/servers/plugins/referint/referint.c ++++ b/ldap/servers/plugins/referint/referint.c +@@ -924,6 +924,7 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */ + { + Slapi_Mods *smods = NULL; + char *newDN = NULL; ++ struct berval bv = {0}; + char **dnParts = NULL; + char *sval = NULL; + char *newvalue = NULL; +@@ -1026,22 +1027,30 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */ + } + /* else: normalize_rc < 0) Ignore the DN normalization error for now. */ + ++ bv.bv_val = newDN; ++ bv.bv_len = strlen(newDN); + p = PL_strstr(sval, slapi_sdn_get_ndn(origDN)); + if (p == sval) { + /* (case 1) */ + slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval); +- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN); +- ++ /* Add only if the attr value does not exist */ ++ if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) { ++ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN); ++ } + } else if (p) { + /* (case 2) */ + slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval); + *p = '\0'; + newvalue = slapi_ch_smprintf("%s%s", sval, newDN); +- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue); ++ /* Add only if the attr value does not exist */ ++ if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) { ++ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue); ++ } + slapi_ch_free_string(&newvalue); + } + /* else: value does not include the modified DN. Ignore it. */ + slapi_ch_free_string(&sval); ++ bv = (struct berval){0}; + } + rc = _do_modify(mod_pb, entrySDN, slapi_mods_get_ldapmods_byref(smods)); + if (rc) { +-- +2.48.0 + diff --git a/0007-Issue-6258-Mitigate-race-condition-in-paged_results_.patch b/0007-Issue-6258-Mitigate-race-condition-in-paged_results_.patch new file mode 100644 index 0000000..1e97bca --- /dev/null +++ b/0007-Issue-6258-Mitigate-race-condition-in-paged_results_.patch @@ -0,0 +1,43 @@ +From be57ea839934c29b3f4db450a65281aa30a72caf Mon Sep 17 00:00:00 2001 +From: Masahiro Matsuya +Date: Wed, 5 Feb 2025 11:38:28 +0900 +Subject: [PATCH] Issue 6258 - Mitigate race condition in paged_results_test.py + (#6433) + +The regression test dirsrvtests/tests/suites/paged_results/paged_results_test.py::test_multi_suffix_search has a race condition causing it to fail due to multiple queries potentially writing their logs out of chronological order. + +This failure is mitigated by sorting the retrieved access_log_lines by their "op" value. This ensures the log lines are in chronological order, as expected by the assertions at the end of test_multi_suffix_search(). + +Helps fix: #6258 + +Reviewed by: @droideck , @progier389 (Thanks!) 
+ +Co-authored-by: Anuar Beisembayev <111912342+abeisemb@users.noreply.github.com> +--- + dirsrvtests/tests/suites/paged_results/paged_results_test.py | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py +index eaf0e0da9..fca48db0f 100644 +--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py ++++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py +@@ -7,6 +7,7 @@ + # --- END COPYRIGHT BLOCK --- + # + import socket ++import re + from random import sample, randrange + + import pytest +@@ -1126,6 +1127,8 @@ def test_multi_suffix_search(topology_st, create_user, new_suffixes): + topology_st.standalone.restart(timeout=10) + + access_log_lines = topology_st.standalone.ds_access_log.match('.*pr_cookie=.*') ++ # Sort access_log_lines by op number to mitigate race condition effects. ++ access_log_lines.sort(key=lambda x: int(re.search(r"op=(\d+) RESULT", x).group(1))) + pr_cookie_list = ([line.rsplit('=', 1)[-1] for line in access_log_lines]) + pr_cookie_list = [int(pr_cookie) for pr_cookie in pr_cookie_list] + log.info('Assert that last pr_cookie == -1 and others pr_cookie == 0') +-- +2.48.0 + diff --git a/0008-Issue-6229-After-an-initial-failure-subsequent-onlin.patch b/0008-Issue-6229-After-an-initial-failure-subsequent-onlin.patch new file mode 100644 index 0000000..d9528d1 --- /dev/null +++ b/0008-Issue-6229-After-an-initial-failure-subsequent-onlin.patch @@ -0,0 +1,566 @@ +From 8e3a484f88fc9f9a3fcdfdd685d4ad2ed3cbe5d9 Mon Sep 17 00:00:00 2001 +From: progier389 +Date: Fri, 28 Jun 2024 18:56:49 +0200 +Subject: [PATCH] Issue 6229 - After an initial failure, subsequent online + backups fail (#6230) + +* Issue 6229 - After an initial failure, subsequent online backups will not work + +Several issues related to backup task error handling: +Backends stay busy after the failure +Exit code is 0 in some cases +Crash if failing to open the backup directory +And a more general one: +lib389 Task DN collision + +Solutions: +Always reset the busy flags that have been set +Ensure that 0 is not returned in error case +Avoid closing NULL directory descriptor +Use a timestamp having milliseconds precision to create the task DN + +Issue: #6229 + +Reviewed by: @droideck (Thanks!) 
+ +(cherry picked from commit 04a0b6ac776a1d588ec2e10ff651e5015078ad21) +--- + ldap/servers/slapd/back-ldbm/archive.c | 45 +++++----- + .../slapd/back-ldbm/db-mdb/mdb_layer.c | 3 + + src/lib389/lib389/__init__.py | 10 +-- + src/lib389/lib389/tasks.py | 82 +++++++++---------- + 4 files changed, 70 insertions(+), 70 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/archive.c b/ldap/servers/slapd/back-ldbm/archive.c +index 0460a42f6..6658cc80a 100644 +--- a/ldap/servers/slapd/back-ldbm/archive.c ++++ b/ldap/servers/slapd/back-ldbm/archive.c +@@ -16,6 +16,8 @@ + #include "back-ldbm.h" + #include "dblayer.h" + ++#define NO_OBJECT ((Object*)-1) ++ + int + ldbm_temporary_close_all_instances(Slapi_PBlock *pb) + { +@@ -270,6 +272,7 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb) + int run_from_cmdline = 0; + Slapi_Task *task; + struct stat sbuf; ++ Object *last_busy_inst_obj = NO_OBJECT; + + slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li); + slapi_pblock_get(pb, SLAPI_SEQ_VAL, &rawdirectory); +@@ -380,13 +383,12 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb) + + /* to avoid conflict w/ import, do this check for commandline, as well */ + { +- Object *inst_obj, *inst_obj2; + ldbm_instance *inst = NULL; + + /* server is up -- mark all backends busy */ +- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj; +- inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) { +- inst = (ldbm_instance *)object_get_data(inst_obj); ++ for (last_busy_inst_obj = objset_first_obj(li->li_instance_set); last_busy_inst_obj; ++ last_busy_inst_obj = objset_next_obj(li->li_instance_set, last_busy_inst_obj)) { ++ inst = (ldbm_instance *)object_get_data(last_busy_inst_obj); + + /* check if an import/restore is already ongoing... */ + if (instance_set_busy(inst) != 0 || dblayer_in_import(inst) != 0) { +@@ -400,20 +402,6 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb) + "another task and cannot be disturbed.", + inst->inst_name); + } +- +- /* painfully, we have to clear the BUSY flags on the +- * backends we'd already marked... 
+- */ +- for (inst_obj2 = objset_first_obj(li->li_instance_set); +- inst_obj2 && (inst_obj2 != inst_obj); +- inst_obj2 = objset_next_obj(li->li_instance_set, +- inst_obj2)) { +- inst = (ldbm_instance *)object_get_data(inst_obj2); +- instance_set_not_busy(inst); +- } +- if (inst_obj2 && inst_obj2 != inst_obj) +- object_release(inst_obj2); +- object_release(inst_obj); + goto err; + } + } +@@ -427,18 +415,26 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb) + goto err; + } + +- if (!run_from_cmdline) { ++err: ++ /* Clear all BUSY flags that have been previously set */ ++ if (last_busy_inst_obj != NO_OBJECT) { + ldbm_instance *inst; + Object *inst_obj; + +- /* none of these backends are busy anymore */ +- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj; ++ for (inst_obj = objset_first_obj(li->li_instance_set); ++ inst_obj && (inst_obj != last_busy_inst_obj); + inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) { + inst = (ldbm_instance *)object_get_data(inst_obj); + instance_set_not_busy(inst); + } ++ if (last_busy_inst_obj != NULL) { ++ /* release last seen object for aborted objset_next_obj iterations */ ++ if (inst_obj != NULL) { ++ object_release(inst_obj); ++ } ++ object_release(last_busy_inst_obj); ++ } + } +-err: + if (return_value) { + if (dir_bak) { + slapi_log_err(SLAPI_LOG_ERR, +@@ -727,7 +723,10 @@ ldbm_archive_config(char *bakdir, Slapi_Task *task) + } + + error: +- PR_CloseDir(dirhandle); ++ if (NULL != dirhandle) { ++ PR_CloseDir(dirhandle); ++ dirhandle = NULL; ++ } + dse_backup_unlock(); + slapi_ch_free_string(&backup_config_dir); + slapi_ch_free_string(&dse_file); +diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c +index 4a7beedeb..3ecc47170 100644 +--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c ++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c +@@ -983,6 +983,9 @@ dbmdb_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task) + if (ldbm_archive_config(dest_dir, task) != 0) { + slapi_log_err(SLAPI_LOG_ERR, "dbmdb_backup", + "Backup of config files failed or is incomplete\n"); ++ if (0 == return_value) { ++ return_value = -1; ++ } + } + + goto bail; +diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py +index 368741a66..cb372c138 100644 +--- a/src/lib389/lib389/__init__.py ++++ b/src/lib389/lib389/__init__.py +@@ -69,7 +69,7 @@ from lib389.utils import ( + get_user_is_root) + from lib389.paths import Paths + from lib389.nss_ssl import NssSsl +-from lib389.tasks import BackupTask, RestoreTask ++from lib389.tasks import BackupTask, RestoreTask, Task + from lib389.dseldif import DSEldif + + # mixin +@@ -1424,7 +1424,7 @@ class DirSrv(SimpleLDAPObject, object): + name, self.ds_paths.prefix) + + # create the archive +- name = "backup_%s_%s.tar.gz" % (self.serverid, time.strftime("%m%d%Y_%H%M%S")) ++ name = "backup_%s_%s.tar.gz" % (self.serverid, Task.get_timestamp()) + backup_file = os.path.join(backup_dir, name) + tar = tarfile.open(backup_file, "w:gz") + tar.extraction_filter = (lambda member, path: member) +@@ -2810,7 +2810,7 @@ class DirSrv(SimpleLDAPObject, object): + else: + # No output file specified. 
Use the default ldif location/name + cmd.append('-a') +- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") ++ tnow = Task.get_timestamp() + if bename: + ldifname = os.path.join(self.ds_paths.ldif_dir, "%s-%s-%s.ldif" % (self.serverid, bename, tnow)) + else: +@@ -2881,7 +2881,7 @@ class DirSrv(SimpleLDAPObject, object): + + if archive_dir is None: + # Use the instance name and date/time as the default backup name +- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") ++ tnow = Task.get_timestamp() + archive_dir = os.path.join(self.ds_paths.backup_dir, "%s-%s" % (self.serverid, tnow)) + elif not archive_dir.startswith("/"): + # Relative path, append it to the bak directory +@@ -3506,7 +3506,7 @@ class DirSrv(SimpleLDAPObject, object): + + if archive is None: + # Use the instance name and date/time as the default backup name +- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") ++ tnow = Task.get_timestamp() + if self.serverid is not None: + backup_dir_name = "%s-%s" % (self.serverid, tnow) + else: +diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py +index 6c2adb5b2..6bf302862 100644 +--- a/src/lib389/lib389/tasks.py ++++ b/src/lib389/lib389/tasks.py +@@ -118,7 +118,7 @@ class Task(DSLdapObject): + return super(Task, self).create(rdn, properties, basedn) + + @staticmethod +- def _get_task_date(): ++ def get_timestamp(): + """Return a timestamp to use in naming new task entries.""" + + return datetime.now().isoformat() +@@ -132,7 +132,7 @@ class AutomemberRebuildMembershipTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'automember_rebuild_' + Task._get_task_date() ++ self.cn = 'automember_rebuild_' + Task.get_timestamp() + dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_REBUILD_TASK + + super(AutomemberRebuildMembershipTask, self).__init__(instance, dn) +@@ -147,7 +147,7 @@ class AutomemberAbortRebuildTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'automember_abort_' + Task._get_task_date() ++ self.cn = 'automember_abort_' + Task.get_timestamp() + dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_ABORT_REBUILD_TASK + + super(AutomemberAbortRebuildTask, self).__init__(instance, dn) +@@ -161,7 +161,7 @@ class FixupLinkedAttributesTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'fixup_linked_attrs_' + Task._get_task_date() ++ self.cn = 'fixup_linked_attrs_' + Task.get_timestamp() + dn = "cn=" + self.cn + "," + DN_FIXUP_LINKED_ATTIBUTES + + super(FixupLinkedAttributesTask, self).__init__(instance, dn) +@@ -175,7 +175,7 @@ class MemberUidFixupTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'memberUid_fixup_' + Task._get_task_date() ++ self.cn = 'memberUid_fixup_' + Task.get_timestamp() + dn = f"cn={self.cn},cn=memberuid task,cn=tasks,cn=config" + + super(MemberUidFixupTask, self).__init__(instance, dn) +@@ -190,7 +190,7 @@ class MemberOfFixupTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'memberOf_fixup_' + Task._get_task_date() ++ self.cn = 'memberOf_fixup_' + Task.get_timestamp() + dn = "cn=" + self.cn + "," + DN_MBO_TASK + + super(MemberOfFixupTask, self).__init__(instance, dn) +@@ -205,7 +205,7 @@ class USNTombstoneCleanupTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'usn_cleanup_' + Task._get_task_date() ++ self.cn = 'usn_cleanup_' + Task.get_timestamp() + dn = "cn=" + self.cn + ",cn=USN tombstone cleanup task," + DN_TASKS + + super(USNTombstoneCleanupTask, self).__init__(instance, dn) +@@ -225,7 +225,7 @@ class 
csngenTestTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'csngenTest_' + Task._get_task_date() ++ self.cn = 'csngenTest_' + Task.get_timestamp() + dn = "cn=" + self.cn + ",cn=csngen_test," + DN_TASKS + super(csngenTestTask, self).__init__(instance, dn) + +@@ -238,7 +238,7 @@ class EntryUUIDFixupTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'entryuuid_fixup_' + Task._get_task_date() ++ self.cn = 'entryuuid_fixup_' + Task.get_timestamp() + dn = "cn=" + self.cn + "," + DN_EUUID_TASK + super(EntryUUIDFixupTask, self).__init__(instance, dn) + self._must_attributes.extend(['basedn']) +@@ -252,7 +252,7 @@ class DBCompactTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'compact_db_' + Task._get_task_date() ++ self.cn = 'compact_db_' + Task.get_timestamp() + dn = "cn=" + self.cn + "," + DN_COMPACTDB_TASK + super(DBCompactTask, self).__init__(instance, dn) + +@@ -265,7 +265,7 @@ class SchemaReloadTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'schema_reload_' + Task._get_task_date() ++ self.cn = 'schema_reload_' + Task.get_timestamp() + dn = "cn=" + self.cn + ",cn=schema reload task," + DN_TASKS + super(SchemaReloadTask, self).__init__(instance, dn) + +@@ -278,7 +278,7 @@ class SyntaxValidateTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'syntax_validate_' + Task._get_task_date() ++ self.cn = 'syntax_validate_' + Task.get_timestamp() + dn = f"cn={self.cn},cn=syntax validate,cn=tasks,cn=config" + + super(SyntaxValidateTask, self).__init__(instance, dn) +@@ -295,7 +295,7 @@ class AbortCleanAllRUVTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'abortcleanallruv_' + Task._get_task_date() ++ self.cn = 'abortcleanallruv_' + Task.get_timestamp() + dn = "cn=" + self.cn + ",cn=abort cleanallruv," + DN_TASKS + + super(AbortCleanAllRUVTask, self).__init__(instance, dn) +@@ -312,7 +312,7 @@ class CleanAllRUVTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'cleanallruv_' + Task._get_task_date() ++ self.cn = 'cleanallruv_' + Task.get_timestamp() + dn = "cn=" + self.cn + ",cn=cleanallruv," + DN_TASKS + self._properties = None + +@@ -359,7 +359,7 @@ class ImportTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'import_' + Task._get_task_date() ++ self.cn = 'import_' + Task.get_timestamp() + dn = "cn=%s,%s" % (self.cn, DN_IMPORT_TASK) + self._properties = None + +@@ -388,7 +388,7 @@ class ExportTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'export_' + Task._get_task_date() ++ self.cn = 'export_' + Task.get_timestamp() + dn = "cn=%s,%s" % (self.cn, DN_EXPORT_TASK) + self._properties = None + +@@ -411,7 +411,7 @@ class BackupTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'backup_' + Task._get_task_date() ++ self.cn = 'backup_' + Task.get_timestamp() + dn = "cn=" + self.cn + ",cn=backup," + DN_TASKS + self._properties = None + +@@ -426,7 +426,7 @@ class RestoreTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'restore_' + Task._get_task_date() ++ self.cn = 'restore_' + Task.get_timestamp() + dn = "cn=" + self.cn + ",cn=restore," + DN_TASKS + self._properties = None + +@@ -513,7 +513,7 @@ class Tasks(object): + raise ValueError("Import file (%s) does not exist" % input_file) + + # Prepare the task entry +- cn = "import_" + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = "import_" + Task.get_timestamp() + dn = "cn=%s,%s" % (cn, 
DN_IMPORT_TASK) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -581,7 +581,7 @@ class Tasks(object): + raise ValueError("output_file is mandatory") + + # Prepare the task entry +- cn = "export_" + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = "export_" + Task.get_timestamp() + dn = "cn=%s,%s" % (cn, DN_EXPORT_TASK) + entry = Entry(dn) + entry.update({ +@@ -637,7 +637,7 @@ class Tasks(object): + raise ValueError("You must specify a backup directory.") + + # build the task entry +- cn = "backup_" + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = "backup_" + Task.get_timestamp() + dn = "cn=%s,%s" % (cn, DN_BACKUP_TASK) + entry = Entry(dn) + entry.update({ +@@ -694,7 +694,7 @@ class Tasks(object): + raise ValueError("Backup file (%s) does not exist" % backup_dir) + + # build the task entry +- cn = "restore_" + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = "restore_" + Task.get_timestamp() + dn = "cn=%s,%s" % (cn, DN_RESTORE_TASK) + entry = Entry(dn) + entry.update({ +@@ -789,7 +789,7 @@ class Tasks(object): + attrs.append(attr) + else: + attrs.append(attrname) +- cn = "index_vlv_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime())) ++ cn = "index_vlv_%s" % (Task.get_timestamp()) + dn = "cn=%s,%s" % (cn, DN_INDEX_TASK) + entry = Entry(dn) + entry.update({ +@@ -803,7 +803,7 @@ class Tasks(object): + # + # Reindex all attributes - gather them first... + # +- cn = "index_all_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime())) ++ cn = "index_all_%s" % (Task.get_timestamp()) + dn = ('cn=%s,cn=ldbm database,cn=plugins,cn=config' % backend) + try: + indexes = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, '(objectclass=nsIndex)') +@@ -815,7 +815,7 @@ class Tasks(object): + # + # Reindex specific attributes + # +- cn = "index_attrs_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime())) ++ cn = "index_attrs_%s" % (Task.get_timestamp()) + if isinstance(attrname, (tuple, list)): + # Need to guarantee this is a list (and not a tuple) + for attr in attrname: +@@ -903,8 +903,7 @@ class Tasks(object): + + suffix = ents[0].getValue(attr) + +- cn = "fixupmemberof_" + time.strftime("%m%d%Y_%H%M%S", +- time.localtime()) ++ cn = "fixupmemberof_" + Task.get_timestamp() + dn = "cn=%s,%s" % (cn, DN_MBO_TASK) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -965,8 +964,7 @@ class Tasks(object): + if len(ents) != 1: + raise ValueError("invalid backend name: %s" % bename) + +- cn = "fixupTombstone_" + time.strftime("%m%d%Y_%H%M%S", +- time.localtime()) ++ cn = "fixupTombstone_" + Task.get_timestamp() + dn = "cn=%s,%s" % (cn, DN_TOMB_FIXUP_TASK) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1019,7 +1017,7 @@ class Tasks(object): + @return exit code + ''' + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=automember rebuild membership,cn=tasks,cn=config' % cn) + + entry = Entry(dn) +@@ -1077,7 +1075,7 @@ class Tasks(object): + if not ldif_out: + raise ValueError("Missing ldif_out") + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=automember export updates,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1129,7 +1127,7 @@ class Tasks(object): + if not ldif_out or not ldif_in: + raise ValueError("Missing ldif_out and/or ldif_in") + +- cn = 'task-' + 
time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=automember map updates,cn=tasks,cn=config' % cn) + + entry = Entry(dn) +@@ -1175,7 +1173,7 @@ class Tasks(object): + @return exit code + ''' + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=fixup linked attributes,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1219,7 +1217,7 @@ class Tasks(object): + @return exit code + ''' + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=schema reload task,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1264,7 +1262,7 @@ class Tasks(object): + @return exit code + ''' + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=memberuid task,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1311,7 +1309,7 @@ class Tasks(object): + @return exit code + ''' + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=syntax validate,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1358,7 +1356,7 @@ class Tasks(object): + @return exit code + ''' + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=USN tombstone cleanup task,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1413,7 +1411,7 @@ class Tasks(object): + if not configfile: + raise ValueError("Missing required paramter: configfile") + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=sysconfig reload,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1464,7 +1462,7 @@ class Tasks(object): + if not suffix: + raise ValueError("Missing required paramter: suffix") + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=cleanallruv,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1516,7 +1514,7 @@ class Tasks(object): + if not suffix: + raise ValueError("Missing required paramter: suffix") + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=abort cleanallruv,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1571,7 +1569,7 @@ class Tasks(object): + if not nsArchiveDir: + raise ValueError("Missing required paramter: nsArchiveDir") + +- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime()) ++ cn = 'task-' + Task.get_timestamp() + dn = ('cn=%s,cn=upgradedb,cn=tasks,cn=config' % cn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') +@@ -1616,6 +1614,6 @@ class LDAPIMappingReloadTask(Task): + """ + + def __init__(self, instance, dn=None): +- self.cn = 'reload-' + Task._get_task_date() ++ self.cn = 'reload-' + Task.get_timestamp() + dn = f'cn={self.cn},cn=reload ldapi mappings,cn=tasks,cn=config' + 
super(LDAPIMappingReloadTask, self).__init__(instance, dn) +-- +2.48.0 + diff --git a/0009-Issue-6554-During-import-of-entries-without-nsUnique.patch b/0009-Issue-6554-During-import-of-entries-without-nsUnique.patch new file mode 100644 index 0000000..90a7c52 --- /dev/null +++ b/0009-Issue-6554-During-import-of-entries-without-nsUnique.patch @@ -0,0 +1,165 @@ +From 2b1b2db90c9d337166fa28e313f60828cd43de09 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Thu, 6 Feb 2025 18:25:36 +0100 +Subject: [PATCH] Issue 6554 - During import of entries without nsUniqueId, a + supplier generates duplicate nsUniqueId (LMDB only) (#6582) + +Bug description: + During an import the entry is prepared (schema, operational + attributes, password encryption,...) before starting the + update of the database and indexes. + A step of the preparation is to assign a value to 'nsuniqueid' + operational attribute. 'nsuniqueid' must be unique. + In LMDB the preparation is done by multiple threads (workers). + In such case the 'nsuniqueid' are generated in parallel and + as it is time based several values can be duplicated. + +Fix description: + To prevent that the routine dbmdb_import_generate_uniqueid + should make sure to synchronize the workers. + +fixes: #6554 + +Reviewed by: Pierre Rogier +--- + .../tests/suites/import/import_test.py | 79 ++++++++++++++++++- + .../back-ldbm/db-mdb/mdb_import_threads.c | 11 +++ + 2 files changed, 89 insertions(+), 1 deletion(-) + +diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py +index dbd921924..54d304753 100644 +--- a/dirsrvtests/tests/suites/import/import_test.py ++++ b/dirsrvtests/tests/suites/import/import_test.py +@@ -14,11 +14,13 @@ import os + import pytest + import time + import glob ++import re + import logging + import subprocess + from datetime import datetime + from lib389.topologies import topology_st as topo +-from lib389._constants import DEFAULT_SUFFIX, TaskWarning ++from lib389.topologies import topology_m2 as topo_m2 ++from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX, TaskWarning + from lib389.dbgen import dbgen_users + from lib389.tasks import ImportTask + from lib389.index import Indexes +@@ -688,6 +690,81 @@ def test_online_import_under_load(topo): + assert import_task.get_exit_code() == 0 + + ++def test_duplicate_nsuniqueid(topo_m2, request): ++ """Test that after an offline import all ++ nsuniqueid are different ++ ++ :id: a2541677-a288-4633-bacf-4050cc56016d ++ :setup: MMR with 2 suppliers ++ :steps: ++ 1. stop the instance to do offline operations ++ 2. Generate a 5K users LDIF file ++ 3. Check that no uniqueid are present in the generated file ++ 4. import the generated LDIF ++ 5. export the database ++ 6. Check that that exported LDIF contains more than 5K nsuniqueid ++ 7. Check that there is no duplicate nsuniqued in exported LDIF ++ :expectedresults: ++ 1. Should succeeds ++ 2. Should succeeds ++ 3. Should succeeds ++ 4. Should succeeds ++ 5. Should succeeds ++ 6. Should succeeds ++ 7. 
Should succeeds ++ """ ++ m1 = topo_m2.ms["supplier1"] ++ ++ # Stop the instance ++ m1.stop() ++ ++ # Generate a test ldif (5k entries) ++ log.info("Generating LDIF...") ++ ldif_dir = m1.get_ldif_dir() ++ import_ldif = ldif_dir + '/5k_users_import.ldif' ++ dbgen_users(m1, 5000, import_ldif, DEFAULT_SUFFIX) ++ ++ # Check that the generated LDIF does not contain nsuniqueid ++ all_nsuniqueid = [] ++ with open(import_ldif, 'r') as file: ++ for line in file: ++ if line.lower().startswith("nsuniqueid: "): ++ all_nsuniqueid.append(line.split(': ')[1]) ++ log.info("import file contains " + str(len(all_nsuniqueid)) + " nsuniqueid") ++ assert len(all_nsuniqueid) == 0 ++ ++ # Import the "nsuniquied free" LDIF file ++ if not m1.ldif2db('userRoot', None, None, None, import_ldif): ++ assert False ++ ++ # Export the DB that now should contain nsuniqueid ++ export_ldif = ldif_dir + '/5k_user_export.ldif' ++ log.info("export to file " + export_ldif) ++ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], ++ excludeSuffixes=None, repl_data=False, ++ outputfile=export_ldif, encrypt=False) ++ ++ # Check that the export LDIF contain nsuniqueid ++ all_nsuniqueid = [] ++ with open(export_ldif, 'r') as file: ++ for line in file: ++ if line.lower().startswith("nsuniqueid: "): ++ all_nsuniqueid.append(line.split(': ')[1]) ++ log.info("export file " + export_ldif + " contains " + str(len(all_nsuniqueid)) + " nsuniqueid") ++ assert len(all_nsuniqueid) >= 5000 ++ ++ # Check that the nsuniqueid are unique ++ assert len(set(all_nsuniqueid)) == len(all_nsuniqueid) ++ ++ def fin(): ++ if os.path.exists(import_ldif): ++ os.remove(import_ldif) ++ if os.path.exists(export_ldif): ++ os.remove(export_ldif) ++ m1.start ++ ++ request.addfinalizer(fin) ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c +index 707a110c5..0f445bb56 100644 +--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c ++++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c +@@ -610,10 +610,20 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e) + { + const char *uniqueid = slapi_entry_get_uniqueid(e); + int rc = UID_SUCCESS; ++ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + + if (!uniqueid && (job->uuid_gen_type != SLAPI_UNIQUEID_GENERATE_NONE)) { + char *newuniqueid; + ++ /* With 'mdb' we have several workers generating nsuniqueid ++ * we need to serialize them to prevent generating duplicate value ++ * From performance pov it only impacts import ++ * The default value is SLAPI_UNIQUEID_GENERATE_TIME_BASED so ++ * the only syscall is clock_gettime and then string formating ++ * that should limit contention ++ */ ++ pthread_mutex_lock(&mutex); ++ + /* generate id based on dn */ + if (job->uuid_gen_type == SLAPI_UNIQUEID_GENERATE_NAME_BASED) { + char *dn = slapi_entry_get_dn(e); +@@ -624,6 +634,7 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e) + /* time based */ + rc = slapi_uniqueIDGenerateString(&newuniqueid); + } ++ pthread_mutex_unlock(&mutex); + + if (rc == UID_SUCCESS) { + slapi_entry_set_uniqueid(e, newuniqueid); +-- +2.48.0 + diff --git a/0010-Issue-6596-BUG-Compilation-Regresion-6597.patch b/0010-Issue-6596-BUG-Compilation-Regresion-6597.patch new file mode 100644 index 0000000..7a0ef58 --- /dev/null +++ b/0010-Issue-6596-BUG-Compilation-Regresion-6597.patch @@ -0,0 +1,77 @@ +From e638e801afd51ca44523222a90a9f69f4be82ae3 Mon Sep 17 
00:00:00 2001 +From: Firstyear +Date: Fri, 7 Feb 2025 14:47:29 +1000 +Subject: [PATCH] Issue 6596 - BUG - Compilation Regresion (#6597) + +Bug Description: The addition of the json auditlog feature caused +a regresion in compilation due to the use of labels in a declaration. + +Fix Description: Enclose the switch/case in braces to resolve the +compilation issue. + +fixes: https://github.com/389ds/389-ds-base/issues/6596 + +Author: William Brown + +Review by: @droideck Thanks! +--- + ldap/servers/slapd/auditlog.c | 15 ++++++++------- + 1 file changed, 8 insertions(+), 7 deletions(-) + +diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c +index c288a1a7f..ff9a6fdde 100644 +--- a/ldap/servers/slapd/auditlog.c ++++ b/ldap/servers/slapd/auditlog.c +@@ -456,7 +456,7 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype, + add_entry_attrs_json(entry, log_json); + + switch (optype) { +- case SLAPI_OPERATION_MODIFY: ++ case SLAPI_OPERATION_MODIFY: { + json_object *mod_list = json_object_new_array(); + mods = change; + for (size_t j = 0; (mods != NULL) && (mods[j] != NULL); j++) { +@@ -511,8 +511,8 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype, + /* Add entire mod list to the main object */ + json_object_object_add(log_json, "modify", mod_list); + break; +- +- case SLAPI_OPERATION_ADD: ++ } ++ case SLAPI_OPERATION_ADD: { + int len; + e = change; + tmp = slapi_entry2str(e, &len); +@@ -526,8 +526,8 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype, + json_object_object_add(log_json, "add", json_object_new_string(tmp)); + slapi_ch_free_string(&tmpsave); + break; +- +- case SLAPI_OPERATION_DELETE: ++ } ++ case SLAPI_OPERATION_DELETE: { + tmp = change; + del_obj = json_object_new_object(); + if (tmp && tmp[0]) { +@@ -538,8 +538,8 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype, + json_object_object_add(log_json, "delete", del_obj); + } + break; +- +- case SLAPI_OPERATION_MODDN: ++ } ++ case SLAPI_OPERATION_MODDN: { + newrdn = ((char **)change)[0]; + modrdn_obj = json_object_new_object(); + json_object_object_add(modrdn_obj, attr_newrdn, json_object_new_string(newrdn)); +@@ -551,6 +551,7 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype, + } + json_object_object_add(log_json, "modrdn", modrdn_obj); + break; ++ } + } + + msg = (char *)json_object_to_json_string_ext(log_json, log_format); +-- +2.48.0 + diff --git a/0011-Issue-6367-RFE-support-of-Session-Tracking-Control-i.patch b/0011-Issue-6367-RFE-support-of-Session-Tracking-Control-i.patch new file mode 100644 index 0000000..2feceba --- /dev/null +++ b/0011-Issue-6367-RFE-support-of-Session-Tracking-Control-i.patch @@ -0,0 +1,2106 @@ +From 8cbd49a7aa59f42856d31e79ba09ce14bbc5c51a Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Fri, 29 Nov 2024 18:27:57 +0100 +Subject: [PATCH] Issue 6367 - RFE support of Session Tracking Control internet + draft (#6403) + +Bug description: + This RFE is to support https://datatracker.ietf.org/doc/html/draft-wahl-ldap-session-03 + In short, it allows a client to send strings in a control. + Those strings are added to the operation result logged in the + access logs. + Those strings are meaningful for the client (debug, + kmonitoring,...). + +Fix description: + The design is https://www.port389.org/docs/389ds/design/session-identifier-in-logs.html + +fixes: #6367 + +Reviewed by: William Brown, Pierre Rogier (Thanks !!!) 
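+
+    For reference, the control consumed here is the one python-ldap already
+    ships in ldap.controls.sessiontrack and that the new session_test.py
+    below exercises.  A minimal client-side sketch, assuming a server at
+    ldap://localhost:389, cn=Directory Manager credentials and an
+    illustrative "debug-session-42" identifier (none of these values come
+    from this patch itself):
+
+        import ldap
+        from ldap.controls.sessiontrack import (
+            SessionTrackingControl,
+            SESSION_TRACKING_CONTROL_OID,
+        )
+
+        conn = ldap.initialize("ldap://localhost:389")          # assumed URL
+        conn.simple_bind_s("cn=Directory Manager", "password")  # assumed credentials
+
+        # Source IP, source name, format OID, and the free-form identifier
+        # that the server logs as sid="..." on the operation's RESULT line.
+        st_ctrl = SessionTrackingControl(
+            "10.0.0.10",
+            "host.example.com",
+            SESSION_TRACKING_CONTROL_OID + ".1234",
+            "debug-session-42",
+        )
+
+        # Any operation may carry the control; here a subtree search.
+        conn.search_ext_s(
+            "dc=example,dc=com",
+            ldap.SCOPE_SUBTREE,
+            "(uid=*)",
+            serverctrls=[st_ctrl],
+        )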
+--- + .../suites/session_tracking/session_test.py | 1576 +++++++++++++++++ + ldap/servers/slapd/abandon.c | 42 +- + ldap/servers/slapd/control.c | 127 ++ + ldap/servers/slapd/pblock.c | 12 + + ldap/servers/slapd/pblock_v3.h | 3 + + ldap/servers/slapd/result.c | 57 +- + ldap/servers/slapd/slapi-plugin.h | 1 + + 7 files changed, 1794 insertions(+), 24 deletions(-) + create mode 100644 dirsrvtests/tests/suites/session_tracking/session_test.py + +diff --git a/dirsrvtests/tests/suites/session_tracking/session_test.py b/dirsrvtests/tests/suites/session_tracking/session_test.py +new file mode 100644 +index 000000000..452ca04fb +--- /dev/null ++++ b/dirsrvtests/tests/suites/session_tracking/session_test.py +@@ -0,0 +1,1576 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2024 RED Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK ---- ++ ++import pytest, os, re, time ++from lib389.tasks import * ++from lib389.utils import * ++from lib389 import Entry ++from ldap import SCOPE_SUBTREE, ALREADY_EXISTS ++from ldap.controls import SimplePagedResultsControl ++from ldap.controls.sessiontrack import SessionTrackingControl, SESSION_TRACKING_CONTROL_OID ++from ldap.extop import ExtendedRequest ++ ++from lib389._constants import DEFAULT_SUFFIX, PW_DM, PLUGIN_MEMBER_OF ++from lib389.topologies import topology_st ++from lib389.plugins import MemberOfPlugin ++ ++from lib389.schema import Schema ++from lib389.idm.user import UserAccount, UserAccounts ++from lib389.idm.account import Accounts ++from lib389.idm.account import Anonymous ++ ++SESSION_SOURCE_IP = '10.0.0.10' ++SESSION_SOURCE_NAME = 'host.example.com' ++SESSION_TRACKING_FORMAT_OID = SESSION_TRACKING_CONTROL_OID + ".1234" ++ ++pytestmark = pytest.mark.tier0 ++ ++def test_short_session_tracking_srch(topology_st, request): ++ """Verify that a short session_tracking string ++ is added (not truncate) during a search ++ ++ :id: c9efc1cc-03c7-42b7-801c-440f7a11ee13 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Do a search with a short session tracking string ++ 2. Restart the instance to flush the log ++ 3. Check the exact same string is present in the access log ++ :expectedresults: ++ 1. Search should succeed ++ 2. success ++ 3. Log should contain one log with that session ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER = "SRCH short" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ ++ topology_st.standalone.search_ext_s(DEFAULT_SUFFIX, ++ ldap.SCOPE_SUBTREE, ++ '(uid=*)', ++ serverctrls=[st_ctrl]) ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=101.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_short_session_tracking_add(topology_st, request): ++ """Verify that a short session_tracking string ++ is added (not truncate) during a add ++ ++ :id: 04afd3de-365e-485f-9e00-913d913af931 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry with a short session tracking ++ 2. Restart the instance to flush the log ++ 3. Check the exact same string is present in the access log ++ :expectedresults: ++ 1. Add should succeed ++ 2. success ++ 3. 
Log should contain one log with that session ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER = "ADD short" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_add," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_add'), ++ ('cn', b'test_add'), ++ ('userPassword', b'test_add'), ++ ], ++ serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_short_session_tracking_del(topology_st, request): ++ """Verify that a short session_tracking string ++ is added (not truncate) during a del ++ ++ :id: a1391fbc-2107-4474-aaaf-088c393767a6 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry ++ 2. Delete the test entry with a short session tracking ++ 3. Restart the instance to flush the log ++ 4. Check the exact same string is not present in the access log for the ADD ++ 5. Check the exact same string is present in the access log for the DEL ++ :expectedresults: ++ 1. Add should succeed ++ 2. DEL should succeed ++ 3. success ++ 4. Log should not contain a log with that session for the ADD ++ 5. Log should contain one log with that session for the DEL ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER = "DEL short" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_del," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_del'), ++ ('cn', b'test_del'), ++ ('userPassword', b'test_del'), ++ ]) ++ topology_st.standalone.delete_ext_s(TEST_DN, ++ serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=107.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_short_session_tracking_mod(topology_st, request): ++ """Verify that a short session_tracking string ++ is added (not truncate) during a MOD ++ ++ :id: 00c91efc-071d-4187-8185-6cca27b5bf63 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry ++ 2. Modify the test entry with a short session tracking ++ 3. Restart the instance to flush the log ++ 4. Check the exact same string is not present in the access log for the ADD ++ 5. Check the exact same string is present in the access log for the MOD ++ :expectedresults: ++ 1. Add should succeed ++ 2. Mod should succeed ++ 3. success ++ 4. 
Log should not contain a log with that session for the ADD ++ 5. Log should contain one log with that session for the MOD ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER = "MOD short" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_mod," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_del'), ++ ('cn', b'test_del'), ++ ('userPassword', b'test_del'), ++ ]) ++ topology_st.standalone.modify_ext_s(TEST_DN, ++ [(ldap.MOD_REPLACE, 'sn', b'new_sn')], ++ serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=103.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_short_session_tracking_compare(topology_st, request): ++ """Verify that a short session_tracking string ++ is added (not truncate) during a compare ++ ++ :id: 6f2090fd-a960-48e5-b7f1-04ddef4a85af ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry ++ 2. compare an attribute with a short session tracking ++ 3. Restart the instance to flush the log ++ 4. Check the exact same string is not present in the access log for the ADD ++ 5. Check the exact same string is present in the access log for the COMPARE ++ :expectedresults: ++ 1. Add should succeed ++ 2. Compare should succeed ++ 3. success ++ 4. Log should not contain a log with that session for the ADD ++ 5. 
Log should contain one log with that session for the COMPARE ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER = "COMPARE short" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_compare," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_compare'), ++ ('cn', b'test_compare'), ++ ('userPassword', b'test_compare'), ++ ]) ++ topology_st.standalone.compare_ext_s(TEST_DN, 'sn', b'test_compare', serverctrls=[st_ctrl]) ++ topology_st.standalone.compare_ext_s(TEST_DN, 'sn', b'test_fail_compare', serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*err=6 tag=111.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*err=5 tag=111.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_short_session_tracking_abandon(topology_st, request): ++ """Verify that a short session_tracking string ++ is added (not truncate) during an abandon ++ ++ :id: 58f54ada-e05c-411b-a1c6-8b19fd99843c ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add 10 test entries ++ 2. Launch Page Search with a window of 3 ++ 3. Abandon the Page Search with a short session tracking ++ 4. Restart the instance to flush the log ++ 5. Check the exact same string is not present in the access log for the ADD ++ 6. Check the exact same string is present in the access log for the ABANDON ++ :expectedresults: ++ 1. Add should succeed ++ 2. success ++ 3. success ++ 4. success ++ 5. Log should not contain log with that session for the ADDs ++ 6. Log should contain one log with that session for the abandon ++ """ ++ ++ SESSION_TRACKING_IDENTIFIER = "ABANDON short" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ ++ # provision more entries than the page search will fetch ++ entries = [] ++ for i in range(10): ++ TEST_DN = "cn=test_abandon_%d,%s" % (i, DEFAULT_SUFFIX) ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_abandon'), ++ ('cn', b'test_abandon_%d' % i), ++ ('userPassword', b'test_abandon'), ++ ]) ++ entries.append(TEST_DN) ++ ++ # run a page search (with the session) using a small window. So we can abandon it. 
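++    # search_ext() is asynchronous and returns a message id; with a page
++    # size of 3 against the 10 entries provisioned above, the search is
++    # still pending and can be abandoned.  st_ctrl is attached only to
++    # abandon_ext(), which is why the sid is expected on the ABANDON
++    # access-log line and on none of the ADD lines checked below.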
++ req_ctrl = SimplePagedResultsControl(True, size=3, cookie='') ++ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, r'(objectclass=*)', ['cn'], serverctrls=[req_ctrl]) ++ time.sleep(1) ++ topology_st.standalone.abandon_ext(msgid, serverctrls=[st_ctrl]) ++ ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*ABANDON.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ for ent in entries: ++ try: ++ topology_st.standalone.delete_s(ent) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_short_session_tracking_extop(topology_st, request): ++ """Verify that a short session_tracking string ++ is added (not truncate) during an extended operation ++ ++ :id: 65c2d014-d798-46f3-8168-b6f56b43d069 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. run whoami extop ++ 2. Check the exact same string is present in the access log for the EXTOP ++ :expectedresults: ++ 1. success ++ 2. Log should contain one log with that session for the EXTOP ++ """ ++ SESSION_TRACKING_IDENTIFIER = "Extop short" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ ++ extop = ExtendedRequest(requestName = '1.3.6.1.4.1.4203.1.11.3', requestValue=None) ++ (oid_response, res) = topology_st.standalone.extop_s(extop, serverctrls=[st_ctrl]) ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=120.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_exact_max_lgth_session_tracking_srch(topology_st, request): ++ """Verify that a exact max length session_tracking string ++ is added (not truncate) during a search ++ ++ :id: 2c8c86f9-4896-4ccc-a727-6a4033f6f44a ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Do a search with a exact max length session tracking string ++ 2. Restart the instance to flush the log ++ 3. Check the exact same string is present in the access log (without '.') ++ :expectedresults: ++ 1. Search should succeed ++ 2. success ++ 3. Log should contain one log with that session ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER = "SRCH long ---->" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ ++ topology_st.standalone.search_ext_s(DEFAULT_SUFFIX, ++ ldap.SCOPE_SUBTREE, ++ '(uid=*)', ++ serverctrls=[st_ctrl]) ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=101.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_exact_max_lgth_session_tracking_add(topology_st, request): ++ """Verify that a exact max length of session_tracking string ++ is added (not truncate) during a add ++ ++ :id: 41c0b4f3-5e75-404b-98af-98cc98b742c7 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry with a exact max lenght (15) session tracking ++ 2. 
Restart the instance to flush the log ++ 3. Check the exact same string is present in the access log ++ :expectedresults: ++ 1. Add should succeed ++ 2. success ++ 3. Log should contain one log with that session ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER = "ADD long ----->" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_add," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_add'), ++ ('cn', b'test_add'), ++ ('userPassword', b'test_add'), ++ ], ++ serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_exact_max_lgth_session_tracking_del(topology_st, request): ++ """Verify that a exact max lgth session_tracking string ++ is added (not truncate) during a del ++ ++ :id: b8dca6c9-7cd4-4950-bcb5-7e9e6bb9202f ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry ++ 2. Delete the test entry with a exact max length (15) session tracking ++ 3. Restart the instance to flush the log ++ 4. Check the exact same string is not present in the access log for the ADD ++ 5. Check the exact same string is present in the access log for the DEL ++ :expectedresults: ++ 1. Add should succeed ++ 2. DEL should succeed ++ 3. success ++ 4. Log should not contain a log with that session for the ADD ++ 5. Log should contain one log with that session for the DEL ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER = "DEL long ----->" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_del," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_del'), ++ ('cn', b'test_del'), ++ ('userPassword', b'test_del'), ++ ]) ++ topology_st.standalone.delete_ext_s(TEST_DN, ++ serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=107.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_exact_max_lgth_session_tracking_mod(topology_st, request): ++ """Verify that an exact max length session_tracking string ++ is added (not truncate) during a MOD ++ ++ :id: 3bd1205f-a035-48a7-94c2-f8774e24ae91 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry ++ 2. Modify the test entry with an exact max length (15) session tracking ++ 3. Restart the instance to flush the log ++ 4. 
Check the exact same string is not present in the access log for the ADD ++ 5. Check the exact same string is present in the access log for the MOD ++ :expectedresults: ++ 1. Add should succeed ++ 2. Mod should succeed ++ 3. success ++ 4. Log should not contain a log with that session for the ADD ++ 5. Log should contain one log with that session for the MOD ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER = "MOD long ----->" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_mod," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_del'), ++ ('cn', b'test_del'), ++ ('userPassword', b'test_del'), ++ ]) ++ topology_st.standalone.modify_ext_s(TEST_DN, ++ [(ldap.MOD_REPLACE, 'sn', b'new_sn')], ++ serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=103.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_exact_max_lgth_session_tracking_compare(topology_st, request): ++ """Verify that an exact max length session_tracking string ++ is added (not truncate) during a compare ++ ++ :id: a6c8ad60-7edb-4ee4-b0e3-06727870687c ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry ++ 2. compare an attribute with an exact max length session tracking ++ 3. Restart the instance to flush the log ++ 4. Check the exact same string is not present in the access log for the ADD ++ 5. Check the exact same string is present in the access log for the COMPARE ++ :expectedresults: ++ 1. Add should succeed ++ 2. Compare should succeed ++ 3. success ++ 4. Log should not contain a log with that session for the ADD ++ 5. 
Log should contain one log with that session for the COMPARE ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER = "COMPARE long ->" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_compare," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_compare'), ++ ('cn', b'test_compare'), ++ ('userPassword', b'test_compare'), ++ ]) ++ topology_st.standalone.compare_ext_s(TEST_DN, 'sn', b'test_compare', serverctrls=[st_ctrl]) ++ topology_st.standalone.compare_ext_s(TEST_DN, 'sn', b'test_fail_compare', serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*err=6 tag=111.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*err=5 tag=111.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_exact_max_lgth_session_tracking_abandon(topology_st, request): ++ """Verify that an exact max length session_tracking string ++ is added (not truncate) during an abandon ++ ++ :id: 708554b9-8403-411c-90b5-d9ecc2d3830f ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add 10 test entries ++ 2. Launch Page Search with a window of 3 ++ 3. Abandon the Page Search with an exact max length session tracking ++ 4. Restart the instance to flush the log ++ 5. Check the exact same string is not present in the access log for the ADD ++ 6. Check the exact same string is present in the access log for the ABANDON ++ :expectedresults: ++ 1. Add should succeed ++ 2. success ++ 3. success ++ 4. success ++ 5. Log should not contain log with that session for the ADDs ++ 6. Log should contain one log with that session for the abandon ++ """ ++ ++ SESSION_TRACKING_IDENTIFIER = "ABANDON long ->" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ ++ # provision more entries than the page search will fetch ++ entries = [] ++ for i in range(10): ++ TEST_DN = "cn=test_abandon_%d,%s" % (i, DEFAULT_SUFFIX) ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_abandon'), ++ ('cn', b'test_abandon_%d' % i), ++ ('userPassword', b'test_abandon'), ++ ]) ++ entries.append(TEST_DN) ++ ++ # run a page search (with the session) using a small window. So we can abandon it. 
++ req_ctrl = SimplePagedResultsControl(True, size=3, cookie='') ++ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, r'(objectclass=*)', ['cn'], serverctrls=[req_ctrl]) ++ time.sleep(1) ++ topology_st.standalone.abandon_ext(msgid, serverctrls=[st_ctrl]) ++ ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*ABANDON.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ for ent in entries: ++ try: ++ topology_st.standalone.delete_s(ent) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_exact_max_lgth_session_tracking_extop(topology_st, request): ++ """Verify that an exact max length session_tracking string ++ is added (not truncate) during an extended operation ++ ++ :id: 078d33c4-9124-4766-966e-2e3eebdf0e18 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. run whoami extop ++ 2. Check the exact same string (max length 15) ++ is present in the access log for the EXTOP ++ :expectedresults: ++ 1. success ++ 2. Log should contain one log with that session for the EXTOP ++ """ ++ SESSION_TRACKING_IDENTIFIER = "Extop long --->" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ ++ extop = ExtendedRequest(requestName = '1.3.6.1.4.1.4203.1.11.3', requestValue=None) ++ (oid_response, res) = topology_st.standalone.extop_s(extop, serverctrls=[st_ctrl]) ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=120.* sid="%s".*' % SESSION_TRACKING_IDENTIFIER) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_long_session_tracking_srch(topology_st, request): ++ """Verify that a long session_tracking string ++ is added (truncate) during a search ++ ++ :id: 56118d13-c0b1-401f-aaa4-6dc233156e36 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Do a search with a long session tracking string ++ 2. Restart the instance to flush the log ++ 3. Check the exact same string is present in the access log (with '.') ++ :expectedresults: ++ 1. Search should succeed ++ 2. success ++ 3. Log should contain one log with that session ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER_MAX = "SRCH long ---->" ++ SESSION_TRACKING_IDENTIFIER = SESSION_TRACKING_IDENTIFIER_MAX + "xxxxxxxx" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ ++ topology_st.standalone.search_ext_s(DEFAULT_SUFFIX, ++ ldap.SCOPE_SUBTREE, ++ '(uid=*)', ++ serverctrls=[st_ctrl]) ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=101.* sid="%s...".*' % SESSION_TRACKING_IDENTIFIER_MAX) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_long_session_tracking_add(topology_st, request): ++ """Verify that a long session_tracking string ++ is added (truncate) during a add ++ ++ :id: ac97bc6b-f2c5-41e2-9ab6-df05afb2757c ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. 
Add a test entry with a long session tracking ++ 2. Restart the instance to flush the log ++ 3. Check the exact same string is present in the access log (with '.') ++ :expectedresults: ++ 1. Add should succeed ++ 2. success ++ 3. Log should contain one log with that session ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER_MAX = "ADD long ----->" ++ SESSION_TRACKING_IDENTIFIER = SESSION_TRACKING_IDENTIFIER_MAX + "xxxxxxxx" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_add," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_add'), ++ ('cn', b'test_add'), ++ ('userPassword', b'test_add'), ++ ], ++ serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s...".*' % SESSION_TRACKING_IDENTIFIER_MAX) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_long_session_tracking_del(topology_st, request): ++ """Verify that a long session_tracking string ++ is added (truncate) during a del ++ ++ :id: 283152b8-ba6b-4153-b2de-17070911bf18 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry ++ 2. Delete the test entry with a long session tracking ++ 3. Restart the instance to flush the log ++ 4. Check the exact same string is not present in the access log for the ADD ++ 5. Check the exact same string is present in the access log for the DEL (with '.') ++ :expectedresults: ++ 1. Add should succeed ++ 2. DEL should succeed ++ 3. success ++ 4. Log should not contain a log with that session for the ADD ++ 5. 
Log should contain one log with that session for the DEL ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER_MAX = "DEL long ----->" ++ SESSION_TRACKING_IDENTIFIER = SESSION_TRACKING_IDENTIFIER_MAX + "xxxxxxxx" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_del," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_del'), ++ ('cn', b'test_del'), ++ ('userPassword', b'test_del'), ++ ]) ++ topology_st.standalone.delete_ext_s(TEST_DN, ++ serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s...".*' % SESSION_TRACKING_IDENTIFIER_MAX) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=107.* sid="%s...".*' % SESSION_TRACKING_IDENTIFIER_MAX) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_long_session_tracking_mod(topology_st, request): ++ """Verify that a long session_tracking string ++ is added (truncate) during a MOD ++ ++ :id: 6bfcca4b-40b4-4288-9b77-cfa0d4f15c14 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry ++ 2. Modify the test entry with an long session tracking ++ 3. Restart the instance to flush the log ++ 4. Check the exact same string is not present in the access log for the ADD ++ 5. Check the exact same string is present in the access log for the MOD (with '.') ++ :expectedresults: ++ 1. Add should succeed ++ 2. Mod should succeed ++ 3. success ++ 4. Log should not contain a log with that session for the ADD ++ 5. 
Log should contain one log with that session for the MOD ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER_MAX = "MOD long ----->" ++ SESSION_TRACKING_IDENTIFIER = SESSION_TRACKING_IDENTIFIER_MAX + "xxxxxxxx" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_mod," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_del'), ++ ('cn', b'test_del'), ++ ('userPassword', b'test_del'), ++ ]) ++ topology_st.standalone.modify_ext_s(TEST_DN, ++ [(ldap.MOD_REPLACE, 'sn', b'new_sn')], ++ serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s...".*' % SESSION_TRACKING_IDENTIFIER_MAX) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=103.* sid="%s...".*' % SESSION_TRACKING_IDENTIFIER_MAX) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_long_session_tracking_compare(topology_st, request): ++ """Verify that a long session_tracking string ++ is added (truncate) during a compare ++ ++ :id: 840ad60b-d2c5-4375-a50d-1553701d3c22 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry ++ 2. compare an attribute with an exact max length session tracking ++ 3. Restart the instance to flush the log ++ 4. Check the exact same string is not present in the access log for the ADD ++ 5. Check the exact same string is present in the access log for the COMPARE (with '.') ++ :expectedresults: ++ 1. Add should succeed ++ 2. Compare should succeed ++ 3. success ++ 4. Log should not contain a log with that session for the ADD ++ 5. 
Log should contain one log with that session for the COMPARE ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER_MAX = "COMPARE long ->" ++ SESSION_TRACKING_IDENTIFIER = SESSION_TRACKING_IDENTIFIER_MAX + "xxxxxxxx" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_compare," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_compare'), ++ ('cn', b'test_compare'), ++ ('userPassword', b'test_compare'), ++ ]) ++ topology_st.standalone.compare_ext_s(TEST_DN, 'sn', b'test_compare', serverctrls=[st_ctrl]) ++ topology_st.standalone.compare_ext_s(TEST_DN, 'sn', b'test_fail_compare', serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s...".*' % SESSION_TRACKING_IDENTIFIER_MAX) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*err=6 tag=111.* sid="%s...".*' % SESSION_TRACKING_IDENTIFIER_MAX) ++ assert len(access_log_lines) == 1 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*err=5 tag=111.* sid="%s...".*' % SESSION_TRACKING_IDENTIFIER_MAX) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_long_session_tracking_abandon(topology_st, request): ++ """Verify that long session_tracking string ++ is added (truncate) during an abandon ++ ++ :id: bded1fbb-b123-42c5-8d28-9fcf9f19af94 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add 10 test entries ++ 2. Launch Page Search with a window of 3 ++ 3. Abandon the Page Search with long session tracking ++ 4. Restart the instance to flush the log ++ 5. Check the exact same string is not present in the access log for the ADD ++ 6. Check the exact same string is present in the access log for the ABANDON (with '.') ++ :expectedresults: ++ 1. Add should succeed ++ 2. success ++ 3. success ++ 4. success ++ 5. Log should not contain log with that session for the ADDs ++ 6. Log should contain one log with that session for the abandon ++ """ ++ ++ SESSION_TRACKING_IDENTIFIER_MAX = "ABANDON long ->" ++ SESSION_TRACKING_IDENTIFIER = SESSION_TRACKING_IDENTIFIER_MAX + "xxxxxxxx" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ ++ # provision more entries than the page search will fetch ++ entries = [] ++ for i in range(10): ++ TEST_DN = "cn=test_abandon_%d,%s" % (i, DEFAULT_SUFFIX) ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_abandon'), ++ ('cn', b'test_abandon_%d' % i), ++ ('userPassword', b'test_abandon'), ++ ]) ++ entries.append(TEST_DN) ++ ++ # run a page search (with the session) using a small window. So we can abandon it. 
++ req_ctrl = SimplePagedResultsControl(True, size=3, cookie='') ++ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, r'(objectclass=*)', ['cn'], serverctrls=[req_ctrl]) ++ time.sleep(1) ++ topology_st.standalone.abandon_ext(msgid, serverctrls=[st_ctrl]) ++ ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s...".*' % SESSION_TRACKING_IDENTIFIER_MAX) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*ABANDON.* sid="%s...".*' % SESSION_TRACKING_IDENTIFIER_MAX) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ for ent in entries: ++ try: ++ topology_st.standalone.delete_s(ent) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_long_session_tracking_extop(topology_st, request): ++ """Verify that long session_tracking string ++ is added (truncate) during an extended operation ++ ++ :id: a7aa65d2-eed8-4bdd-9786-2379997ff0b7 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. run whoami extop ++ 2. Check the truncated long session string ++ is present in the access log for the EXTOP ++ :expectedresults: ++ 1. success ++ 2. Log should contain one log with that session for the EXTOP ++ """ ++ SESSION_TRACKING_IDENTIFIER_MAX = "Extop long --->" ++ SESSION_TRACKING_IDENTIFIER = SESSION_TRACKING_IDENTIFIER_MAX + "xxxxxxxx" ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ ++ extop = ExtendedRequest(requestName = '1.3.6.1.4.1.4203.1.11.3', requestValue=None) ++ (oid_response, res) = topology_st.standalone.extop_s(extop, serverctrls=[st_ctrl]) ++ ++ topology_st.standalone.restart(timeout=10) ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=120.* sid="%s...".*' % SESSION_TRACKING_IDENTIFIER_MAX) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_escaped_session_tracking_srch(topology_st, request): ++ """Verify that a session_tracking string containing escaped character ++ is added (not truncate) during a search ++ ++ :id: dce83631-7a3f-4af8-a79a-ee81df4b0595 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Do a search with a session tracking string containing escaped character ++ 2. Restart the instance to flush the log ++ 3. Check the exact same string is present in the access log ++ :expectedresults: ++ 1. Search should succeed ++ 2. success ++ 3. 
Log should contain one log with that session ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER_START = "SRCH" ++ SESSION_TRACKING_IDENTIFIER_ORIGINAL = "  " ++ SESSION_TRACKING_IDENTIFIER_ESCAPED = " \\\\06 " ++ SESSION_TRACKING_IDENTIFIER_END = "escape" ++ SESSION_TRACKING_IDENTIFIER = SESSION_TRACKING_IDENTIFIER_START + SESSION_TRACKING_IDENTIFIER_ORIGINAL + SESSION_TRACKING_IDENTIFIER_END ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ ++ topology_st.standalone.search_ext_s(DEFAULT_SUFFIX, ++ ldap.SCOPE_SUBTREE, ++ '(uid=*)', ++ serverctrls=[st_ctrl]) ++ topology_st.standalone.restart(timeout=10) ++ sid_escaped = SESSION_TRACKING_IDENTIFIER_START + SESSION_TRACKING_IDENTIFIER_ESCAPED + SESSION_TRACKING_IDENTIFIER_END ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=101.* sid="%s".*' % sid_escaped) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_escaped_session_tracking_add(topology_st, request): ++ """Verify that a session_tracking string containing escaped character ++ is added (not truncate) during a add ++ ++ :id: df40e5b3-20d9-4a85-a7ad-246e3ec25f4f ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry with a session tracking containing escaped character ++ 2. Restart the instance to flush the log ++ 3. Check the exact same string is present in the access log ++ :expectedresults: ++ 1. Add should succeed ++ 2. success ++ 3. Log should contain one log with that session ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER_START = "ADD" ++ SESSION_TRACKING_IDENTIFIER_ORIGINAL = "  " ++ SESSION_TRACKING_IDENTIFIER_ESCAPED = " \\\\07 " ++ SESSION_TRACKING_IDENTIFIER_END = "escape" ++ SESSION_TRACKING_IDENTIFIER = SESSION_TRACKING_IDENTIFIER_START + SESSION_TRACKING_IDENTIFIER_ORIGINAL + SESSION_TRACKING_IDENTIFIER_END ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_add," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_add'), ++ ('cn', b'test_add'), ++ ('userPassword', b'test_add'), ++ ], ++ serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ sid_escaped = SESSION_TRACKING_IDENTIFIER_START + SESSION_TRACKING_IDENTIFIER_ESCAPED + SESSION_TRACKING_IDENTIFIER_END ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % sid_escaped) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_escaped_session_tracking_del(topology_st, request): ++ """Verify that a session_tracking string containing escaped character ++ is added (not truncate) during a DEL ++ ++ :id: 561c75fc-ae24-42ed-b062-9c994f71e3fc ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry ++ 2. Delete the test entry with a session tracking containing escaped character ++ 3. Restart the instance to flush the log ++ 4. Check the exact same string is not present in the access log for the ADD ++ 5. 
Check the exact same string is present in the access log for the DEL ++ :expectedresults: ++ 1. Add should succeed ++ 2. DEL should succeed ++ 3. success ++ 4. Log should not contain a log with that session for the ADD ++ 5. Log should contain one log with that session for the DEL ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER_START = "DEL" ++ SESSION_TRACKING_IDENTIFIER_ORIGINAL = "  " ++ SESSION_TRACKING_IDENTIFIER_ESCAPED = " \\\\14 " ++ SESSION_TRACKING_IDENTIFIER_END = "escape" ++ SESSION_TRACKING_IDENTIFIER = SESSION_TRACKING_IDENTIFIER_START + SESSION_TRACKING_IDENTIFIER_ORIGINAL + SESSION_TRACKING_IDENTIFIER_END ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_del," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_del'), ++ ('cn', b'test_del'), ++ ('userPassword', b'test_del'), ++ ]) ++ topology_st.standalone.delete_ext_s(TEST_DN, ++ serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ sid_escaped = SESSION_TRACKING_IDENTIFIER_START + SESSION_TRACKING_IDENTIFIER_ESCAPED + SESSION_TRACKING_IDENTIFIER_END ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % sid_escaped) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=107.* sid="%s".*' % sid_escaped) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_escaped_session_tracking_mod(topology_st, request): ++ """Verify that a session_tracking string containing escaped character ++ is added (not truncate) during a MOD ++ ++ :id: ca2ca411-32b4-4a9f-845d-e596f08a849c ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry ++ 2. Modify the test entry with a session tracking containing escaped character ++ 3. Restart the instance to flush the log ++ 4. Check the exact same string is not present in the access log for the ADD ++ 5. Check the exact same string is present in the access log for the MOD ++ :expectedresults: ++ 1. Add should succeed ++ 2. Mod should succeed ++ 3. success ++ 4. Log should not contain a log with that session for the ADD ++ 5. 
Log should contain one log with that session for the MOD ++ """ ++ ++ ++ SESSION_TRACKING_IDENTIFIER_START = "MOD" ++ SESSION_TRACKING_IDENTIFIER_ORIGINAL = "  " ++ SESSION_TRACKING_IDENTIFIER_ESCAPED = " \\\\10 " ++ SESSION_TRACKING_IDENTIFIER_END = "escape" ++ SESSION_TRACKING_IDENTIFIER = SESSION_TRACKING_IDENTIFIER_START + SESSION_TRACKING_IDENTIFIER_ORIGINAL + SESSION_TRACKING_IDENTIFIER_END ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_mod," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_del'), ++ ('cn', b'test_del'), ++ ('userPassword', b'test_del'), ++ ]) ++ topology_st.standalone.modify_ext_s(TEST_DN, ++ [(ldap.MOD_REPLACE, 'sn', b'new_sn')], ++ serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ sid_escaped = SESSION_TRACKING_IDENTIFIER_START + SESSION_TRACKING_IDENTIFIER_ESCAPED + SESSION_TRACKING_IDENTIFIER_END ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % sid_escaped) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=103.* sid="%s".*' % sid_escaped) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_escaped_session_tracking_compare(topology_st, request): ++ """Verify that a session_tracking string containing escaped character ++ is added (not truncate) during a COMPARE ++ ++ :id: 93c13457-5c51-4bec-8e8a-0c6320cd970b ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add a test entry ++ 2. compare an attribute with a session tracking containing escaped character ++ 3. Restart the instance to flush the log ++ 4. Check the exact same string is not present in the access log for the ADD ++ 5. Check the exact same string is present in the access log for the COMPARE ++ :expectedresults: ++ 1. Add should succeed ++ 2. Compare should succeed ++ 3. success ++ 4. Log should not contain a log with that session for the ADD ++ 5. 
Log should contain one log with that session for the COMPARE ++ """ ++ ++ ++ # be careful that the complete string is less than 15 chars ++ SESSION_TRACKING_IDENTIFIER_START = "COMPARE" ++ SESSION_TRACKING_IDENTIFIER_ORIGINAL = "  " ++ SESSION_TRACKING_IDENTIFIER_ESCAPED = " \\\\11 " ++ SESSION_TRACKING_IDENTIFIER_END = "esc" ++ SESSION_TRACKING_IDENTIFIER = SESSION_TRACKING_IDENTIFIER_START + SESSION_TRACKING_IDENTIFIER_ORIGINAL + SESSION_TRACKING_IDENTIFIER_END ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ TEST_DN = "cn=test_compare," + DEFAULT_SUFFIX ++ try: ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_compare'), ++ ('cn', b'test_compare'), ++ ('userPassword', b'test_compare'), ++ ]) ++ topology_st.standalone.compare_ext_s(TEST_DN, 'sn', b'test_compare', serverctrls=[st_ctrl]) ++ topology_st.standalone.compare_ext_s(TEST_DN, 'sn', b'test_fail_compare', serverctrls=[st_ctrl]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) ++ assert False ++ ++ topology_st.standalone.restart(timeout=10) ++ ++ sid_escaped = SESSION_TRACKING_IDENTIFIER_START + SESSION_TRACKING_IDENTIFIER_ESCAPED + SESSION_TRACKING_IDENTIFIER_END ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % sid_escaped) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*err=6 tag=111.* sid="%s".*' % sid_escaped) ++ assert len(access_log_lines) == 1 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*err=5 tag=111.* sid="%s".*' % sid_escaped) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ try: ++ topology_st.standalone.delete_s(TEST_DN) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++ ++def test_escaped_session_tracking_abandon(topology_st, request): ++ """Verify that a session_tracking string containing escaped character ++ is added (not truncate) during an abandon ++ ++ :id: 37a9d1db-ec19-4381-88f3-48eae477cb81 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. Add 10 test entries ++ 2. Launch Page Search with a window of 3 ++ 3. Abandon the Page Search with a session tracking containing escaped character ++ 4. Restart the instance to flush the log ++ 5. Check the exact same string is not present in the access log for the ADD ++ 6. Check the exact same string is present in the access log for the ABANDON ++ :expectedresults: ++ 1. Add should succeed ++ 2. success ++ 3. success ++ 4. success ++ 5. Log should not contain log with that session for the ADDs ++ 6. 
Log should contain one log with that session for the abandon ++ """ ++ ++ # be careful that the complete string is less than 15 chars ++ SESSION_TRACKING_IDENTIFIER_START = "ABANDON" ++ SESSION_TRACKING_IDENTIFIER_ORIGINAL = "  " ++ SESSION_TRACKING_IDENTIFIER_ESCAPED = " \\\\12 " ++ SESSION_TRACKING_IDENTIFIER_END = "es" ++ SESSION_TRACKING_IDENTIFIER = SESSION_TRACKING_IDENTIFIER_START + SESSION_TRACKING_IDENTIFIER_ORIGINAL + SESSION_TRACKING_IDENTIFIER_END ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ ++ # provision more entries than the page search will fetch ++ entries = [] ++ for i in range(10): ++ TEST_DN = "cn=test_abandon_%d,%s" % (i, DEFAULT_SUFFIX) ++ ent = topology_st.standalone.add_ext_s(TEST_DN, ++ [ ++ ('objectClass', b'person'), ++ ('sn', b'test_abandon'), ++ ('cn', b'test_abandon_%d' % i), ++ ('userPassword', b'test_abandon'), ++ ]) ++ entries.append(TEST_DN) ++ ++ # run a page search (with the session) using a small window. So we can abandon it. ++ req_ctrl = SimplePagedResultsControl(True, size=3, cookie='') ++ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, r'(objectclass=*)', ['cn'], serverctrls=[req_ctrl]) ++ time.sleep(1) ++ topology_st.standalone.abandon_ext(msgid, serverctrls=[st_ctrl]) ++ ++ ++ topology_st.standalone.restart(timeout=10) ++ sid_escaped = SESSION_TRACKING_IDENTIFIER_START + SESSION_TRACKING_IDENTIFIER_ESCAPED + SESSION_TRACKING_IDENTIFIER_END ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=105.* sid="%s".*' % sid_escaped) ++ assert len(access_log_lines) == 0 ++ ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*ABANDON.* sid="%s".*' % sid_escaped) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ for ent in entries: ++ try: ++ topology_st.standalone.delete_s(ent) ++ except: ++ pass ++ ++ request.addfinalizer(fin) ++ ++def test_escaped_session_tracking_extop(topology_st, request): ++ """Verify that a session_tracking string containing escaped character ++ is added (not truncate) during an extended operation ++ ++ :id: fd3afce9-86c9-4d87-ab05-9d19f2d733c3 ++ :customerscenario: False ++ :setup: Standalone instance, default backend ++ :steps: ++ 1. run whoami extop ++ 2. Check the exact same string is present in the access log for the EXTOP ++ :expectedresults: ++ 1. success ++ 2. 
Log should contain one log with that session for the EXTOP ++ """ ++ ++ # be careful that the complete string is less than 15 chars ++ SESSION_TRACKING_IDENTIFIER_START = "EXTOP" ++ SESSION_TRACKING_IDENTIFIER_ORIGINAL = "  " ++ SESSION_TRACKING_IDENTIFIER_ESCAPED = " \\\\13 " ++ SESSION_TRACKING_IDENTIFIER_END = "escap" ++ SESSION_TRACKING_IDENTIFIER = SESSION_TRACKING_IDENTIFIER_START + SESSION_TRACKING_IDENTIFIER_ORIGINAL + SESSION_TRACKING_IDENTIFIER_END ++ st_ctrl = SessionTrackingControl( ++ SESSION_SOURCE_IP, ++ SESSION_SOURCE_NAME, ++ SESSION_TRACKING_FORMAT_OID, ++ SESSION_TRACKING_IDENTIFIER ++ ) ++ ++ extop = ExtendedRequest(requestName = '1.3.6.1.4.1.4203.1.11.3', requestValue=None) ++ (oid_response, res) = topology_st.standalone.extop_s(extop, serverctrls=[st_ctrl]) ++ ++ topology_st.standalone.restart(timeout=10) ++ sid_escaped = SESSION_TRACKING_IDENTIFIER_START + SESSION_TRACKING_IDENTIFIER_ESCAPED + SESSION_TRACKING_IDENTIFIER_END ++ access_log_lines = topology_st.standalone.ds_access_log.match('.*tag=120.* sid="%s".*' % sid_escaped) ++ assert len(access_log_lines) == 1 ++ ++ def fin(): ++ pass ++ ++ request.addfinalizer(fin) ++ ++if __name__ == "__main__": ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s -v %s" % CURRENT_FILE) +diff --git a/ldap/servers/slapd/abandon.c b/ldap/servers/slapd/abandon.c +index 8e0ab009b..ba42cf0bc 100644 +--- a/ldap/servers/slapd/abandon.c ++++ b/ldap/servers/slapd/abandon.c +@@ -40,6 +40,13 @@ do_abandon(Slapi_PBlock *pb) + Operation *o; + int32_t log_format = config_get_accesslog_log_format(); + slapd_log_pblock logpb = {0}; ++ char *sessionTrackingId; ++ /* Should fit ++ * - ~10chars for ' sid=\"..\"' ++ * - 15+3 for the truncated sessionID ++ * Need to sync with SESSION_ID_STR_SZ ++ */ ++ char session_str[30] = {0}; + /* Keep a copy of some data because o may vanish once conn is unlocked */ + struct { + struct timespec hr_time_end; +@@ -89,6 +96,25 @@ do_abandon(Slapi_PBlock *pb) + + slapi_log_err(SLAPI_LOG_ARGS, "do_abandon", "id %d\n", id); + ++ slapi_pblock_get(pb, SLAPI_SESSION_TRACKING, &sessionTrackingId); ++ ++ /* prepare session_str to be logged */ ++ if (sessionTrackingId) { ++ if (sizeof(session_str) < (strlen(sessionTrackingId) + 10 + 1)) { ++ /* The session tracking string is too large to fit in 'session_str' ++ * Likely SESSION_ID_STR_SZ was changed without increasing the size of session_str. ++ * Just ignore the session string. ++ */ ++ session_str[0] = '\0'; ++ slapi_log_err(SLAPI_LOG_ERR, "do_abandon", "Too large session tracking string (%ld) - It is ignored\n", ++ strlen(sessionTrackingId)); ++ } else { ++ snprintf(session_str, sizeof(session_str), " sid=\"%s\"", sessionTrackingId); ++ } ++ } else { ++ session_str[0] = '\0'; ++ } ++ + /* + * find the operation being abandoned and set the o_abandon + * flag. We don't allow the operation to abandon itself. 
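For reference, here is a rough standalone Python model, an illustration only and not code taken from the server or from lib389, of how the tests above expect a session tracking identifier to surface in the access log: control characters are escaped and anything longer than 15 escaped characters is cut down and suffixed with "...". The constant name, the helper name and the exact \NN hex escape format are assumptions made for this sketch.

    SESSION_ID_LOG_LIMIT = 15  # assumed to mirror the server-side limit used in the patch

    def expected_sid_fragment(identifier: bytes) -> str:
        """Approximate the ' sid="..."' fragment appended to RESULT/ABANDON log lines."""
        escaped = ''.join(
            chr(b) if 0x20 <= b < 0x7f else '\\%02x' % b  # assumed \NN hex escaping
            for b in identifier
        )
        if len(escaped) > SESSION_ID_LOG_LIMIT:
            # keep the first 15 escaped characters and mark the truncation
            escaped = escaped[:SESSION_ID_LOG_LIMIT] + '...'
        return ' sid="%s"' % escaped

    # Long identifiers are truncated:
    #   expected_sid_fragment(b"MOD long ----->xxxxxxxx")  ->  sid="MOD long ----->..."
    # Short ones are kept whole, with control characters escaped:
    #   expected_sid_fragment(b"ADD \x07 escape")          ->  sid="ADD \07 escape"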
+@@ -158,8 +184,8 @@ do_abandon(Slapi_PBlock *pb) + slapd_log_access_abandon(&logpb); + } else { + slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 +- " op=%d ABANDON targetop=Simple Paged Results msgid=%d\n", +- pb_conn->c_connid, pb_op->o_opid, id); ++ " op=%d ABANDON targetop=Simple Paged Results msgid=%d%s\n", ++ pb_conn->c_connid, pb_op->o_opid, id, session_str); + } + } else if (NULL == o) { + if (log_format != LOG_FORMAT_DEFAULT) { +@@ -168,8 +194,8 @@ do_abandon(Slapi_PBlock *pb) + slapd_log_access_abandon(&logpb); + } else { + slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d ABANDON" +- " targetop=NOTFOUND msgid=%d\n", +- pb_conn->c_connid, pb_op->o_opid, id); ++ " targetop=NOTFOUND msgid=%d%s\n", ++ pb_conn->c_connid, pb_op->o_opid, id, session_str); + } + } else if (suppressed_by_plugin) { + if (log_format != LOG_FORMAT_DEFAULT) { +@@ -178,8 +204,8 @@ do_abandon(Slapi_PBlock *pb) + slapd_log_access_abandon(&logpb); + } else { + slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d ABANDON" +- " targetop=SUPPRESSED-BY-PLUGIN msgid=%d\n", +- pb_conn->c_connid, pb_op->o_opid, id); ++ " targetop=SUPPRESSED-BY-PLUGIN msgid=%d%s\n", ++ pb_conn->c_connid, pb_op->o_opid, id, session_str); + } + } else { + if (log_format != LOG_FORMAT_DEFAULT) { +@@ -194,10 +220,10 @@ do_abandon(Slapi_PBlock *pb) + slapd_log_access_abandon(&logpb); + } else { + slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d ABANDON" +- " targetop=%d msgid=%d nentries=%d etime=%" PRId64 ".%010" PRId64 "\n", ++ " targetop=%d msgid=%d nentries=%d etime=%" PRId64 ".%010" PRId64 "%s\n", + pb_conn->c_connid, pb_op->o_opid, o_copy.opid, id, + o_copy.nentries, (int64_t)o_copy.hr_time_end.tv_sec, +- (int64_t)o_copy.hr_time_end.tv_nsec); ++ (int64_t)o_copy.hr_time_end.tv_nsec, session_str); + } + } + /* +diff --git a/ldap/servers/slapd/control.c b/ldap/servers/slapd/control.c +index fa60c18dd..538a387da 100644 +--- a/ldap/servers/slapd/control.c ++++ b/ldap/servers/slapd/control.c +@@ -91,6 +91,10 @@ init_controls(void) + /* LDAP_CONTROL_PAGEDRESULTS is shared by request and response */ + slapi_register_supported_control(LDAP_CONTROL_PAGEDRESULTS, + SLAPI_OPERATION_SEARCH); ++ ++ /* LDAP_CONTROL_X_SESSION_TRACKING only supported by request */ ++ slapi_register_supported_control(LDAP_CONTROL_X_SESSION_TRACKING, ++ SLAPI_OPERATION_BIND | SLAPI_OPERATION_UNBIND | SLAPI_OPERATION_ABANDON | SLAPI_OPERATION_EXTENDED | SLAPI_OPERATION_SEARCH | SLAPI_OPERATION_COMPARE | SLAPI_OPERATION_ADD | SLAPI_OPERATION_DELETE | SLAPI_OPERATION_MODIFY | SLAPI_OPERATION_MODDN); + } + + +@@ -162,6 +166,97 @@ slapi_get_supported_controls_copy(char ***ctrloidsp, unsigned long **ctrlopsp) + return (0); + } + ++/* Parse the Session Tracking control ++ * see https://datatracker.ietf.org/doc/html/draft-wahl-ldap-session-03 ++ * LDAPString ::= OCTET STRING -- UTF-8 encoded ++ * LDAPOID ::= OCTET STRING -- Constrained to numericoid ++ * ++ * SessionIdentifierControlValue ::= SEQUENCE { ++ * sessionSourceIp LDAPString, ++ * sessionSourceName LDAPString, ++ * formatOID LDAPOID, ++ * sessionTrackingIdentifier LDAPString ++ * } ++ * ++ * design https://www.port389.org/docs/389ds/design/session-identifier-in-logs.html ++ * ++ * It ignores sessionSourceIp, sessionSourceName and formatOID. 
++ * It extracts the 15 first chars from sessionTrackingIdentifier (escaped) ++ * and return them in session_tracking_id (allocated buffer) ++ * The caller is responsible of the free of session_tracking_id ++ */ ++static int ++parse_sessiontracking_ctrl(struct berval *session_tracking_spec, char **session_tracking_id) ++{ ++ BerElement *ber = NULL; ++ ber_tag_t ber_rc; ++ struct berval sessionTrackingIdentifier = {0}; ++#define SESSION_ID_STR_SZ 15 ++#define NB_DOTS 3 ++ char buf_sid_orig[SESSION_ID_STR_SZ + 2] = {0}; ++ int32_t sid_orig_sz; /* size of the original sid that we retain */ ++ const char *buf_sid_escaped; ++ int32_t sid_escaped_sz; /* size of the escaped sid that we retain */ ++ char buf[BUFSIZ]; ++ char *sid; ++ int rc = LDAP_SUCCESS; ++ ++ if (!BV_HAS_DATA(session_tracking_spec)) { ++ return LDAP_PROTOCOL_ERROR; ++ } ++ ber = ber_init(session_tracking_spec); ++ if ((ber == NULL) || (session_tracking_id == NULL)) { ++ return LDAP_OPERATIONS_ERROR; ++ } ++ ++ *session_tracking_id = NULL; ++ ++ /* Discard sessionSourceIp, sessionSourceName and formatOID ++ * Then only get sessionTrackingIdentifier and truncate it if needed */ ++ ber_rc = ber_scanf(ber, "{xxxo}", &sessionTrackingIdentifier); ++ if ((ber_rc == LBER_ERROR) || (sessionTrackingIdentifier.bv_len > 65536)) { ++ rc = LDAP_PROTOCOL_ERROR; ++ goto free_and_return; ++ } ++ ++ /* Make sure the interesting part of the provided SID is escaped */ ++ if (sessionTrackingIdentifier.bv_len > SESSION_ID_STR_SZ) { ++ sid_orig_sz = SESSION_ID_STR_SZ + 1; ++ } else { ++ sid_orig_sz = sessionTrackingIdentifier.bv_len; ++ memcpy(buf_sid_orig, sessionTrackingIdentifier.bv_val, sessionTrackingIdentifier.bv_len); ++ } ++ memcpy(buf_sid_orig, sessionTrackingIdentifier.bv_val, sid_orig_sz); ++ buf_sid_escaped = escape_string(buf_sid_orig, buf); ++ ++ /* Allocate the buffer that contains the heading portion ++ * of the escaped SID ++ */ ++ sid_escaped_sz = strlen(buf_sid_escaped); ++ if (sid_escaped_sz > SESSION_ID_STR_SZ) { ++ /* Take only a portion of it plus some '.' */ ++ sid_escaped_sz = SESSION_ID_STR_SZ + NB_DOTS; ++ } ++ sid = (char *) slapi_ch_calloc(1, sid_escaped_sz + 1); ++ ++ /* Lets copy the escaped SID into the buffer */ ++ if (sid_escaped_sz > SESSION_ID_STR_SZ) { ++ memcpy(sid, buf_sid_escaped, SESSION_ID_STR_SZ); ++ memset(sid + SESSION_ID_STR_SZ, '.', NB_DOTS); /* ending the string with "..." */ ++ } else { ++ memcpy(sid, buf_sid_escaped, sid_escaped_sz); ++ } ++ sid[sid_escaped_sz] = '\0'; ++ ++ *session_tracking_id = sid; ++ return rc; ++ ++free_and_return: ++ slapi_ch_free_string(&sessionTrackingIdentifier.bv_val); ++ ++ return rc; ++} ++ + /* + * RFC 4511 section 4.1.11. Controls says that the UnbindRequest + * MUST ignore the criticality field of controls +@@ -346,9 +441,16 @@ get_ldapmessage_controls_ext( + slapi_pblock_set(pb, SLAPI_REQCONTROLS, NULL); + slapi_pblock_set(pb, SLAPI_MANAGEDSAIT, &ctrl_not_found); + slapi_pblock_set(pb, SLAPI_PWPOLICY, &ctrl_not_found); ++ slapi_pblock_set(pb, SLAPI_SESSION_TRACKING, NULL); + slapi_log_err(SLAPI_LOG_CONNS, "get_ldapmessage_controls_ext", "Warning: conn=%" PRIu64 " op=%d contains an empty list of controls\n", + pb_conn ? pb_conn->c_connid : -1, pb_op ? pb_op->o_opid : -1); + } else { ++ struct berval *session_tracking_spec = NULL; ++ int iscritical = 0; ++ char *session_tracking_id = NULL; ++ char *old_sid; ++ int parse_rc = 0; ++ + /* len, ber_len_t is uint, not int, cannot be != -1, may be better to remove this check. 
*/ + if ((tag != LBER_END_OF_SEQORSET) && (len != -1)) { + goto free_and_return; +@@ -358,6 +460,31 @@ get_ldapmessage_controls_ext( + managedsait = slapi_control_present(ctrls, + LDAP_CONTROL_MANAGEDSAIT, NULL, NULL); + slapi_pblock_set(pb, SLAPI_MANAGEDSAIT, &managedsait); ++ if (slapi_control_present(ctrls, ++ LDAP_CONTROL_X_SESSION_TRACKING, &session_tracking_spec, &iscritical)) { ++ Operation *pb_op = NULL; ++ slapi_pblock_get(pb, SLAPI_OPERATION, &pb_op); ++ ++ if (iscritical) { ++ /* It must not be critical */ ++ slapi_log_err(SLAPI_LOG_ERR, "get_ldapmessage_controls_ext", "conn=%" PRIu64 " op=%d SessionTracking critical flag must be unset\n", ++ pb_conn ? pb_conn->c_connid : -1, pb_op ? pb_op->o_opid : -1); ++ rc = LDAP_UNAVAILABLE_CRITICAL_EXTENSION; ++ goto free_and_return; ++ } ++ parse_rc = parse_sessiontracking_ctrl(session_tracking_spec, &session_tracking_id); ++ if (parse_rc != LDAP_SUCCESS) { ++ slapi_log_err(SLAPI_LOG_WARNING, "get_ldapmessage_controls_ext", "Warning: conn=%" PRIu64 " op=%d failed to parse SessionTracking control (%d)\n", ++ pb_conn ? pb_conn->c_connid : -1, pb_op ? pb_op->o_opid : -1, parse_rc); ++ slapi_ch_free_string(&session_tracking_id); ++ } else { ++ /* now replace the sid (if any) in the pblock */ ++ slapi_pblock_get(pb, SLAPI_SESSION_TRACKING, &old_sid); ++ slapi_ch_free_string(&old_sid); ++ slapi_pblock_set(pb, SLAPI_SESSION_TRACKING, session_tracking_id); ++ } ++ } ++ slapi_pblock_set(pb, SLAPI_SESSION_TRACKING, session_tracking_id); + pwpolicy_ctrl = slapi_control_present(ctrls, + LDAP_X_CONTROL_PWPOLICY_REQUEST, NULL, NULL); + slapi_pblock_set(pb, SLAPI_PWPOLICY, &pwpolicy_ctrl); +diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c +index ec820ec70..bedb4f6ff 100644 +--- a/ldap/servers/slapd/pblock.c ++++ b/ldap/servers/slapd/pblock.c +@@ -218,6 +218,7 @@ pblock_done(Slapi_PBlock *pb) + if (pb->pb_intop != NULL) { + delete_passwdPolicy(&pb->pb_intop->pwdpolicy); + slapi_ch_free((void **)&(pb->pb_intop->pb_result_text)); ++ slapi_ch_free_string(&pb->pb_intop->pb_session_tracking_id); + } + slapi_ch_free((void **)&(pb->pb_intop)); + if (pb->pb_intplugin != NULL) { +@@ -1552,6 +1553,13 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value) + (*(int *)value) = 0; + } + break; ++ case SLAPI_SESSION_TRACKING: ++ if (pblock->pb_intop != NULL) { ++ (*(char **)value) = pblock->pb_intop->pb_session_tracking_id; ++ } else { ++ (*(char **)value) = 0; ++ } ++ break; + case SLAPI_PWPOLICY: + if (pblock->pb_intop != NULL) { + (*(int *)value) = pblock->pb_intop->pb_pwpolicy_ctrl; +@@ -3485,6 +3493,10 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) + _pblock_assert_pb_intop(pblock); + pblock->pb_intop->pb_managedsait = *((int *)value); + break; ++ case SLAPI_SESSION_TRACKING: ++ _pblock_assert_pb_intop(pblock); ++ pblock->pb_intop->pb_session_tracking_id = (char *)value; ++ break; + case SLAPI_PWPOLICY: + _pblock_assert_pb_intop(pblock); + pblock->pb_intop->pb_pwpolicy_ctrl = *((int *)value); +diff --git a/ldap/servers/slapd/pblock_v3.h b/ldap/servers/slapd/pblock_v3.h +index ef15ee457..1b4996c48 100644 +--- a/ldap/servers/slapd/pblock_v3.h ++++ b/ldap/servers/slapd/pblock_v3.h +@@ -161,6 +161,9 @@ typedef struct _slapi_pblock_intop + /* For password policy control */ + int pb_pwpolicy_ctrl; + ++ /* For Session Tracking control */ ++ char *pb_session_tracking_id; ++ + int pb_paged_results_index; /* stash SLAPI_PAGED_RESULTS_INDEX */ + int pb_paged_results_cookie; /* stash SLAPI_PAGED_RESULTS_COOKIE */ + int32_t 
pb_usn_tombstone_incremented; /* stash SLAPI_PAGED_RESULTS_COOKIE */ +diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c +index 98c01d8d0..c0abbeef1 100644 +--- a/ldap/servers/slapd/result.c ++++ b/ldap/servers/slapd/result.c +@@ -2184,10 +2184,35 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries + time_t start_time; + int32_t log_format = config_get_accesslog_log_format(); + slapd_log_pblock logpb = {0}; ++ char *sessionTrackingId; ++ /* Should fit ++ * - ~10chars for ' sid=\"..\"' ++ * - 15+3 for the truncated sessionID ++ * Need to sync with SESSION_ID_STR_SZ ++ */ ++ char session_str[30] = {0}; + + get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count, &start_time); + slapi_pblock_get(pb, SLAPI_PAGED_RESULTS_INDEX, &pr_idx); + slapi_pblock_get(pb, SLAPI_PAGED_RESULTS_COOKIE, &pr_cookie); ++ slapi_pblock_get(pb, SLAPI_SESSION_TRACKING, &sessionTrackingId); ++ ++ /* prepare session_str to be logged */ ++ if (sessionTrackingId) { ++ if (sizeof(session_str) < (strlen(sessionTrackingId) + 10 + 1)) { ++ /* The session tracking string is too large to fit in 'session_str' ++ * Likely SESSION_ID_STR_SZ was changed without increasing the size of session_str. ++ * Just ignore the session string. ++ */ ++ session_str[0] = '\0'; ++ slapi_log_err(SLAPI_LOG_ERR, "log_result", "Too large session tracking string (%ld) - It is ignored\n", ++ strlen(sessionTrackingId)); ++ } else { ++ snprintf(session_str, sizeof(session_str), " sid=\"%s\"", sessionTrackingId); ++ } ++ } else { ++ session_str[0] = '\0'; ++ } + internal_op = operation_is_flag_set(op, OP_FLAG_INTERNAL); + + /* total elapsed time */ +@@ -2251,13 +2276,13 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries + } else { + slapi_log_access(LDAP_DEBUG_STATS, + "conn=%" PRIu64 " op=%d RESULT err=%d" +- " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s" ++ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s" + ", SASL bind in progress\n", + op->o_connid, + op->o_opid, + err, tag, nentries, + wtime, optime, etime, +- notes_str, csn_str); ++ notes_str, csn_str, session_str); + } + } else { + if (log_format != LOG_FORMAT_DEFAULT) { +@@ -2267,7 +2292,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries + logpb.msg = "SASL bind in progress"; + slapd_log_access_result(&logpb); + } else { +-#define LOG_SASLMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s, SASL bind in progress\n" ++#define LOG_SASLMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s, SASL bind in progress\n" + slapi_log_access(LDAP_DEBUG_ARGS, + connid == 0 ? 
LOG_CONN_OP_FMT_INT_INT LOG_SASLMSG_FMT : + LOG_CONN_OP_FMT_EXT_INT LOG_SASLMSG_FMT, +@@ -2277,7 +2302,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries + op_nested_count, + err, tag, nentries, + wtime, optime, etime, +- notes_str, csn_str); ++ notes_str, csn_str, session_str); + } + } + } else if (op->o_tag == LDAP_REQ_BIND && err == LDAP_SUCCESS) { +@@ -2296,13 +2321,13 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries + slapi_pblock_get(pb, SLAPI_CONN_DN, &dn); + slapi_log_access(LDAP_DEBUG_STATS, + "conn=%" PRIu64 " op=%d RESULT err=%d" +- " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s" ++ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s" + " dn=\"%s\"\n", + op->o_connid, + op->o_opid, + err, tag, nentries, + wtime, optime, etime, +- notes_str, csn_str, dn ? dn : ""); ++ notes_str, csn_str, session_str, dn ? dn : ""); + slapi_ch_free_string(&dn); + } + } else { +@@ -2314,7 +2339,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries + slapd_log_access_result(&logpb); + } else { + slapi_pblock_get(pb, SLAPI_CONN_DN, &dn); +-#define LOG_BINDMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s dn=\"%s\"\n" ++#define LOG_BINDMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s dn=\"%s\"\n" + slapi_log_access(LDAP_DEBUG_ARGS, + connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_BINDMSG_FMT : + LOG_CONN_OP_FMT_EXT_INT LOG_BINDMSG_FMT, +@@ -2324,7 +2349,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries + op_nested_count, + err, tag, nentries, + wtime, optime, etime, +- notes_str, csn_str, dn ? dn : ""); ++ notes_str, csn_str, session_str, dn ? dn : ""); + slapi_ch_free_string(&dn); + } + } +@@ -2339,13 +2364,13 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries + } else { + slapi_log_access(LDAP_DEBUG_STATS, + "conn=%" PRIu64 " op=%d RESULT err=%d" +- " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s" ++ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s" + " pr_idx=%d pr_cookie=%d\n", + op->o_connid, + op->o_opid, + err, tag, nentries, + wtime, optime, etime, +- notes_str, csn_str, pr_idx, pr_cookie); ++ notes_str, csn_str, session_str, pr_idx, pr_cookie); + } + } else { + if (log_format != LOG_FORMAT_DEFAULT) { +@@ -2356,7 +2381,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries + logpb.pr_cookie = pr_cookie; + slapd_log_access_result(&logpb); + } else { +-#define LOG_PRMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s pr_idx=%d pr_cookie=%d \n" ++#define LOG_PRMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s pr_idx=%d pr_cookie=%d \n" + slapi_log_access(LDAP_DEBUG_ARGS, + connid == 0 ? 
LOG_CONN_OP_FMT_INT_INT LOG_PRMSG_FMT : + LOG_CONN_OP_FMT_EXT_INT LOG_PRMSG_FMT, +@@ -2366,7 +2391,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries + op_nested_count, + err, tag, nentries, + wtime, optime, etime, +- notes_str, csn_str, pr_idx, pr_cookie); ++ notes_str, csn_str, session_str, pr_idx, pr_cookie); + } + } + } else if (!internal_op) { +@@ -2387,12 +2412,12 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries + } else { + slapi_log_access(LDAP_DEBUG_STATS, + "conn=%" PRIu64 " op=%d RESULT err=%d" +- " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s\n", ++ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s%s\n", + op->o_connid, + op->o_opid, + err, tag, nentries, + wtime, optime, etime, +- notes_str, csn_str, ext_str); ++ notes_str, csn_str, ext_str, session_str); + } + if (pbtxt) { + /* if !pbtxt ==> ext_str == "". Don't free ext_str. */ +@@ -2408,7 +2433,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries + logpb.op_nested_count = op_nested_count; + slapd_log_access_result(&logpb); + } else { +-#define LOG_MSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s\n" ++#define LOG_MSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s\n" + slapi_log_access(LDAP_DEBUG_ARGS, + connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_MSG_FMT : + LOG_CONN_OP_FMT_EXT_INT LOG_MSG_FMT, +@@ -2418,7 +2443,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries + op_nested_count, + err, tag, nentries, + wtime, optime, etime, +- notes_str, csn_str); ++ notes_str, csn_str, session_str); + } + /* + * If this is an unindexed search we should log it in the error log if +diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h +index 18b4cbc67..de461706c 100644 +--- a/ldap/servers/slapd/slapi-plugin.h ++++ b/ldap/servers/slapd/slapi-plugin.h +@@ -7284,6 +7284,7 @@ typedef struct slapi_plugindesc + /* controls we know about */ + #define SLAPI_MANAGEDSAIT 1000 + #define SLAPI_PWPOLICY 1001 ++#define SLAPI_SESSION_TRACKING 1002 + + /* arguments that are common to all operation */ + #define SLAPI_TARGET_SDN 47 /* target sdn of the operation */ +-- +2.48.0 + diff --git a/389-ds-base.spec b/389-ds-base.spec index d3e0678..fa46705 100644 --- a/389-ds-base.spec +++ b/389-ds-base.spec @@ -506,6 +506,18 @@ Source4: 389-ds-base.sysusers Source5: https://fedorapeople.org/groups/389ds/libdb-5.3.28-59.tar.bz2 %endif +Patch: 0001-Issue-6544-logconv.py-python3-magic-conflicts-with-p.patch +Patch: 0002-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch +Patch: 0003-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch +Patch: 0004-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch +Patch: 0005-Issue-6436-MOD-on-a-large-group-slow-if-substring-in.patch +Patch: 0006-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch +Patch: 0007-Issue-6258-Mitigate-race-condition-in-paged_results_.patch +Patch: 0008-Issue-6229-After-an-initial-failure-subsequent-onlin.patch +Patch: 0009-Issue-6554-During-import-of-entries-without-nsUnique.patch +Patch: 0010-Issue-6596-BUG-Compilation-Regresion-6597.patch +Patch: 0011-Issue-6367-RFE-support-of-Session-Tracking-Control-i.patch + %description 389 Directory Server is an LDAPv3 compliant server. The base package includes the LDAP server and command line utilities for server administration. 
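Once the rebuilt package is installed, the new control can be exercised with a few lines of python-ldap. The snippet below is a minimal smoke test sketched for illustration, not part of the shipped test suite; it assumes python-ldap's ldap.controls.sessiontrack module (the same one the tests above rely on), an instance listening on ldap://localhost:389, anonymous access to the root DSE, and an existing dc=example,dc=com suffix.

    import ldap
    from ldap.controls.sessiontrack import (
        SessionTrackingControl,
        SESSION_TRACKING_CONTROL_OID,
        SESSION_TRACKING_FORMAT_OID_USERNAME,
    )

    conn = ldap.initialize("ldap://localhost:389")  # adjust the URL for the instance

    # The patched server should now advertise the control in the root DSE.
    rootdse = conn.search_ext_s("", ldap.SCOPE_BASE, attrlist=["supportedControl"])[0][1]
    assert SESSION_TRACKING_CONTROL_OID.encode() in rootdse["supportedControl"]

    # Tag a search; the matching RESULT line in the access log should carry
    # sid="cli tool #1234" (14 characters, so it is logged without truncation).
    st_ctrl = SessionTrackingControl(
        "192.0.2.1",             # sessionSourceIp
        "client.example.com",    # sessionSourceName
        SESSION_TRACKING_FORMAT_OID_USERNAME,
        "cli tool #1234",        # sessionTrackingIdentifier
    )
    conn.search_ext_s("dc=example,dc=com", ldap.SCOPE_SUBTREE, "(uid=*)",
                      serverctrls=[st_ctrl])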
diff --git a/main.fmf b/main.fmf index e7231a2..338f547 100644 --- a/main.fmf +++ b/main.fmf @@ -14,4 +14,4 @@ /test: /upstream_basic: test: pytest -v /root/ds/dirsrvtests/tests/suites/basic/basic_test.py - duration: 30m + duration: 60m
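With requests tagged this way, correlating everything a given client sent comes down to grepping the access log for its sid. The helper below is a throwaway sketch for illustration; the log path and the default access log format are assumptions, and nothing like it ships in lib389.

    import re

    def grep_session(access_log_path, sid):
        """Return the access log lines carrying a given session tracking id."""
        pattern = re.compile(r'conn=\d+ op=\d+ .*sid="%s"' % re.escape(sid))
        hits = []
        with open(access_log_path) as logfile:
            for line in logfile:
                if pattern.search(line):
                    hits.append(line.rstrip())
        return hits

    # e.g. grep_session("/var/log/dirsrv/slapd-standalone1/access", "cli tool #1234")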