Bump version to 2.6.1-3
- Resolves: RHEL-18333 Can't rename users member of automember rule
- Resolves: RHEL-61341 After an initial failure, subsequent online backups will not work.
- Resolves: RHEL-63887 nsslapd-mdb-max-dbs autotuning doesn't work properly
- Resolves: RHEL-63891 dbscan crashes when showing statistics for MDB
- Resolves: RHEL-63998 dsconf should check for number of available named databases
- Resolves: RHEL-78344 During import of entries without nsUniqueId, a supplier generates duplicate nsUniqueId (LMDB only) [rhel-9]
commit 728c3df389 (parent 0c554777ab)
@@ -1,60 +0,0 @@
From 0ff5aa641d619bdcc154c2c94f8f8180bcaec776 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 29 Aug 2024 10:49:57 +0200
Subject: [PATCH] Issue 6312 - In branch 2.5, healthcheck report an invalid
 warning regarding BDB deprecation (#6313)

Bug description:
during healthcheck, _lint_backend_implementation checks that
the instance is not running a BDB backend.
This check only applies for instance after 3.0.0

Fix description:
If the instance is newer than 3.0.0 the health check
just returns

relates: #6312

Reviewed by:
---
 dirsrvtests/tests/suites/healthcheck/healthcheck_test.py | 1 +
 src/lib389/lib389/backend.py                             | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py b/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py
index 29cca187e..66cf3c7d3 100644
--- a/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py
+++ b/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py
@@ -556,6 +556,7 @@ def test_lint_backend_implementation_wrong_files(topology_st):


 @pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not needed for mdb")
+@pytest.mark.skipif(ds_is_older("3.0.0"), reason="mdb and bdb are both supported")
 def test_lint_backend_implementation(topology_st):
     """Test the lint for backend implementation mismatch

diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index caee88e6a..0ed00a4a7 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -14,7 +14,7 @@ from lib389._constants import DN_LDBM, DN_CHAIN, DN_PLUGIN, DEFAULT_BENAME
 from lib389.properties import BACKEND_OBJECTCLASS_VALUE, BACKEND_PROPNAME_TO_ATTRNAME, BACKEND_CHAIN_BIND_DN, \
     BACKEND_CHAIN_BIND_PW, BACKEND_CHAIN_URLS, BACKEND_PROPNAME_TO_ATTRNAME, BACKEND_NAME, \
     BACKEND_SUFFIX, BACKEND_SAMPLE_ENTRIES, TASK_WAIT
-from lib389.utils import normalizeDN, ensure_str, assert_c
+from lib389.utils import normalizeDN, ensure_str, assert_c, ds_is_newer
 from lib389 import Entry

 # Need to fix this ....
@@ -513,7 +513,7 @@ class Backend(DSLdapObject):

     def _lint_backend_implementation(self):
         backend_impl = self._instance.get_db_lib()
-        if backend_impl == 'bdb':
+        if backend_impl == 'bdb' and ds_is_newer('3.0.0', instance=self._instance):
             result = DSBLE0006
             result['items'] = [self.lint_uid()]
             yield result
--
2.46.0
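The patch above keys the lint on the server version. A minimal sketch of that guard pattern follows; the real lib389 helpers get_db_lib() and ds_is_newer() operate on a DirSrv instance, so the plain version-string comparison below is an assumption made to keep the sketch self-contained, not the shipped implementation.

    def ds_is_newer(threshold, version):
        # Assumed simplification: compare dotted version strings numerically.
        as_tuple = lambda v: tuple(int(part) for part in v.split('.'))
        return as_tuple(version) >= as_tuple(threshold)

    def lint_backend_implementation(db_lib, server_version):
        # Warn about a BDB backend only on 3.0.0+ servers, where BDB is
        # deprecated; a 2.5 server running BDB is legitimate and stays silent.
        if db_lib == 'bdb' and ds_is_newer('3.0.0', server_version):
            yield {'report': 'DSBLE0006'}

    assert list(lint_backend_implementation('bdb', '2.5.2')) == []  # no false positive
    assert list(lint_backend_implementation('bdb', '3.0.1')) != []  # still reported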
@@ -1,237 +0,0 @@
From af27f433ec14bcaf070108ab0b6af64ad1153a11 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Fri, 6 Sep 2024 18:07:17 +0200
Subject: [PATCH] Issue 6316 - lmdb reindex is broken if index type is
 specified (#6318)

While reindexing using task or offline reindex, if the attribute name contains the index type (for example :eq,pres)
Then the attribute is not reindexed. Problem occurs when lmdb is used, things are working fine with bdb.
Solution: strip the index type in reindex as it is done in bdb case.
Anyway the reindex design requires that for a given attribute all the configured index types must be rebuild.

Issue: #6316

Reviewed by: @tbordaz, @droideck (Thanks!)
---
 .../tests/suites/indexes/regression_test.py | 141 +++++++++++++++++-
 .../slapd/back-ldbm/db-mdb/mdb_import.c     |  10 +-
 2 files changed, 147 insertions(+), 4 deletions(-)

diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
index c385f5ca4..b077b529a 100644
--- a/dirsrvtests/tests/suites/indexes/regression_test.py
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
@@ -10,6 +10,9 @@ import time
 import os
 import pytest
 import ldap
+import logging
+import glob
+import re
 from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX
 from lib389.backend import Backend, Backends, DatabaseConfig
 from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate
@@ -31,6 +34,8 @@ SUFFIX2 = 'dc=example2,dc=com'
 BENAME2 = 'be2'

 DEBUGGING = os.getenv("DEBUGGING", default=False)
+logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)


 @pytest.fixture(scope="function")
@@ -83,6 +88,7 @@ def add_a_group_with_users(request, topo):
             'cn': USER_NAME,
             'uidNumber': f'{num}',
             'gidNumber': f'{num}',
+            'description': f'Description for {USER_NAME}',
             'homeDirectory': f'/home/{USER_NAME}'
         })
         users_list.append(user)
@@ -95,9 +101,10 @@ def add_a_group_with_users(request, topo):
         # If the server crashed, start it again to do the cleanup
         if not topo.standalone.status():
             topo.standalone.start()
-        for user in users_list:
-            user.delete()
-        group.delete()
+        if not DEBUGGING:
+            for user in users_list:
+                user.delete()
+            group.delete()

     request.addfinalizer(fin)

@@ -124,6 +131,38 @@ def set_small_idlistscanlimit(request, topo):

     request.addfinalizer(fin)

+
+@pytest.fixture(scope="function")
+def set_description_index(request, topo, add_a_group_with_users):
+    """
+    Set some description values and description index without reindexing.
+    """
+    inst = topo.standalone
+    backends = Backends(inst)
+    backend = backends.get(DEFAULT_BENAME)
+    indexes = backend.get_indexes()
+    attr = 'description'
+
+    def fin(always=False):
+        if always or not DEBUGGING:
+            try:
+                idx = indexes.get(attr)
+                idx.delete()
+            except ldap.NO_SUCH_OBJECT:
+                pass
+
+    request.addfinalizer(fin)
+    fin(always=True)
+    index = indexes.create(properties={
+        'cn': attr,
+        'nsSystemIndex': 'false',
+        'nsIndexType': ['eq', 'pres', 'sub']
+    })
+    # Restart needed with lmdb (to open the dbi handle)
+    inst.restart()
+    return (indexes, attr)
+
+
 #unstable or unstatus tests, skipped for now
 @pytest.mark.flaky(max_runs=2, min_passes=1)
 @pytest.mark.skipif(ds_is_older("1.4.4.4"), reason="Not implemented")
@@ -347,6 +386,102 @@ def test_task_status(topo):
     assert reindex_task.get_exit_code() == 0


+def count_keys(inst, bename, attr, prefix=''):
+    indexfile = os.path.join(inst.dbdir, bename, attr + '.db')
+    # (bdb - we should also accept a version number for .db suffix)
+    for f in glob.glob(f'{indexfile}*'):
+        indexfile = f
+
+    inst.stop()
+    output = inst.dbscan(None, None, args=['-f', indexfile, '-A'], stopping=False).decode()
+    inst.start()
+    count = 0
+    regexp = f'^KEY: {re.escape(prefix)}'
+    for match in re.finditer(regexp, output, flags=re.MULTILINE):
+        count += 1
+    log.info(f"count_keys found {count} keys starting with '{prefix}' in {indexfile}")
+    return count
+
+
+def test_reindex_task_with_type(topo, set_description_index):
+    """Check that reindex task works as expected when index type is specified.
+
+    :id: 0c7f2fda-69f6-11ef-9eb8-083a88554478
+    :setup: Standalone instance
+            - with 100 users having description attribute
+            - with description:eq,pres,sub index entry but not yet reindexed
+    :steps:
+        1. Set description in suffix entry
+        2. Count number of equality keys in description index
+        3. Start a Reindex task on description:eq,pres and wait for completion
+        4. Check the task status and exit code
+        5. Count the equality, presence and substring keys in description index
+        6. Start a Reindex task on description and wait for completion
+        7. Check the task status and exit code
+        8. Count the equality, presence and substring keys in description index
+
+    :expectedresults:
+        1. Success
+        2. Should be either no key (bdb) or a single one (lmdb)
+        3. Success
+        4. Success
+        5. Should have: more equality keys than in step 2
+                        one presence key
+                        some substrings keys
+        6. Success
+        7. Success
+        8. Should have same counts than in step 5
+    """
+    (indexes, attr) = set_description_index
+    inst = topo.standalone
+    if not inst.is_dbi_supported():
+        pytest.skip('This test requires that dbscan supports -A option')
+    # modify indexed value
+    Domain(inst, DEFAULT_SUFFIX).replace(attr, f'test_before_reindex')
+
+    keys1 = count_keys(inst, DEFAULT_BENAME, attr, prefix='=')
+    assert keys1 <= 1
+
+    tasks = Tasks(topo.standalone)
+    # completed reindex tasks MUST have a status because freeipa check it.
+
+    # Reindex attr with eq,pres types
+    log.info(f'Reindex {attr} with eq,pres types')
+    tasks.reindex(
+        suffix=DEFAULT_SUFFIX,
+        attrname=f'{attr}:eq,pres',
+        args={TASK_WAIT: True}
+    )
+    reindex_task = Task(topo.standalone, tasks.dn)
+    assert reindex_task.status()
+    assert reindex_task.get_exit_code() == 0
+
+    keys2e = count_keys(inst, DEFAULT_BENAME, attr, prefix='=')
+    keys2p = count_keys(inst, DEFAULT_BENAME, attr, prefix='+')
+    keys2s = count_keys(inst, DEFAULT_BENAME, attr, prefix='*')
+    assert keys2e > keys1
+    assert keys2p > 0
+    assert keys2s > 0
+
+    # Reindex attr without types
+    log.info(f'Reindex {attr} without types')
+    tasks.reindex(
+        suffix=DEFAULT_SUFFIX,
+        attrname=attr,
+        args={TASK_WAIT: True}
+    )
+    reindex_task = Task(topo.standalone, tasks.dn)
+    assert reindex_task.status()
+    assert reindex_task.get_exit_code() == 0
+
+    keys3e = count_keys(inst, DEFAULT_BENAME, attr, prefix='=')
+    keys3p = count_keys(inst, DEFAULT_BENAME, attr, prefix='+')
+    keys3s = count_keys(inst, DEFAULT_BENAME, attr, prefix='*')
+    assert keys3e == keys2e
+    assert keys3p == keys2p
+    assert keys3s == keys2s
+
+
 def test_task_and_be(topo, add_backend_and_ldif_50K_users):
     """Check that backend is writable after finishing a tasks

diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c
index d57146953..ce2151174 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c
@@ -1150,6 +1150,8 @@ process_db2index_attrs(Slapi_PBlock *pb, ImportCtx_t *ctx)
      * TBD
      */
     char **attrs = NULL;
+    char *attrname = NULL;
+    char *pt = NULL;
     int i;

     slapi_pblock_get(pb, SLAPI_DB2INDEX_ATTRS, &attrs);
@@ -1157,7 +1159,13 @@ process_db2index_attrs(Slapi_PBlock *pb, ImportCtx_t *ctx)
     for (i = 0; attrs && attrs[i]; i++) {
         switch (attrs[i][0]) {
         case 't': /* attribute type to index */
-            slapi_ch_array_add(&ctx->indexAttrs, slapi_ch_strdup(attrs[i] + 1));
+            attrname = slapi_ch_strdup(attrs[i] + 1);
+            /* Strip index type */
+            pt = strchr(attrname, ':');
+            if (pt != NULL) {
+                *pt = '\0';
+            }
+            slapi_ch_array_add(&ctx->indexAttrs, attrname);
             break;
         case 'T': /* VLV Search to index */
             slapi_ch_array_add(&ctx->indexVlvs, get_vlv_dbname(attrs[i] + 1));
--
2.46.0
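The C fix above truncates the attribute specification at the first ':' before queuing it for reindex, so 'description:eq,pres' selects the same index as 'description'. A short Python sketch of that normalization (the helper name is illustrative, not part of the server):

    def strip_index_type(attr_spec):
        # 'description:eq,pres' -> 'description'; a bare name passes through.
        name, _sep, _types = attr_spec.partition(':')
        return name

    assert strip_index_type('description:eq,pres') == 'description'
    assert strip_index_type('cn') == 'cn'

This mirrors the strchr()/'\0' truncation in mdb_import.c and matches the bdb code path, which already stripped the type suffix; reindexing always rebuilds every configured index type for the attribute, so dropping the suffix loses nothing.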
0003-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch (new file, 311 lines)
@@ -0,0 +1,311 @@
From f077f9692d1625a1bc2dc6ee02a4fca71ee30b03 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Wed, 13 Nov 2024 15:31:35 +0100
Subject: [PATCH] Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work
 properly (#6400)

* Issue 6374 - nsslapd-mdb-max-dbs autotuning doesn't work properly

Several issues:

After restarting the server nsslapd-mdb-max-dbs may not be high enough to add a new backend
because the value computation is wrong.
dbscan fails to open the database if nsslapd-mdb-max-dbs has been increased.
dbscan crashes when closing the database (typically when using -S)
When starting the instance the nsslapd-mdb-max-dbs parameter is increased to ensure that a new backend may be added.
When dse.ldif path is not specified, the db environment is now open using the INFO.mdb data instead of using the default values.
synchronization between thread closure and database context destruction is hardened
Issue: #6374

Reviewed by: @tbordaz , @vashirov (Thanks!)

(cherry picked from commit 56cd3389da608a3f6eeee58d20dffbcd286a8033)
---
 .../tests/suites/config/config_test.py        | 86 +++++++++++++++++++
 ldap/servers/slapd/back-ldbm/back-ldbm.h      |  2 +
 .../slapd/back-ldbm/db-mdb/mdb_config.c       | 17 ++--
 .../back-ldbm/db-mdb/mdb_import_threads.c     |  9 +-
 .../slapd/back-ldbm/db-mdb/mdb_instance.c     |  8 ++
 ldap/servers/slapd/back-ldbm/dbimpl.c         |  2 +-
 ldap/servers/slapd/back-ldbm/import.c         | 14 ++-
 7 files changed, 128 insertions(+), 10 deletions(-)

diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index 57b155af7..34dac36b6 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -17,6 +17,7 @@ from lib389.topologies import topology_m2, topology_st as topo
 from lib389.utils import *
 from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX, DEFAULT_BENAME
 from lib389._mapped_object import DSLdapObjects
+from lib389.agreement import Agreements
 from lib389.cli_base import FakeArgs
 from lib389.cli_conf.backend import db_config_set
 from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
@@ -27,6 +28,8 @@ from lib389.cos import CosPointerDefinitions, CosTemplates
 from lib389.backend import Backends, DatabaseConfig
 from lib389.monitor import MonitorLDBM, Monitor
 from lib389.plugins import ReferentialIntegrityPlugin
+from lib389.replica import BootstrapReplicationManager, Replicas
+from lib389.passwd import password_generate

 pytestmark = pytest.mark.tier0

@@ -36,6 +39,8 @@ PSTACK_CMD = '/usr/bin/pstack'
 logging.getLogger(__name__).setLevel(logging.INFO)
 log = logging.getLogger(__name__)

+DEBUGGING = os.getenv("DEBUGGING", default=False)
+
 @pytest.fixture(scope="module")
 def big_file():
     TEMP_BIG_FILE = ''
@@ -811,6 +816,87 @@ def test_numlisteners_limit(topo):
     assert numlisteners[0] == '4'


+def bootstrap_replication(inst_from, inst_to, creds):
+    manager = BootstrapReplicationManager(inst_to)
+    rdn_val = 'replication manager'
+    if manager.exists():
+        manager.delete()
+    manager.create(properties={
+        'cn': rdn_val,
+        'uid': rdn_val,
+        'userPassword': creds
+    })
+    for replica in Replicas(inst_to).list():
+        replica.remove_all('nsDS5ReplicaBindDNGroup')
+        replica.replace('nsDS5ReplicaBindDN', manager.dn)
+    for agmt in Agreements(inst_from).list():
+        agmt.replace('nsDS5ReplicaBindDN', manager.dn)
+        agmt.replace('nsDS5ReplicaCredentials', creds)
+
+
+@pytest.mark.skipif(get_default_db_lib() != "mdb", reason="This test requires lmdb")
+def test_lmdb_autotuned_maxdbs(topology_m2, request):
+    """Verify that after restart, nsslapd-mdb-max-dbs is large enough to add a new backend.
+
+    :id: 0272d432-9080-11ef-8f40-482ae39447e5
+    :setup: Two suppliers configuration
+    :steps:
+        1. loop 20 times
+        3. In 1 loop: restart instance
+        3. In 1 loop: add a new backend
+        4. In 1 loop: check that instance is still alive
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+    """
+
+    s1 = topology_m2.ms["supplier1"]
+    s2 = topology_m2.ms["supplier2"]
+
+    backends = Backends(s1)
+    db_config = DatabaseConfig(s1)
+    # Generate the teardown finalizer
+    belist = []
+    creds=password_generate()
+    bootstrap_replication(s2, s1, creds)
+    bootstrap_replication(s1, s2, creds)
+
+    def fin():
+        s1.start()
+        for be in belist:
+            be.delete()
+
+    if not DEBUGGING:
+        request.addfinalizer(fin)
+
+    # 1. Set autotuning (off-line to be able to decrease the value)
+    s1.stop()
+    dse_ldif = DSEldif(s1)
+    dse_ldif.replace(db_config.dn, 'nsslapd-mdb-max-dbs', '0')
+    os.remove(f'{s1.dbdir}/data.mdb')
+    s1.start()
+
+    # 2. Reinitialize the db:
+    log.info("Bulk import...")
+    agmt = Agreements(s2).list()[0]
+    agmt.begin_reinit()
+    (done, error) = agmt.wait_reinit()
+    log.info(f'Bulk importresult is ({done}, {error})')
+    assert done is True
+    assert error is False
+
+    # 3. loop 20 times
+    for idx in range(20):
+        s1.restart()
+        log.info(f'Adding backend test{idx}')
+        belist.append(backends.create(properties={'cn': f'test{idx}',
+                                                  'nsslapd-suffix': f'dc=test{idx}'}))
+        assert s1.status()
+
+
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index 8fea63e35..35d0ece04 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -896,4 +896,6 @@ typedef struct _back_search_result_set
     ((L)->size == (R)->size && !memcmp((L)->data, (R)->data, (L)->size))

 typedef int backend_implement_init_fn(struct ldbminfo *li, config_info *config_array);
+
+pthread_mutex_t *get_import_ctx_mutex();
 #endif /* _back_ldbm_h_ */
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
index 351f54037..1f7b71442 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
@@ -83,7 +83,7 @@ dbmdb_compute_limits(struct ldbminfo *li)
     uint64_t total_space = 0;
     uint64_t avail_space = 0;
     uint64_t cur_dbsize = 0;
-    int nbchangelogs = 0;
+    int nbvlvs = 0;
     int nbsuffixes = 0;
     int nbindexes = 0;
     int nbagmt = 0;
@@ -99,8 +99,8 @@ dbmdb_compute_limits(struct ldbminfo *li)
      * But some tunable may be autotuned.
      */
     if (dbmdb_count_config_entries("(objectClass=nsMappingTree)", &nbsuffixes) ||
-        dbmdb_count_config_entries("(objectClass=nsIndex)", &nbsuffixes) ||
-        dbmdb_count_config_entries("(&(objectClass=nsds5Replica)(nsDS5Flags=1))", &nbchangelogs) ||
+        dbmdb_count_config_entries("(objectClass=nsIndex)", &nbindexes) ||
+        dbmdb_count_config_entries("(objectClass=vlvIndex)", &nbvlvs) ||
         dbmdb_count_config_entries("(objectClass=nsds5replicationagreement)", &nbagmt)) {
         /* error message is already logged */
         return 1;
@@ -120,8 +120,15 @@ dbmdb_compute_limits(struct ldbminfo *li)

     info->pagesize = sysconf(_SC_PAGE_SIZE);
     limits->min_readers = config_get_threadnumber() + nbagmt + DBMDB_READERS_MARGIN;
-    /* Default indexes are counted in "nbindexes" so we should always have enough resource to add 1 new suffix */
-    limits->min_dbs = nbsuffixes + nbindexes + nbchangelogs + DBMDB_DBS_MARGIN;
+    /*
+     * For each suffix there are 4 databases instances:
+     * long-entryrdn, replication_changelog, id2entry and ancestorid
+     * then the indexes and the vlv and vlv cache
+     *
+     * Default indexes are counted in "nbindexes" so we should always have enough
+     * resource to add 1 new suffix
+     */
+    limits->min_dbs = 4*nbsuffixes + nbindexes + 2*nbvlvs + DBMDB_DBS_MARGIN;

     total_space = ((uint64_t)(buf.f_blocks)) * ((uint64_t)(buf.f_bsize));
     avail_space = ((uint64_t)(buf.f_bavail)) * ((uint64_t)(buf.f_bsize));
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
index 8c879da31..707a110c5 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
@@ -4312,9 +4312,12 @@ dbmdb_import_init_writer(ImportJob *job, ImportRole_t role)
 void
 dbmdb_free_import_ctx(ImportJob *job)
 {
-    if (job->writer_ctx) {
-        ImportCtx_t *ctx = job->writer_ctx;
-        job->writer_ctx = NULL;
+    ImportCtx_t *ctx = NULL;
+    pthread_mutex_lock(get_import_ctx_mutex());
+    ctx = job->writer_ctx;
+    job->writer_ctx = NULL;
+    pthread_mutex_unlock(get_import_ctx_mutex());
+    if (ctx) {
         pthread_mutex_destroy(&ctx->workerq.mutex);
         pthread_cond_destroy(&ctx->workerq.cv);
         slapi_ch_free((void**)&ctx->workerq.slots);
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
index 6386ecf06..05f1e348d 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
@@ -287,6 +287,13 @@ int add_dbi(dbi_open_ctx_t *octx, backend *be, const char *fname, int flags)
         slapi_ch_free((void**)&treekey.dbname);
         return octx->rc;
     }
+    if (treekey.dbi >= ctx->dsecfg.max_dbs) {
+        octx->rc = MDB_DBS_FULL;
+        slapi_log_err(SLAPI_LOG_ERR, "add_dbi", "Failed to open database instance %s slots: %d/%d. Error is %d: %s.\n",
+                      treekey.dbname, treekey.dbi, ctx->dsecfg.max_dbs, octx->rc, mdb_strerror(octx->rc));
+        slapi_ch_free((void**)&treekey.dbname);
+        return octx->rc;
+    }
     if (octx->ai && octx->ai->ai_key_cmp_fn) {
         octx->rc = dbmdb_update_dbi_cmp_fn(ctx, &treekey, octx->ai->ai_key_cmp_fn, octx->txn);
         if (octx->rc) {
@@ -689,6 +696,7 @@ int dbmdb_make_env(dbmdb_ctx_t *ctx, int readOnly, mdb_mode_t mode)
         rc = dbmdb_write_infofile(ctx);
     } else {
         /* No Config ==> read it from info file */
+        ctx->dsecfg = ctx->startcfg;
     }
     if (rc) {
         return rc;
diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
index da4a4548e..42f4a0718 100644
--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
@@ -463,7 +463,7 @@ int dblayer_show_statistics(const char *dbimpl_name, const char *dbhome, FILE *f
     li->li_plugin = be->be_database;
     li->li_plugin->plg_name = (char*) "back-ldbm-dbimpl";
     li->li_plugin->plg_libpath = (char*) "libback-ldbm";
-    li->li_directory = (char*)dbhome;
+    li->li_directory = get_li_directory(dbhome);

     /* Initialize database plugin */
     rc = dbimpl_setup(li, dbimpl_name);
diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
index 2bb8cb581..30ec462fa 100644
--- a/ldap/servers/slapd/back-ldbm/import.c
+++ b/ldap/servers/slapd/back-ldbm/import.c
@@ -27,6 +27,9 @@
 #define NEED_DN_NORM_SP -25
 #define NEED_DN_NORM_BT -26

+/* Protect against import context destruction */
+static pthread_mutex_t import_ctx_mutex = PTHREAD_MUTEX_INITIALIZER;
+

 /********** routines to manipulate the entry fifo **********/

@@ -143,6 +146,14 @@ ldbm_back_wire_import(Slapi_PBlock *pb)

 /* Threads management */

+/* Return the mutex that protects against import context destruction */
+pthread_mutex_t *
+get_import_ctx_mutex()
+{
+    return &import_ctx_mutex;
+}
+
+
 /* tell all the threads to abort */
 void
 import_abort_all(ImportJob *job, int wait_for_them)
@@ -151,7 +162,7 @@ import_abort_all(ImportJob *job, int wait_for_them)

     /* tell all the worker threads to abort */
     job->flags |= FLAG_ABORT;
-
+    pthread_mutex_lock(&import_ctx_mutex);
     for (worker = job->worker_list; worker; worker = worker->next)
         worker->command = ABORT;

@@ -167,6 +178,7 @@ import_abort_all(ImportJob *job, int wait_for_them)
             }
         }
     }
+    pthread_mutex_unlock(&import_ctx_mutex);
 }
--
2.48.0
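The corrected autotuning in dbmdb_compute_limits() sizes the LMDB named-database table from the configuration: four database instances per suffix (long-entryrdn, replication_changelog, id2entry and ancestorid), one per index, two per VLV index (the vlv and its cache), plus a margin. A Python sketch of that computation; the DBMDB_DBS_MARGIN value below is an assumed placeholder, the real constant is defined in the server sources.

    DBMDB_DBS_MARGIN = 10  # assumed value, for illustration only

    def min_dbs(nbsuffixes, nbindexes, nbvlvs):
        # Mirrors the fixed limits->min_dbs formula in the patch above.
        return 4 * nbsuffixes + nbindexes + 2 * nbvlvs + DBMDB_DBS_MARGIN

    # One suffix with 9 regular indexes and no VLV index:
    print(min_dbs(1, 9, 0))  # -> 23, leaving headroom to add a new backend

The old formula counted each suffix only once and ignored VLV indexes, so after a restart the computed value could be too small to open every existing database plus one new backend.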
0004-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch (new file, 894 lines)
@@ -0,0 +1,894 @@
|
||||
From b53faa9e7289383bbc02fc260b1b34958a317fdd Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Fri, 6 Sep 2024 14:45:06 +0200
|
||||
Subject: [PATCH] Issue 6090 - Fix dbscan options and man pages (#6315)
|
||||
|
||||
* Issue 6090 - Fix dbscan options and man pages
|
||||
|
||||
dbscan -d option is dangerously confusing as it removes a database instance while in db_stat it identify the database
|
||||
(cf issue #5609 ).
|
||||
This fix implements long options in dbscan, rename -d in --remove, and requires a new --do-it option for action that change the database content.
|
||||
The fix should also align both the usage and the dbscan man page with the new set of options
|
||||
|
||||
Issue: #6090
|
||||
|
||||
Reviewed by: @tbordaz, @droideck (Thanks!)
|
||||
|
||||
(cherry picked from commit 25e1d16887ebd299dfe0088080b9ee0deec1e41f)
|
||||
---
|
||||
dirsrvtests/tests/suites/clu/dbscan_test.py | 253 ++++++++++++++++++
|
||||
.../tests/suites/clu/repl_monitor_test.py | 4 +-
|
||||
.../slapd/back-ldbm/db-bdb/bdb_layer.c | 12 +-
|
||||
ldap/servers/slapd/back-ldbm/dbimpl.c | 50 +++-
|
||||
ldap/servers/slapd/tools/dbscan.c | 182 ++++++++++---
|
||||
man/man1/dbscan.1 | 74 +++--
|
||||
src/lib389/lib389/__init__.py | 9 +-
|
||||
src/lib389/lib389/cli_ctl/dblib.py | 13 +-
|
||||
8 files changed, 531 insertions(+), 66 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/clu/dbscan_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dbscan_test.py b/dirsrvtests/tests/suites/clu/dbscan_test.py
|
||||
new file mode 100644
|
||||
index 000000000..2c9a9651a
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/clu/dbscan_test.py
|
||||
@@ -0,0 +1,253 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import logging
|
||||
+import os
|
||||
+import pytest
|
||||
+import re
|
||||
+import subprocess
|
||||
+import sys
|
||||
+
|
||||
+from lib389 import DirSrv
|
||||
+from lib389._constants import DBSCAN
|
||||
+from lib389.topologies import topology_m2 as topo_m2
|
||||
+from difflib import context_diff
|
||||
+
|
||||
+pytestmark = pytest.mark.tier0
|
||||
+
|
||||
+logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
+
|
||||
+
|
||||
+class CalledProcessUnexpectedReturnCode(subprocess.CalledProcessError):
|
||||
+ def __init__(self, result, expected_rc):
|
||||
+ super().__init__(cmd=result.args, returncode=result.returncode, output=result.stdout, stderr=result.stderr)
|
||||
+ self.expected_rc = expected_rc
|
||||
+ self.result = result
|
||||
+
|
||||
+ def __str__(self):
|
||||
+ return f'Command {self.result.args} returned {self.result.returncode} instead of {self.expected_rc}'
|
||||
+
|
||||
+
|
||||
+class DbscanPaths:
|
||||
+ @staticmethod
|
||||
+ def list_instances(inst, dblib, dbhome):
|
||||
+ # compute db instance pathnames
|
||||
+ instances = dbscan(['-D', dblib, '-L', dbhome], inst=inst).stdout
|
||||
+ dbis = []
|
||||
+ if dblib == 'bdb':
|
||||
+ pattern = r'^ (.*) $'
|
||||
+ prefix = f'{dbhome}/'
|
||||
+ else:
|
||||
+ pattern = r'^ (.*) flags:'
|
||||
+ prefix = f''
|
||||
+ for match in re.finditer(pattern, instances, flags=re.MULTILINE):
|
||||
+ dbis.append(prefix+match.group(1))
|
||||
+ return dbis
|
||||
+
|
||||
+ @staticmethod
|
||||
+ def list_options(inst):
|
||||
+ # compute supported options
|
||||
+ options = []
|
||||
+ usage = dbscan(['-h'], inst=inst, expected_rc=None).stdout
|
||||
+ pattern = r'^\s+(?:(-[^-,]+), +)?(--[^ ]+).*$'
|
||||
+ for match in re.finditer(pattern, usage, flags=re.MULTILINE):
|
||||
+ for idx in range(1,3):
|
||||
+ if match.group(idx) is not None:
|
||||
+ options.append(match.group(idx))
|
||||
+ return options
|
||||
+
|
||||
+ def __init__(self, inst):
|
||||
+ dblib = inst.get_db_lib()
|
||||
+ dbhome = inst.ds_paths.db_home_dir
|
||||
+ self.inst = inst
|
||||
+ self.dblib = dblib
|
||||
+ self.dbhome = dbhome
|
||||
+ self.options = DbscanPaths.list_options(inst)
|
||||
+ self.dbis = DbscanPaths.list_instances(inst, dblib, dbhome)
|
||||
+ self.ldif_dir = inst.ds_paths.ldif_dir
|
||||
+
|
||||
+ def get_dbi(self, attr, backend='userroot'):
|
||||
+ for dbi in self.dbis:
|
||||
+ if f'{backend}/{attr}.'.lower() in dbi.lower():
|
||||
+ return dbi
|
||||
+ raise KeyError(f'Unknown dbi {backend}/{attr}')
|
||||
+
|
||||
+ def __repr__(self):
|
||||
+ attrs = ['inst', 'dblib', 'dbhome', 'ldif_dir', 'options', 'dbis' ]
|
||||
+ res = ", ".join(map(lambda x: f'{x}={self.__dict__[x]}', attrs))
|
||||
+ return f'DbscanPaths({res})'
|
||||
+
|
||||
+
|
||||
+def dbscan(args, inst=None, expected_rc=0):
|
||||
+ if inst is None:
|
||||
+ prefix = os.environ.get('PREFIX', "")
|
||||
+ prog = f'{prefix}/bin/dbscan'
|
||||
+ else:
|
||||
+ prog = os.path.join(inst.ds_paths.bin_dir, DBSCAN)
|
||||
+ args.insert(0, prog)
|
||||
+ output = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
+ log.debug(f'{args} result is {output.returncode} output is {output.stdout}')
|
||||
+ if expected_rc is not None and expected_rc != output.returncode:
|
||||
+ raise CalledProcessUnexpectedReturnCode(output, expected_rc)
|
||||
+ return output
|
||||
+
|
||||
+
|
||||
+def log_export_file(filename):
|
||||
+ with open(filename, 'r') as file:
|
||||
+ log.debug(f'=========== Dump of {filename} ================')
|
||||
+ for line in file:
|
||||
+ log.debug(line.rstrip('\n'))
|
||||
+ log.debug(f'=========== Enf of {filename} =================')
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope='module')
|
||||
+def paths(topo_m2, request):
|
||||
+ inst = topo_m2.ms["supplier1"]
|
||||
+ if sys.version_info < (3,5):
|
||||
+ pytest.skip('requires python version >= 3.5')
|
||||
+ paths = DbscanPaths(inst)
|
||||
+ if '--do-it' not in paths.options:
|
||||
+ pytest.skip('Not supported with this dbscan version')
|
||||
+ inst.stop()
|
||||
+ return paths
|
||||
+
|
||||
+
|
||||
+def test_dbscan_destructive_actions(paths, request):
|
||||
+ """Test that dbscan remove/import actions
|
||||
+
|
||||
+ :id: f40b0c42-660a-11ef-9544-083a88554478
|
||||
+ :setup: Stopped standalone instance
|
||||
+ :steps:
|
||||
+ 1. Export cn instance with dbscan
|
||||
+ 2. Run dbscan --remove ...
|
||||
+ 3. Check the error message about missing --do-it
|
||||
+ 4. Check that cn instance is still present
|
||||
+ 5. Run dbscan -I import_file ...
|
||||
+ 6. Check it was properly imported
|
||||
+ 7. Check that cn instance is still present
|
||||
+ 8. Run dbscan --remove ... --doit
|
||||
+ 9. Check the error message about missing --do-it
|
||||
+ 10. Check that cn instance is still present
|
||||
+ 11. Run dbscan -I import_file ... --do-it
|
||||
+ 12. Check it was properly imported
|
||||
+ 13. Check that cn instance is still present
|
||||
+ 14. Export again the database
|
||||
+ 15. Check that content of export files are the same
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. dbscan return code should be 1 (error)
|
||||
+ 3. Error message should be present
|
||||
+ 4. cn instance should be present
|
||||
+ 5. dbscan return code should be 1 (error)
|
||||
+ 6. Error message should be present
|
||||
+ 7. cn instance should be present
|
||||
+ 8. dbscan return code should be 0 (success)
|
||||
+ 9. Error message should not be present
|
||||
+ 10. cn instance should not be present
|
||||
+ 11. dbscan return code should be 0 (success)
|
||||
+ 12. Error message should not be present
|
||||
+ 13. cn instance should be present
|
||||
+ 14. Success
|
||||
+ 15. Export files content should be the same
|
||||
+ """
|
||||
+
|
||||
+ # Export cn instance with dbscan
|
||||
+ export_cn = f'{paths.ldif_dir}/dbscan_cn.data'
|
||||
+ export_cn2 = f'{paths.ldif_dir}/dbscan_cn2.data'
|
||||
+ cndbi = paths.get_dbi('replication_changelog')
|
||||
+ inst = paths.inst
|
||||
+ dblib = paths.dblib
|
||||
+ exportok = False
|
||||
+ def fin():
|
||||
+ if os.path.exists(export_cn):
|
||||
+ # Restore cn if it was exported successfully but does not exists any more
|
||||
+ if exportok and cndbi not in DbscanPaths.list_instances(inst, dblib, paths.dbhome):
|
||||
+ dbscan(['-D', dblib, '-f', cndbi, '-I', export_cn, '--do-it'], inst=inst)
|
||||
+ if not DEBUGGING:
|
||||
+ os.remove(export_cn)
|
||||
+ if os.path.exists(export_cn) and not DEBUGGING:
|
||||
+ os.remove(export_cn2)
|
||||
+
|
||||
+ fin()
|
||||
+ request.addfinalizer(fin)
|
||||
+ dbscan(['-D', dblib, '-f', cndbi, '-X', export_cn], inst=inst)
|
||||
+ exportok = True
|
||||
+
|
||||
+ expected_msg = "without specifying '--do-it' parameter."
|
||||
+
|
||||
+ # Run dbscan --remove ...
|
||||
+ result = dbscan(['-D', paths.dblib, '--remove', '-f', cndbi],
|
||||
+ inst=paths.inst, expected_rc=1)
|
||||
+
|
||||
+ # Check the error message about missing --do-it
|
||||
+ assert expected_msg in result.stdout
|
||||
+
|
||||
+ # Check that cn instance is still present
|
||||
+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
|
||||
+ assert cndbi in curdbis
|
||||
+
|
||||
+ # Run dbscan -I import_file ...
|
||||
+ result = dbscan(['-D', paths.dblib, '-f', cndbi, '-I', export_cn],
|
||||
+ inst=paths.inst, expected_rc=1)
|
||||
+
|
||||
+ # Check the error message about missing --do-it
|
||||
+ assert expected_msg in result.stdout
|
||||
+
|
||||
+ # Check that cn instance is still present
|
||||
+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
|
||||
+ assert cndbi in curdbis
|
||||
+
|
||||
+ # Run dbscan --remove ... --doit
|
||||
+ result = dbscan(['-D', paths.dblib, '--remove', '-f', cndbi, '--do-it'],
|
||||
+ inst=paths.inst, expected_rc=0)
|
||||
+
|
||||
+ # Check the error message about missing --do-it
|
||||
+ assert expected_msg not in result.stdout
|
||||
+
|
||||
+ # Check that cn instance is still present
|
||||
+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
|
||||
+ assert cndbi not in curdbis
|
||||
+
|
||||
+ # Run dbscan -I import_file ... --do-it
|
||||
+ result = dbscan(['-D', paths.dblib, '-f', cndbi,
|
||||
+ '-I', export_cn, '--do-it'],
|
||||
+ inst=paths.inst, expected_rc=0)
|
||||
+
|
||||
+ # Check the error message about missing --do-it
|
||||
+ assert expected_msg not in result.stdout
|
||||
+
|
||||
+ # Check that cn instance is still present
|
||||
+ curdbis = DbscanPaths.list_instances(paths.inst, paths.dblib, paths.dbhome)
|
||||
+ assert cndbi in curdbis
|
||||
+
|
||||
+ # Export again the database
|
||||
+ dbscan(['-D', dblib, '-f', cndbi, '-X', export_cn2], inst=inst)
|
||||
+
|
||||
+ # Check that content of export files are the same
|
||||
+ with open(export_cn) as f1:
|
||||
+ f1lines = f1.readlines()
|
||||
+ with open(export_cn2) as f2:
|
||||
+ f2lines = f2.readlines()
|
||||
+ diffs = list(context_diff(f1lines, f2lines))
|
||||
+ if len(diffs) > 0:
|
||||
+ log.debug("Export file differences are:")
|
||||
+ for d in diffs:
|
||||
+ log.debug(d)
|
||||
+ log_export_file(export_cn)
|
||||
+ log_export_file(export_cn2)
|
||||
+ assert diffs is None
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main("-s %s" % CURRENT_FILE)
|
||||
diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
|
||||
index d83416847..842dd96fd 100644
|
||||
--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
|
||||
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
|
||||
@@ -77,13 +77,13 @@ def get_hostnames_from_log(port1, port2):
|
||||
# search for Supplier :hostname:port
|
||||
# and use \D to insure there is no more number is after
|
||||
# the matched port (i.e that 10 is not matching 101)
|
||||
- regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)'
|
||||
+ regexp = '(Supplier: )([^:]*)(:' + str(port1) + r'\D)'
|
||||
match=re.search(regexp, logtext)
|
||||
host_m1 = 'localhost.localdomain'
|
||||
if (match is not None):
|
||||
host_m1 = match.group(2)
|
||||
# Same for supplier 2
|
||||
- regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)'
|
||||
+ regexp = '(Supplier: )([^:]*)(:' + str(port2) + r'\D)'
|
||||
match=re.search(regexp, logtext)
|
||||
host_m2 = 'localhost.localdomain'
|
||||
if (match is not None):
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
index de6be0f42..4b30e8e87 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
@@ -5820,8 +5820,16 @@ bdb_import_file_name(ldbm_instance *inst)
|
||||
static char *
|
||||
bdb_restore_file_name(struct ldbminfo *li)
|
||||
{
|
||||
- char *fname = slapi_ch_smprintf("%s/../.restore", li->li_directory);
|
||||
-
|
||||
+ char *pt = strrchr(li->li_directory, '/');
|
||||
+ char *fname = NULL;
|
||||
+ if (pt == NULL) {
|
||||
+ fname = slapi_ch_strdup(".restore");
|
||||
+ } else {
|
||||
+ size_t len = pt-li->li_directory;
|
||||
+ fname = slapi_ch_malloc(len+10);
|
||||
+ strncpy(fname, li->li_directory, len);
|
||||
+ strcpy(fname+len, "/.restore");
|
||||
+ }
|
||||
return fname;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
|
||||
index 42f4a0718..134d06480 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
|
||||
@@ -397,7 +397,48 @@ const char *dblayer_op2str(dbi_op_t op)
|
||||
return str[idx];
|
||||
}
|
||||
|
||||
-/* Open db env, db and db file privately */
|
||||
+/* Get the li_directory directory from the database instance name -
|
||||
+ * Caller should free the returned value
|
||||
+ */
|
||||
+static char *
|
||||
+get_li_directory(const char *fname)
|
||||
+{
|
||||
+ /*
|
||||
+ * li_directory is an existing directory.
|
||||
+ * it can be fname or its parent or its greatparent
|
||||
+ * in case of problem returns the provided name
|
||||
+ */
|
||||
+ char *lid = slapi_ch_strdup(fname);
|
||||
+ struct stat sbuf = {0};
|
||||
+ char *pt = NULL;
|
||||
+ for (int count=0; count<3; count++) {
|
||||
+ if (stat(lid, &sbuf) == 0) {
|
||||
+ if (S_ISDIR(sbuf.st_mode)) {
|
||||
+ return lid;
|
||||
+ }
|
||||
+ /* Non directory existing file could be regular
|
||||
+ * at the first iteration otherwise it is an error.
|
||||
+ */
|
||||
+ if (count>0 || !S_ISREG(sbuf.st_mode)) {
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
+ pt = strrchr(lid, '/');
|
||||
+ if (pt == NULL) {
|
||||
+ slapi_ch_free_string(&lid);
|
||||
+ return slapi_ch_strdup(".");
|
||||
+ }
|
||||
+ *pt = '\0';
|
||||
+ }
|
||||
+ /*
|
||||
+ * Error case. Returns a copy of the original string:
|
||||
+ * and let dblayer_private_open_fn fail to open the database
|
||||
+ */
|
||||
+ slapi_ch_free_string(&lid);
|
||||
+ return slapi_ch_strdup(fname);
|
||||
+}
|
||||
+
|
||||
+/* Open db env, db and db file privately (for dbscan) */
|
||||
int dblayer_private_open(const char *plgname, const char *dbfilename, int rw, Slapi_Backend **be, dbi_env_t **env, dbi_db_t **db)
|
||||
{
|
||||
struct ldbminfo *li;
|
||||
@@ -412,7 +453,7 @@ int dblayer_private_open(const char *plgname, const char *dbfilename, int rw, Sl
|
||||
li->li_plugin = (*be)->be_database;
|
||||
li->li_plugin->plg_name = (char*) "back-ldbm-dbimpl";
|
||||
li->li_plugin->plg_libpath = (char*) "libback-ldbm";
|
||||
- li->li_directory = slapi_ch_strdup(dbfilename);
|
||||
+ li->li_directory = get_li_directory(dbfilename);
|
||||
|
||||
/* Initialize database plugin */
|
||||
rc = dbimpl_setup(li, plgname);
|
||||
@@ -439,7 +480,10 @@ int dblayer_private_close(Slapi_Backend **be, dbi_env_t **env, dbi_db_t **db)
|
||||
}
|
||||
slapi_ch_free((void**)&li->li_dblayer_private);
|
||||
slapi_ch_free((void**)&li->li_dblayer_config);
|
||||
- ldbm_config_destroy(li);
|
||||
+ if (dblayer_is_lmdb(*be)) {
|
||||
+ /* Generate use after free and double free in bdb case */
|
||||
+ ldbm_config_destroy(li);
|
||||
+ }
|
||||
slapi_ch_free((void**)&(*be)->be_database);
|
||||
slapi_ch_free((void**)&(*be)->be_instance_info);
|
||||
slapi_ch_free((void**)be);
|
||||
diff --git a/ldap/servers/slapd/tools/dbscan.c b/ldap/servers/slapd/tools/dbscan.c
|
||||
index 2d28dd951..12edf7c5b 100644
|
||||
--- a/ldap/servers/slapd/tools/dbscan.c
|
||||
+++ b/ldap/servers/slapd/tools/dbscan.c
|
||||
@@ -26,6 +26,7 @@
|
||||
#include <string.h>
|
||||
#include <ctype.h>
|
||||
#include <errno.h>
|
||||
+#include <getopt.h>
|
||||
#include "../back-ldbm/dbimpl.h"
|
||||
#include "../slapi-plugin.h"
|
||||
#include "nspr.h"
|
||||
@@ -85,6 +86,8 @@
|
||||
#define DB_BUFFER_SMALL ENOMEM
|
||||
#endif
|
||||
|
||||
+#define COUNTOF(array) ((sizeof(array))/sizeof(*(array)))
|
||||
+
|
||||
#if defined(linux)
|
||||
#include <getopt.h>
|
||||
#endif
|
||||
@@ -130,9 +133,43 @@ long ind_cnt = 0;
|
||||
long allids_cnt = 0;
|
||||
long other_cnt = 0;
|
||||
char *dump_filename = NULL;
|
||||
+int do_it = 0;
|
||||
|
||||
static Slapi_Backend *be = NULL; /* Pseudo backend used to interact with db */
|
||||
|
||||
+/* For Long options without shortcuts */
|
||||
+enum {
|
||||
+ OPT_FIRST = 0x1000,
|
||||
+ OPT_DO_IT,
|
||||
+ OPT_REMOVE,
|
||||
+};
|
||||
+
|
||||
+static const struct option options[] = {
|
||||
+ /* Options without shortcut */
|
||||
+ { "do-it", no_argument, 0, OPT_DO_IT },
|
||||
+ { "remove", no_argument, 0, OPT_REMOVE },
|
||||
+ /* Options with shortcut */
|
||||
+ { "import", required_argument, 0, 'I' },
|
||||
+ { "export", required_argument, 0, 'X' },
|
||||
+ { "db-type", required_argument, 0, 'D' },
|
||||
+ { "dbi", required_argument, 0, 'f' },
|
||||
+ { "ascii", no_argument, 0, 'A' },
|
||||
+ { "raw", no_argument, 0, 'R' },
|
||||
+ { "truncate-entry", required_argument, 0, 't' },
|
||||
+ { "entry-id", required_argument, 0, 'K' },
|
||||
+ { "key", required_argument, 0, 'k' },
|
||||
+ { "list", required_argument, 0, 'L' },
|
||||
+ { "stats", required_argument, 0, 'S' },
|
||||
+ { "id-list-max-size", required_argument, 0, 'l' },
|
||||
+ { "id-list-min-size", required_argument, 0, 'G' },
|
||||
+ { "show-id-list-lenghts", no_argument, 0, 'n' },
|
||||
+ { "show-id-list", no_argument, 0, 'r' },
|
||||
+ { "summary", no_argument, 0, 's' },
|
||||
+ { "help", no_argument, 0, 'h' },
|
||||
+ { 0, 0, 0, 0 }
|
||||
+};
|
||||
+
|
||||
+
|
||||
/** db_printf - functioning same as printf but a place for manipluating output.
|
||||
*/
|
||||
void
|
||||
@@ -899,7 +936,7 @@ is_changelog(char *filename)
|
||||
}
|
||||
|
||||
static void
|
||||
-usage(char *argv0)
|
||||
+usage(char *argv0, int error)
|
||||
{
|
||||
char *copy = strdup(argv0);
|
||||
char *p0 = NULL, *p1 = NULL;
|
||||
@@ -922,42 +959,52 @@ usage(char *argv0)
|
||||
}
|
||||
printf("\n%s - scan a db file and dump the contents\n", p0);
|
||||
printf(" common options:\n");
|
||||
- printf(" -D <dbimpl> specify db implementaion (may be: bdb or mdb)\n");
|
||||
- printf(" -f <filename> specify db file\n");
|
||||
- printf(" -A dump as ascii data\n");
|
||||
- printf(" -R dump as raw data\n");
|
||||
- printf(" -t <size> entry truncate size (bytes)\n");
|
||||
+ printf(" -A, --ascii dump as ascii data\n");
|
||||
+ printf(" -D, --db-type <dbimpl> specify db implementaion (may be: bdb or mdb)\n");
|
||||
+ printf(" -f, --dbi <filename> specify db instance\n");
|
||||
+ printf(" -R, --raw dump as raw data\n");
|
||||
+ printf(" -t, --truncate-entry <size> entry truncate size (bytes)\n");
|
||||
+
|
||||
printf(" entry file options:\n");
|
||||
- printf(" -K <entry_id> lookup only a specific entry id\n");
|
||||
+ printf(" -K, --entry-id <entry_id> lookup only a specific entry id\n");
|
||||
+
|
||||
printf(" index file options:\n");
|
||||
- printf(" -k <key> lookup only a specific key\n");
|
||||
- printf(" -L <dbhome> list all db files\n");
|
||||
- printf(" -S <dbhome> show statistics\n");
|
||||
- printf(" -l <size> max length of dumped id list\n");
|
||||
- printf(" (default %" PRIu32 "; 40 bytes <= size <= 1048576 bytes)\n", MAX_BUFFER);
|
||||
- printf(" -G <n> only display index entries with more than <n> ids\n");
|
||||
- printf(" -n display ID list lengths\n");
|
||||
- printf(" -r display the conents of ID list\n");
|
||||
- printf(" -s Summary of index counts\n");
|
||||
- printf(" -I file Import database content from file\n");
|
||||
- printf(" -X file Export database content in file\n");
|
||||
+ printf(" -G, --id-list-min-size <n> only display index entries with more than <n> ids\n");
|
||||
+ printf(" -I, --import file Import database instance from file.\n");
|
||||
+ printf(" -k, --key <key> lookup only a specific key\n");
|
||||
+ printf(" -l, --id-list-max-size <size> max length of dumped id list\n");
|
||||
+ printf(" (default %" PRIu32 "; 40 bytes <= size <= 1048576 bytes)\n", MAX_BUFFER);
|
||||
+ printf(" -n, --show-id-list-lenghts display ID list lengths\n");
|
||||
+ printf(" --remove remove database instance\n");
|
||||
+ printf(" -r, --show-id-list display the conents of ID list\n");
|
||||
+ printf(" -S, --stats <dbhome> show statistics\n");
|
||||
+ printf(" -X, --export file export database instance in file\n");
|
||||
+
|
||||
+ printf(" other options:\n");
|
||||
+ printf(" -s, --summary summary of index counts\n");
|
||||
+ printf(" -L, --list <dbhome> list all db files\n");
|
||||
+ printf(" --do-it confirmation flags for destructive actions like --remove or --import\n");
|
||||
+ printf(" -h, --help display this usage\n");
|
||||
+
|
||||
printf(" sample usages:\n");
|
||||
- printf(" # list the db files\n");
|
||||
- printf(" %s -D mdb -L /var/lib/dirsrv/slapd-i/db/\n", p0);
|
||||
- printf(" %s -f id2entry.db\n", p0);
|
||||
+ printf(" # list the database instances\n");
|
||||
+ printf(" %s -L /var/lib/dirsrv/slapd-supplier1/db/\n", p0);
|
||||
printf(" # dump the entry file\n");
|
||||
printf(" %s -f id2entry.db\n", p0);
|
||||
printf(" # display index keys in cn.db4\n");
|
||||
printf(" %s -f cn.db4\n", p0);
|
||||
+ printf(" # display index keys in cn on lmdb\n");
|
||||
+ printf(" %s -f /var/lib/dirsrv/slapd-supplier1/db/userroot/cn.db\n", p0);
|
||||
+ printf(" (Note: Use 'dbscan -L db_home_dir' to get the db instance path)\n");
|
||||
printf(" # display index keys and the count of entries having the key in mail.db4\n");
|
||||
printf(" %s -r -f mail.db4\n", p0);
|
||||
printf(" # display index keys and the IDs having more than 20 IDs in sn.db4\n");
|
||||
printf(" %s -r -G 20 -f sn.db4\n", p0);
|
||||
printf(" # display summary of objectclass.db4\n");
|
||||
- printf(" %s -f objectclass.db4\n", p0);
|
||||
+ printf(" %s -s -f objectclass.db4\n", p0);
|
||||
printf("\n");
|
||||
free(copy);
|
||||
- exit(1);
|
||||
+ exit(error?1:0);
|
||||
}
|
||||
|
||||
void dump_ascii_val(const char *str, dbi_val_t *val)
|
||||
@@ -1126,13 +1173,12 @@ importdb(const char *dbimpl_name, const char *filename, const char *dump_name)
|
||||
dblayer_init_pvt_txn();
|
||||
|
||||
if (!dump) {
|
||||
- printf("Failed to open dump file %s. Error %d: %s\n", dump_name, errno, strerror(errno));
|
||||
- fclose(dump);
|
||||
+ printf("Error: Failed to open dump file %s. Error %d: %s\n", dump_name, errno, strerror(errno));
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (dblayer_private_open(dbimpl_name, filename, 1, &be, &env, &db)) {
|
||||
- printf("Can't initialize db plugin: %s\n", dbimpl_name);
|
||||
+ printf("Error: Can't initialize db plugin: %s\n", dbimpl_name);
|
||||
fclose(dump);
|
||||
return 1;
|
||||
}
|
||||
@@ -1142,11 +1188,16 @@ importdb(const char *dbimpl_name, const char *filename, const char *dump_name)
|
||||
!_read_line(dump, &keyword, &data) && keyword == 'v') {
|
||||
ret = dblayer_db_op(be, db, txn.txn, DBI_OP_PUT, &key, &data);
|
||||
}
|
||||
+ if (ret !=0) {
|
||||
+ printf("Error: failed to write record in database. Error %d: %s\n", ret, dblayer_strerror(ret));
|
||||
+ dump_ascii_val("Failing record key", &key);
|
||||
+ dump_ascii_val("Failing record value", &data);
|
||||
+ }
|
||||
fclose(dump);
|
||||
dblayer_value_free(be, &key);
|
||||
dblayer_value_free(be, &data);
|
||||
if (dblayer_private_close(&be, &env, &db)) {
|
||||
- printf("Unable to shutdown the db plugin: %s\n", dblayer_strerror(1));
|
||||
+ printf("Error: Unable to shutdown the db plugin: %s\n", dblayer_strerror(1));
|
||||
return 1;
|
||||
}
|
||||
return ret;
|
||||
@@ -1243,6 +1294,7 @@ removedb(const char *dbimpl_name, const char *filename)
|
||||
return 1;
|
||||
}
|
||||
|
||||
+ db = NULL; /* Database is already closed by dblayer_db_remove */
|
||||
if (dblayer_private_close(&be, &env, &db)) {
|
||||
printf("Unable to shutdown the db plugin: %s\n", dblayer_strerror(1));
|
||||
return 1;
|
||||
@@ -1250,7 +1302,6 @@ removedb(const char *dbimpl_name, const char *filename)
|
||||
return 0;
|
||||
}
|
||||
|
||||
-
|
||||
int
|
||||
main(int argc, char **argv)
|
||||
{
|
||||
@@ -1262,11 +1313,46 @@ main(int argc, char **argv)
|
||||
int ret = 0;
|
||||
char *find_key = NULL;
|
||||
uint32_t entry_id = 0xffffffff;
|
||||
- char *dbimpl_name = (char*) "bdb";
|
||||
- int c;
|
||||
+ char *defdbimpl = getenv("NSSLAPD_DB_LIB");
|
||||
+ char *dbimpl_name = (char*) "mdb";
|
||||
+ int longopt_idx = 0;
|
||||
+ int c = 0;
|
||||
+ char optstring[2*COUNTOF(options)+1] = {0};
|
||||
+
|
||||
+ if (defdbimpl) {
|
||||
+ if (strcasecmp(defdbimpl, "bdb") == 0) {
|
||||
+ dbimpl_name = (char*) "bdb";
|
||||
+ }
|
||||
+ if (strcasecmp(defdbimpl, "mdb") == 0) {
|
||||
+ dbimpl_name = (char*) "mdb";
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ /* Compute getopt short option string */
|
||||
+ {
|
||||
+ char *pt = optstring;
|
||||
+ for (const struct option *opt = options; opt->name; opt++) {
|
||||
+ if (opt->val>0 && opt->val<OPT_FIRST) {
|
||||
+ *pt++ = (char)(opt->val);
|
||||
+ if (opt->has_arg == required_argument) {
|
||||
+ *pt++ = ':';
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+ *pt = '\0';
|
||||
+ }
|
||||
|
||||
- while ((c = getopt(argc, argv, "Af:RL:S:l:nG:srk:K:hvt:D:X:I:d")) != EOF) {
|
||||
+ while ((c = getopt_long(argc, argv, optstring, options, &longopt_idx)) != EOF) {
|
||||
+ if (c == 0) {
|
||||
+ c = longopt_idx;
|
||||
+ }
|
||||
switch (c) {
|
||||
+ case OPT_DO_IT:
|
||||
+ do_it = 1;
|
||||
+ break;
|
||||
+ case OPT_REMOVE:
|
||||
+ display_mode |= REMOVE;
|
||||
+ break;
|
||||
case 'A':
|
||||
display_mode |= ASCIIDATA;
|
||||
break;
|
||||
@@ -1332,32 +1418,48 @@ main(int argc, char **argv)
|
||||
display_mode |= IMPORT;
|
||||
dump_filename = optarg;
|
||||
break;
|
||||
- case 'd':
|
||||
- display_mode |= REMOVE;
|
||||
- break;
|
||||
case 'h':
|
||||
default:
|
||||
- usage(argv[0]);
|
||||
+ usage(argv[0], 1);
|
||||
}
|
||||
}
|
||||
|
||||
+ if (filename == NULL) {
|
||||
+ fprintf(stderr, "PARAMETER ERROR! 'filename' parameter is missing.\n");
|
||||
+ usage(argv[0], 1);
|
||||
+ }
|
||||
+
|
||||
if (display_mode & EXPORT) {
|
||||
return exportdb(dbimpl_name, filename, dump_filename);
|
||||
}
|
||||
|
||||
if (display_mode & IMPORT) {
|
||||
+ if (!strstr(filename, "/id2entry") && !strstr(filename, "/replication_changelog")) {
|
||||
+ /* schema is unknown in dbscan ==> duplicate keys sort order is unknown
|
||||
+ * ==> cannot create dbi with duplicate keys
|
||||
+ * ==> only id2entry and repl changelog is importable.
|
||||
+ */
|
||||
+ fprintf(stderr, "ERROR: The only database instances that may be imported with dbscan are id2entry and replication_changelog.\n");
|
||||
+ exit(1);
|
||||
+ }
|
||||
+
|
||||
+ if (do_it == 0) {
|
||||
+ fprintf(stderr, "PARAMETER ERROR! Trying to perform a destructive action (import)\n"
|
||||
+ " without specifying '--do-it' parameter.\n");
|
||||
+ exit(1);
|
||||
+ }
|
||||
return importdb(dbimpl_name, filename, dump_filename);
|
||||
}
|
||||
|
||||
if (display_mode & REMOVE) {
|
||||
+ if (do_it == 0) {
|
||||
+ fprintf(stderr, "PARAMETER ERROR! Trying to perform a destructive action (remove)\n"
|
||||
+ " without specifying '--do-it' parameter.\n");
|
||||
+ exit(1);
|
||||
+ }
|
||||
return removedb(dbimpl_name, filename);
|
||||
}
|
||||
|
||||
- if (filename == NULL) {
|
||||
- fprintf(stderr, "PARAMETER ERROR! 'filename' parameter is missing.\n");
|
||||
- usage(argv[0]);
|
||||
- }
|
||||
-
|
||||
if (display_mode & LISTDBS) {
|
||||
dbi_dbslist_t *dbs = dblayer_list_dbs(dbimpl_name, filename);
|
||||
if (dbs) {
|
||||
diff --git a/man/man1/dbscan.1 b/man/man1/dbscan.1
|
||||
index 810608371..dfb6e8351 100644
|
||||
--- a/man/man1/dbscan.1
|
||||
+++ b/man/man1/dbscan.1
|
||||
@@ -31,50 +31,94 @@ Scans a Directory Server database index file and dumps the contents.
|
||||
.\" respectively.
|
||||
.SH OPTIONS
|
||||
A summary of options is included below:
|
||||
+.IP
|
||||
+common options:
|
||||
+.TP
|
||||
+.B \fB\-A, \-\-ascii\fR
|
||||
+dump as ascii data
|
||||
+.TP
|
||||
+.B \fB\-D, \-\-db\-type\fR <filename>
|
||||
+specify db type: bdb or mdb
|
||||
.TP
|
||||
-.B \fB\-f\fR <filename>
|
||||
-specify db file
|
||||
+.B \fB\-f, \-\-dbi\fR <filename>
|
||||
+specify db instance
|
||||
.TP
|
||||
-.B \fB\-R\fR
|
||||
+.B \fB\-R, \-\-raw\fR
|
||||
dump as raw data
|
||||
.TP
|
||||
-.B \fB\-t\fR <size>
|
||||
+.B \fB\-t, \-\-truncate\-entry\fR <size>
|
||||
entry truncate size (bytes)
|
||||
.IP
|
||||
entry file options:
|
||||
.TP
|
||||
-.B \fB\-K\fR <entry_id>
|
||||
+.B \fB\-K, \-\-entry\-id\fR <entry_id>
|
||||
lookup only a specific entry id
|
||||
+.IP
|
||||
index file options:
|
||||
.TP
|
||||
-.B \fB\-k\fR <key>
|
||||
+.B \fB\-G, \-\-id\-list\-min\-size\fR <n>
|
||||
+only display index entries with more than <n> ids
|
||||
+.TP
|
||||
+.B \fB\-I, \-\-import\fR <file>
|
||||
+Import database instance from file. Requires \-\-do\-it parameter
|
||||
+WARNING! Only the id2entry and replication_changelog database instances
|
||||
+may be imported by dbscan.
|
||||
+.TP
|
||||
+.B \fB\-k, \-\-key\fR <key>
|
||||
lookup only a specific key
|
||||
.TP
|
||||
-.B \fB\-l\fR <size>
|
||||
+.B \fB\-l, \-\-id\-list\-max\-size\fR <size>
|
||||
max length of dumped id list
|
||||
(default 4096; 40 bytes <= size <= 1048576 bytes)
|
||||
.TP
|
||||
-.B \fB\-G\fR <n>
|
||||
-only display index entries with more than <n> ids
|
||||
-.TP
|
||||
-.B \fB\-n\fR
|
||||
+.B \fB\-n, \-\-show\-id\-list\-lenghts\fR
|
||||
display ID list lengths
|
||||
.TP
|
||||
-.B \fB\-r\fR
|
||||
+.B \fB\-\-remove\fR
|
||||
+remove a db instance. Requires the \-\-do\-it parameter.
|
||||
+.TP
|
||||
+.B \fB\-r, \-\-show\-id\-list\fR
|
||||
display the contents of ID list
|
||||
.TP
|
||||
-.B \fB\-s\fR
|
||||
+.B \fB\-S, \-\-stats\fR
|
||||
+display statistics
|
||||
+.TP
|
||||
+.B \fB\-X, \-\-export\fR <file>
|
||||
+Export database instance to file
|
||||
+.IP
|
||||
+other options:
|
||||
+.TP
|
||||
+.B \fB\-s, \-\-summary\fR
|
||||
Summary of index counts
|
||||
+.TP
|
||||
+.B \fB\-L, \-\-list\fR
|
||||
+List of database instances
|
||||
+.TP
|
||||
+.B \fB\-\-do\-it\fR
|
||||
+confirmation required for actions that change the database contents
|
||||
+.TP
|
||||
+.B \fB\-h, \-\-help\fR
|
||||
+display the usage
|
||||
.IP
|
||||
.SH USAGE
|
||||
Sample usages:
|
||||
.TP
|
||||
+List the database instances
|
||||
+.B
|
||||
+dbscan -L /var/lib/dirsrv/slapd-supplier1/db
|
||||
+.TP
|
||||
Dump the entry file:
|
||||
.B
|
||||
dbscan \fB\-f\fR id2entry.db4
|
||||
.TP
|
||||
Display index keys in cn.db4:
|
||||
-.B dbscan \fB\-f\fR cn.db4
|
||||
+.B
|
||||
+dbscan \fB\-f\fR cn.db4
|
||||
+.TP
|
||||
+Display index keys in cn on lmdb:
|
||||
+.B
|
||||
+dbscan \fB\-f\fR /var/lib/dirsrv/slapd\-supplier1/db/userroot/cn.db
|
||||
+ (Note: Use \fBdbscan \-L db_home_dir\fR to get the db instance path)
|
||||
.TP
|
||||
Display index keys and the count of entries having the key in mail.db4:
|
||||
.B
|
||||
@@ -86,7 +130,7 @@ dbscan \fB\-r\fR \fB\-G\fR 20 \fB\-f\fR sn.db4
|
||||
.TP
|
||||
Display summary of objectclass.db4:
|
||||
.B
|
||||
-dbscan \fB\-f\fR objectclass.db4
|
||||
+dbscan \fB\-s \-f\fR objectclass.db4
|
||||
.br
|
||||
.SH AUTHOR
|
||||
dbscan was written by the 389 Project.
|
||||
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
|
||||
index e87582d9e..368741a66 100644
|
||||
--- a/src/lib389/lib389/__init__.py
|
||||
+++ b/src/lib389/lib389/__init__.py
|
||||
@@ -3039,14 +3039,17 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
return self._dbisupport
|
||||
# check if -D and -L options are supported
|
||||
try:
|
||||
- cmd = ["%s/dbscan" % self.get_bin_dir(), "--help"]
|
||||
+ cmd = ["%s/dbscan" % self.get_bin_dir(), "-h"]
|
||||
self.log.debug("DEBUG: checking dbscan supported options %s" % cmd)
|
||||
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
|
||||
except subprocess.CalledProcessError:
|
||||
pass
|
||||
output, stderr = p.communicate()
|
||||
- self.log.debug("is_dbi_supported output " + output.decode())
|
||||
- if "-D <dbimpl>" in output.decode() and "-L <dbhome>" in output.decode():
|
||||
+ output = output.decode()
|
||||
+ self.log.debug("is_dbi_supported output " + output)
|
||||
+ if "-D <dbimpl>" in output and "-L <dbhome>" in output:
|
||||
+ self._dbisupport = True
|
||||
+ elif "--db-type" in output and "--list" in output:
|
||||
self._dbisupport = True
|
||||
else:
|
||||
self._dbisupport = False
|
||||
diff --git a/src/lib389/lib389/cli_ctl/dblib.py b/src/lib389/lib389/cli_ctl/dblib.py
|
||||
index e9269e340..82f09c70c 100644
|
||||
--- a/src/lib389/lib389/cli_ctl/dblib.py
|
||||
+++ b/src/lib389/lib389/cli_ctl/dblib.py
|
||||
@@ -158,6 +158,14 @@ def run_dbscan(args):
|
||||
return output
|
||||
|
||||
|
||||
+def does_dbscan_need_do_it():
|
||||
+ prefix = os.environ.get('PREFIX', "")
|
||||
+ prog = f'{prefix}/bin/dbscan'
|
||||
+ args = [ prog, '-h' ]
|
||||
+ output = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
+ return '--do-it' in output.stdout
|
||||
+
|
||||
+
|
||||
def export_changelog(be, dblib):
|
||||
# Export backend changelog
|
||||
try:
|
||||
@@ -172,7 +180,10 @@ def import_changelog(be, dblib):
|
||||
# import backend changelog
|
||||
try:
|
||||
cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname']
|
||||
- run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name']])
|
||||
+ if does_dbscan_need_do_it():
|
||||
+ run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name'], '--do-it'])
|
||||
+ else:
|
||||
+ run_dbscan(['-D', dblib, '-f', cl5dbname, '-I', be['cl5name']])
|
||||
return True
|
||||
except subprocess.CalledProcessError as e:
|
||||
return False
|
||||
--
|
||||
2.48.0
|
||||
|
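
The net effect of this patch on scripted callers: destructive dbscan actions (import, remove) now demand an explicit confirmation flag, and long options replace the old single-letter ones. A minimal Python sketch of a caller that works against both old and new binaries — the helper name and fallback path are illustrative, not part of lib389:

    import shutil
    import subprocess

    def dbscan_remove(dbi_path, db_type="mdb"):
        """Remove a database instance, passing --do-it only when dbscan supports it."""
        prog = shutil.which("dbscan") or "/usr/bin/dbscan"
        helptext = subprocess.run([prog, "-h"], encoding="utf-8",
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT).stdout
        if "--do-it" in helptext:
            # New CLI: long option plus the mandatory confirmation flag
            args = [prog, "-D", db_type, "-f", dbi_path, "--remove", "--do-it"]
        else:
            # Old CLI: -d was the remove switch and no confirmation existed
            args = [prog, "-D", db_type, "-f", dbi_path, "-d"]
        subprocess.run(args, check=True)

This mirrors the probe that does_dbscan_need_do_it() in the dblib.py hunk above performs by parsing the output of dbscan -h.
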
@ -0,0 +1,70 @@
|
||||
From de52853a3551f1d1876ea21b33a5242ad669fec1 Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Tue, 4 Feb 2025 15:40:16 +0000
|
||||
Subject: [PATCH] Issue 6566 - RI plugin failure to handle a modrdn for rename
|
||||
of member of multiple groups (#6567)
|
||||
|
||||
Bug description:
|
||||
With AM and RI plugins enabled, the rename of a user that is part of multiple groups
|
||||
fails with a "value exists" error.
|
||||
|
||||
Fix description:
|
||||
For a modrdn the RI plugin creates a new DN; before a modify is attempted, check
|
||||
whether the new DN already exists in the attribute being updated (see the sketch after this patch).
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6566
|
||||
|
||||
Reviewed by: @progier389 , @tbordaz (Thank you)
|
||||
---
|
||||
ldap/servers/plugins/referint/referint.c | 15 ++++++++++++---
|
||||
1 file changed, 12 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
|
||||
index 468fdc239..218863ea5 100644
|
||||
--- a/ldap/servers/plugins/referint/referint.c
|
||||
+++ b/ldap/servers/plugins/referint/referint.c
|
||||
@@ -924,6 +924,7 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
|
||||
{
|
||||
Slapi_Mods *smods = NULL;
|
||||
char *newDN = NULL;
|
||||
+ struct berval bv = {0};
|
||||
char **dnParts = NULL;
|
||||
char *sval = NULL;
|
||||
char *newvalue = NULL;
|
||||
@@ -1026,22 +1027,30 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
|
||||
}
|
||||
/* else: normalize_rc < 0) Ignore the DN normalization error for now. */
|
||||
|
||||
+ bv.bv_val = newDN;
|
||||
+ bv.bv_len = strlen(newDN);
|
||||
p = PL_strstr(sval, slapi_sdn_get_ndn(origDN));
|
||||
if (p == sval) {
|
||||
/* (case 1) */
|
||||
slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
|
||||
- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
|
||||
-
|
||||
+ /* Add only if the attr value does not exist */
|
||||
+ if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) {
|
||||
+ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
|
||||
+ }
|
||||
} else if (p) {
|
||||
/* (case 2) */
|
||||
slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
|
||||
*p = '\0';
|
||||
newvalue = slapi_ch_smprintf("%s%s", sval, newDN);
|
||||
- slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue);
|
||||
+ /* Add only if the attr value does not exist */
|
||||
+ if (VALUE_PRESENT != attr_value_find_wsi(attr, &bv, &v)) {
|
||||
+ slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newvalue);
|
||||
+ }
|
||||
slapi_ch_free_string(&newvalue);
|
||||
}
|
||||
/* else: value does not include the modified DN. Ignore it. */
|
||||
slapi_ch_free_string(&sval);
|
||||
+ bv = (struct berval){0};
|
||||
}
|
||||
rc = _do_modify(mod_pb, entrySDN, slapi_mods_get_ldapmods_byref(smods));
|
||||
if (rc) {
|
||||
--
|
||||
2.48.0
|
||||
|
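
The heart of the fix is making the add conditional: when a renamed user belongs to several groups, the new DN may already be present in a membership attribute by the time a later modify is built, and an unconditional LDAP_MOD_ADD then fails with "type or value exists". A small Python sketch of the same logic over plain strings — illustrative only, not the slapi API:

    def build_rename_mods(current_values, old_dn, new_dn):
        """Replace old_dn with new_dn in attribute values, skipping duplicate adds."""
        mods = []
        for value in current_values:
            if old_dn not in value:
                continue  # value does not reference the renamed entry
            mods.append(("delete", value))
            replacement = value.replace(old_dn, new_dn)
            # Add only if the attribute does not already hold the new value;
            # an unconditional add is what produced the "value exists" error.
            if replacement not in current_values:
                mods.append(("add", replacement))
        return mods

    # The attribute already lists the new DN, so only the delete is emitted:
    assert build_rename_mods(
        ["uid=old,ou=people", "uid=new,ou=people"],
        "uid=old,ou=people", "uid=new,ou=people",
    ) == [("delete", "uid=old,ou=people")]
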
@ -0,0 +1,43 @@
|
||||
From a634756784056270773d67747061e26152d85469 Mon Sep 17 00:00:00 2001
|
||||
From: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
Date: Wed, 5 Feb 2025 11:38:04 +0900
|
||||
Subject: [PATCH] Issue 6258 - Mitigate race condition in paged_results_test.py
|
||||
(#6433)
|
||||
|
||||
The regression test dirsrvtests/tests/suites/paged_results/paged_results_test.py::test_multi_suffix_search has a race condition causing it to fail due to multiple queries potentially writing their logs out of chronological order.
|
||||
|
||||
This failure is mitigated by sorting the retrieved access_log_lines by their "op" value. This ensures the log lines are in chronological order, as expected by the assertions at the end of test_multi_suffix_search().
|
||||
|
||||
Helps fix: #6258
|
||||
|
||||
Reviewed by: @droideck , @progier389 (Thanks!)
|
||||
|
||||
Co-authored-by: Anuar Beisembayev <111912342+abeisemb@users.noreply.github.com>
|
||||
---
|
||||
dirsrvtests/tests/suites/paged_results/paged_results_test.py | 3 +++
|
||||
1 file changed, 3 insertions(+)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
|
||||
index eaf0e0da9..fca48db0f 100644
|
||||
--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py
|
||||
+++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
|
||||
@@ -7,6 +7,7 @@
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
#
|
||||
import socket
|
||||
+import re
|
||||
from random import sample, randrange
|
||||
|
||||
import pytest
|
||||
@@ -1126,6 +1127,8 @@ def test_multi_suffix_search(topology_st, create_user, new_suffixes):
|
||||
topology_st.standalone.restart(timeout=10)
|
||||
|
||||
access_log_lines = topology_st.standalone.ds_access_log.match('.*pr_cookie=.*')
|
||||
+ # Sort access_log_lines by op number to mitigate race condition effects.
|
||||
+ access_log_lines.sort(key=lambda x: int(re.search(r"op=(\d+) RESULT", x).group(1)))
|
||||
pr_cookie_list = ([line.rsplit('=', 1)[-1] for line in access_log_lines])
|
||||
pr_cookie_list = [int(pr_cookie) for pr_cookie in pr_cookie_list]
|
||||
log.info('Assert that last pr_cookie == -1 and others pr_cookie == 0')
|
||||
--
|
||||
2.48.0
|
||||
|
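
A quick illustration with synthetic access-log lines shows why keying the sort on the op number restores the order the assertions expect:

    import re

    lines = [
        '[...] conn=1 op=3 RESULT err=0 pr_cookie=-1',
        '[...] conn=1 op=1 RESULT err=0 pr_cookie=0',
        '[...] conn=1 op=2 RESULT err=0 pr_cookie=0',
    ]
    # Reorder by operation number so the cookies are read chronologically
    lines.sort(key=lambda x: int(re.search(r"op=(\d+) RESULT", x).group(1)))
    assert [int(line.rsplit('=', 1)[-1]) for line in lines] == [0, 0, -1]

The lines here are abbreviated; a real access log carries more fields, but every RESULT line includes the op=<n> token the sort key relies on.
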
566
0007-Issue-6229-After-an-initial-failure-subsequent-onlin.patch
Normal file
@ -0,0 +1,566 @@
|
||||
From 769e71499880a0820424bf925c0f0fe793e11cc8 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Fri, 28 Jun 2024 18:56:49 +0200
|
||||
Subject: [PATCH] Issue 6229 - After an initial failure, subsequent online
|
||||
backups fail (#6230)
|
||||
|
||||
* Issue 6229 - After an initial failure, subsequent online backups will not work
|
||||
|
||||
Several issues related to backup task error handling:
|
||||
Backends stay busy after the failure
|
||||
Exit code is 0 in some cases
|
||||
Crash if failing to open the backup directory
|
||||
And a more general one:
|
||||
lib389 Task DN collision
|
||||
|
||||
Solutions:
|
||||
Always reset the busy flags that have been set (see the unwind sketch after the diffstat below)
|
||||
Ensure that 0 is not returned in the error case
|
||||
Avoid closing a NULL directory descriptor
|
||||
Use a timestamp with millisecond precision to create the task DN
|
||||
|
||||
Issue: #6229
|
||||
|
||||
Reviewed by: @droideck (Thanks!)
|
||||
|
||||
(cherry picked from commit 04a0b6ac776a1d588ec2e10ff651e5015078ad21)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/archive.c | 45 +++++-----
|
||||
.../slapd/back-ldbm/db-mdb/mdb_layer.c | 3 +
|
||||
src/lib389/lib389/__init__.py | 10 +--
|
||||
src/lib389/lib389/tasks.py | 82 +++++++++----------
|
||||
4 files changed, 70 insertions(+), 70 deletions(-)
|
||||
|
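
The archive.c change below applies a standard unwind pattern: remember how far the busy-marking loop got, and on any exit clear exactly the flags that were set, releasing the object references an aborted objset walk leaves behind. A language-neutral sketch in Python — try_set_busy/set_not_busy are hypothetical stand-ins, not lib389 API:

    def mark_all_busy(instances):
        """Mark every instance busy, or unwind the ones already marked and fail."""
        marked = []
        try:
            for inst in instances:
                if not inst.try_set_busy():
                    raise RuntimeError(f"{inst.name} is busy with another task")
                marked.append(inst)
            return marked
        except Exception:
            # Unwind only the flags this call actually set, then re-raise
            for inst in marked:
                inst.set_not_busy()
            raise
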
||||
diff --git a/ldap/servers/slapd/back-ldbm/archive.c b/ldap/servers/slapd/back-ldbm/archive.c
|
||||
index 0460a42f6..6658cc80a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/archive.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/archive.c
|
||||
@@ -16,6 +16,8 @@
|
||||
#include "back-ldbm.h"
|
||||
#include "dblayer.h"
|
||||
|
||||
+#define NO_OBJECT ((Object*)-1)
|
||||
+
|
||||
int
|
||||
ldbm_temporary_close_all_instances(Slapi_PBlock *pb)
|
||||
{
|
||||
@@ -270,6 +272,7 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
|
||||
int run_from_cmdline = 0;
|
||||
Slapi_Task *task;
|
||||
struct stat sbuf;
|
||||
+ Object *last_busy_inst_obj = NO_OBJECT;
|
||||
|
||||
slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
|
||||
slapi_pblock_get(pb, SLAPI_SEQ_VAL, &rawdirectory);
|
||||
@@ -380,13 +383,12 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
|
||||
|
||||
/* to avoid conflict w/ import, do this check for commandline, as well */
|
||||
{
|
||||
- Object *inst_obj, *inst_obj2;
|
||||
ldbm_instance *inst = NULL;
|
||||
|
||||
/* server is up -- mark all backends busy */
|
||||
- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
|
||||
- inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
|
||||
- inst = (ldbm_instance *)object_get_data(inst_obj);
|
||||
+ for (last_busy_inst_obj = objset_first_obj(li->li_instance_set); last_busy_inst_obj;
|
||||
+ last_busy_inst_obj = objset_next_obj(li->li_instance_set, last_busy_inst_obj)) {
|
||||
+ inst = (ldbm_instance *)object_get_data(last_busy_inst_obj);
|
||||
|
||||
/* check if an import/restore is already ongoing... */
|
||||
if (instance_set_busy(inst) != 0 || dblayer_in_import(inst) != 0) {
|
||||
@@ -400,20 +402,6 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
|
||||
"another task and cannot be disturbed.",
|
||||
inst->inst_name);
|
||||
}
|
||||
-
|
||||
- /* painfully, we have to clear the BUSY flags on the
|
||||
- * backends we'd already marked...
|
||||
- */
|
||||
- for (inst_obj2 = objset_first_obj(li->li_instance_set);
|
||||
- inst_obj2 && (inst_obj2 != inst_obj);
|
||||
- inst_obj2 = objset_next_obj(li->li_instance_set,
|
||||
- inst_obj2)) {
|
||||
- inst = (ldbm_instance *)object_get_data(inst_obj2);
|
||||
- instance_set_not_busy(inst);
|
||||
- }
|
||||
- if (inst_obj2 && inst_obj2 != inst_obj)
|
||||
- object_release(inst_obj2);
|
||||
- object_release(inst_obj);
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
@@ -427,18 +415,26 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
|
||||
goto err;
|
||||
}
|
||||
|
||||
- if (!run_from_cmdline) {
|
||||
+err:
|
||||
+ /* Clear all BUSY flags that have been previously set */
|
||||
+ if (last_busy_inst_obj != NO_OBJECT) {
|
||||
ldbm_instance *inst;
|
||||
Object *inst_obj;
|
||||
|
||||
- /* none of these backends are busy anymore */
|
||||
- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
|
||||
+ for (inst_obj = objset_first_obj(li->li_instance_set);
|
||||
+ inst_obj && (inst_obj != last_busy_inst_obj);
|
||||
inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
|
||||
inst = (ldbm_instance *)object_get_data(inst_obj);
|
||||
instance_set_not_busy(inst);
|
||||
}
|
||||
+ if (last_busy_inst_obj != NULL) {
|
||||
+ /* release last seen object for aborted objset_next_obj iterations */
|
||||
+ if (inst_obj != NULL) {
|
||||
+ object_release(inst_obj);
|
||||
+ }
|
||||
+ object_release(last_busy_inst_obj);
|
||||
+ }
|
||||
}
|
||||
-err:
|
||||
if (return_value) {
|
||||
if (dir_bak) {
|
||||
slapi_log_err(SLAPI_LOG_ERR,
|
||||
@@ -727,7 +723,10 @@ ldbm_archive_config(char *bakdir, Slapi_Task *task)
|
||||
}
|
||||
|
||||
error:
|
||||
- PR_CloseDir(dirhandle);
|
||||
+ if (NULL != dirhandle) {
|
||||
+ PR_CloseDir(dirhandle);
|
||||
+ dirhandle = NULL;
|
||||
+ }
|
||||
dse_backup_unlock();
|
||||
slapi_ch_free_string(&backup_config_dir);
|
||||
slapi_ch_free_string(&dse_file);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
index 70a289bdb..de4161b0c 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
@@ -983,6 +983,9 @@ dbmdb_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task)
|
||||
if (ldbm_archive_config(dest_dir, task) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "dbmdb_backup",
|
||||
"Backup of config files failed or is incomplete\n");
|
||||
+ if (0 == return_value) {
|
||||
+ return_value = -1;
|
||||
+ }
|
||||
}
|
||||
|
||||
goto bail;
|
||||
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
|
||||
index 368741a66..cb372c138 100644
|
||||
--- a/src/lib389/lib389/__init__.py
|
||||
+++ b/src/lib389/lib389/__init__.py
|
||||
@@ -69,7 +69,7 @@ from lib389.utils import (
|
||||
get_user_is_root)
|
||||
from lib389.paths import Paths
|
||||
from lib389.nss_ssl import NssSsl
|
||||
-from lib389.tasks import BackupTask, RestoreTask
|
||||
+from lib389.tasks import BackupTask, RestoreTask, Task
|
||||
from lib389.dseldif import DSEldif
|
||||
|
||||
# mixin
|
||||
@@ -1424,7 +1424,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
name, self.ds_paths.prefix)
|
||||
|
||||
# create the archive
|
||||
- name = "backup_%s_%s.tar.gz" % (self.serverid, time.strftime("%m%d%Y_%H%M%S"))
|
||||
+ name = "backup_%s_%s.tar.gz" % (self.serverid, Task.get_timestamp())
|
||||
backup_file = os.path.join(backup_dir, name)
|
||||
tar = tarfile.open(backup_file, "w:gz")
|
||||
tar.extraction_filter = (lambda member, path: member)
|
||||
@@ -2810,7 +2810,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
else:
|
||||
# No output file specified. Use the default ldif location/name
|
||||
cmd.append('-a')
|
||||
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
|
||||
+ tnow = Task.get_timestamp()
|
||||
if bename:
|
||||
ldifname = os.path.join(self.ds_paths.ldif_dir, "%s-%s-%s.ldif" % (self.serverid, bename, tnow))
|
||||
else:
|
||||
@@ -2881,7 +2881,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
|
||||
if archive_dir is None:
|
||||
# Use the instance name and date/time as the default backup name
|
||||
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
|
||||
+ tnow = Task.get_timestamp()
|
||||
archive_dir = os.path.join(self.ds_paths.backup_dir, "%s-%s" % (self.serverid, tnow))
|
||||
elif not archive_dir.startswith("/"):
|
||||
# Relative path, append it to the bak directory
|
||||
@@ -3506,7 +3506,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
|
||||
if archive is None:
|
||||
# Use the instance name and date/time as the default backup name
|
||||
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
|
||||
+ tnow = Task.get_timestamp()
|
||||
if self.serverid is not None:
|
||||
backup_dir_name = "%s-%s" % (self.serverid, tnow)
|
||||
else:
|
||||
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
|
||||
index 6c2adb5b2..6bf302862 100644
|
||||
--- a/src/lib389/lib389/tasks.py
|
||||
+++ b/src/lib389/lib389/tasks.py
|
||||
@@ -118,7 +118,7 @@ class Task(DSLdapObject):
|
||||
return super(Task, self).create(rdn, properties, basedn)
|
||||
|
||||
@staticmethod
|
||||
- def _get_task_date():
|
||||
+ def get_timestamp():
|
||||
"""Return a timestamp to use in naming new task entries."""
|
||||
|
||||
return datetime.now().isoformat()
|
||||
@@ -132,7 +132,7 @@ class AutomemberRebuildMembershipTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'automember_rebuild_' + Task._get_task_date()
|
||||
+ self.cn = 'automember_rebuild_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_REBUILD_TASK
|
||||
|
||||
super(AutomemberRebuildMembershipTask, self).__init__(instance, dn)
|
||||
@@ -147,7 +147,7 @@ class AutomemberAbortRebuildTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'automember_abort_' + Task._get_task_date()
|
||||
+ self.cn = 'automember_abort_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_ABORT_REBUILD_TASK
|
||||
|
||||
super(AutomemberAbortRebuildTask, self).__init__(instance, dn)
|
||||
@@ -161,7 +161,7 @@ class FixupLinkedAttributesTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'fixup_linked_attrs_' + Task._get_task_date()
|
||||
+ self.cn = 'fixup_linked_attrs_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_FIXUP_LINKED_ATTIBUTES
|
||||
|
||||
super(FixupLinkedAttributesTask, self).__init__(instance, dn)
|
||||
@@ -175,7 +175,7 @@ class MemberUidFixupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'memberUid_fixup_' + Task._get_task_date()
|
||||
+ self.cn = 'memberUid_fixup_' + Task.get_timestamp()
|
||||
dn = f"cn={self.cn},cn=memberuid task,cn=tasks,cn=config"
|
||||
|
||||
super(MemberUidFixupTask, self).__init__(instance, dn)
|
||||
@@ -190,7 +190,7 @@ class MemberOfFixupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'memberOf_fixup_' + Task._get_task_date()
|
||||
+ self.cn = 'memberOf_fixup_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_MBO_TASK
|
||||
|
||||
super(MemberOfFixupTask, self).__init__(instance, dn)
|
||||
@@ -205,7 +205,7 @@ class USNTombstoneCleanupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'usn_cleanup_' + Task._get_task_date()
|
||||
+ self.cn = 'usn_cleanup_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=USN tombstone cleanup task," + DN_TASKS
|
||||
|
||||
super(USNTombstoneCleanupTask, self).__init__(instance, dn)
|
||||
@@ -225,7 +225,7 @@ class csngenTestTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'csngenTest_' + Task._get_task_date()
|
||||
+ self.cn = 'csngenTest_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=csngen_test," + DN_TASKS
|
||||
super(csngenTestTask, self).__init__(instance, dn)
|
||||
|
||||
@@ -238,7 +238,7 @@ class EntryUUIDFixupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'entryuuid_fixup_' + Task._get_task_date()
|
||||
+ self.cn = 'entryuuid_fixup_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_EUUID_TASK
|
||||
super(EntryUUIDFixupTask, self).__init__(instance, dn)
|
||||
self._must_attributes.extend(['basedn'])
|
||||
@@ -252,7 +252,7 @@ class DBCompactTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'compact_db_' + Task._get_task_date()
|
||||
+ self.cn = 'compact_db_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + "," + DN_COMPACTDB_TASK
|
||||
super(DBCompactTask, self).__init__(instance, dn)
|
||||
|
||||
@@ -265,7 +265,7 @@ class SchemaReloadTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'schema_reload_' + Task._get_task_date()
|
||||
+ self.cn = 'schema_reload_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=schema reload task," + DN_TASKS
|
||||
super(SchemaReloadTask, self).__init__(instance, dn)
|
||||
|
||||
@@ -278,7 +278,7 @@ class SyntaxValidateTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'syntax_validate_' + Task._get_task_date()
|
||||
+ self.cn = 'syntax_validate_' + Task.get_timestamp()
|
||||
dn = f"cn={self.cn},cn=syntax validate,cn=tasks,cn=config"
|
||||
|
||||
super(SyntaxValidateTask, self).__init__(instance, dn)
|
||||
@@ -295,7 +295,7 @@ class AbortCleanAllRUVTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'abortcleanallruv_' + Task._get_task_date()
|
||||
+ self.cn = 'abortcleanallruv_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=abort cleanallruv," + DN_TASKS
|
||||
|
||||
super(AbortCleanAllRUVTask, self).__init__(instance, dn)
|
||||
@@ -312,7 +312,7 @@ class CleanAllRUVTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'cleanallruv_' + Task._get_task_date()
|
||||
+ self.cn = 'cleanallruv_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=cleanallruv," + DN_TASKS
|
||||
self._properties = None
|
||||
|
||||
@@ -359,7 +359,7 @@ class ImportTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'import_' + Task._get_task_date()
|
||||
+ self.cn = 'import_' + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (self.cn, DN_IMPORT_TASK)
|
||||
self._properties = None
|
||||
|
||||
@@ -388,7 +388,7 @@ class ExportTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'export_' + Task._get_task_date()
|
||||
+ self.cn = 'export_' + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (self.cn, DN_EXPORT_TASK)
|
||||
self._properties = None
|
||||
|
||||
@@ -411,7 +411,7 @@ class BackupTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'backup_' + Task._get_task_date()
|
||||
+ self.cn = 'backup_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=backup," + DN_TASKS
|
||||
self._properties = None
|
||||
|
||||
@@ -426,7 +426,7 @@ class RestoreTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'restore_' + Task._get_task_date()
|
||||
+ self.cn = 'restore_' + Task.get_timestamp()
|
||||
dn = "cn=" + self.cn + ",cn=restore," + DN_TASKS
|
||||
self._properties = None
|
||||
|
||||
@@ -513,7 +513,7 @@ class Tasks(object):
|
||||
raise ValueError("Import file (%s) does not exist" % input_file)
|
||||
|
||||
# Prepare the task entry
|
||||
- cn = "import_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = "import_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_IMPORT_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -581,7 +581,7 @@ class Tasks(object):
|
||||
raise ValueError("output_file is mandatory")
|
||||
|
||||
# Prepare the task entry
|
||||
- cn = "export_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = "export_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_EXPORT_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.update({
|
||||
@@ -637,7 +637,7 @@ class Tasks(object):
|
||||
raise ValueError("You must specify a backup directory.")
|
||||
|
||||
# build the task entry
|
||||
- cn = "backup_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = "backup_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_BACKUP_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.update({
|
||||
@@ -694,7 +694,7 @@ class Tasks(object):
|
||||
raise ValueError("Backup file (%s) does not exist" % backup_dir)
|
||||
|
||||
# build the task entry
|
||||
- cn = "restore_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = "restore_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_RESTORE_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.update({
|
||||
@@ -789,7 +789,7 @@ class Tasks(object):
|
||||
attrs.append(attr)
|
||||
else:
|
||||
attrs.append(attrname)
|
||||
- cn = "index_vlv_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
|
||||
+ cn = "index_vlv_%s" % (Task.get_timestamp())
|
||||
dn = "cn=%s,%s" % (cn, DN_INDEX_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.update({
|
||||
@@ -803,7 +803,7 @@ class Tasks(object):
|
||||
#
|
||||
# Reindex all attributes - gather them first...
|
||||
#
|
||||
- cn = "index_all_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
|
||||
+ cn = "index_all_%s" % (Task.get_timestamp())
|
||||
dn = ('cn=%s,cn=ldbm database,cn=plugins,cn=config' % backend)
|
||||
try:
|
||||
indexes = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, '(objectclass=nsIndex)')
|
||||
@@ -815,7 +815,7 @@ class Tasks(object):
|
||||
#
|
||||
# Reindex specific attributes
|
||||
#
|
||||
- cn = "index_attrs_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
|
||||
+ cn = "index_attrs_%s" % (Task.get_timestamp())
|
||||
if isinstance(attrname, (tuple, list)):
|
||||
# Need to guarantee this is a list (and not a tuple)
|
||||
for attr in attrname:
|
||||
@@ -903,8 +903,7 @@ class Tasks(object):
|
||||
|
||||
suffix = ents[0].getValue(attr)
|
||||
|
||||
- cn = "fixupmemberof_" + time.strftime("%m%d%Y_%H%M%S",
|
||||
- time.localtime())
|
||||
+ cn = "fixupmemberof_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_MBO_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -965,8 +964,7 @@ class Tasks(object):
|
||||
if len(ents) != 1:
|
||||
raise ValueError("invalid backend name: %s" % bename)
|
||||
|
||||
- cn = "fixupTombstone_" + time.strftime("%m%d%Y_%H%M%S",
|
||||
- time.localtime())
|
||||
+ cn = "fixupTombstone_" + Task.get_timestamp()
|
||||
dn = "cn=%s,%s" % (cn, DN_TOMB_FIXUP_TASK)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1019,7 +1017,7 @@ class Tasks(object):
|
||||
@return exit code
|
||||
'''
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=automember rebuild membership,cn=tasks,cn=config' % cn)
|
||||
|
||||
entry = Entry(dn)
|
||||
@@ -1077,7 +1075,7 @@ class Tasks(object):
|
||||
if not ldif_out:
|
||||
raise ValueError("Missing ldif_out")
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=automember export updates,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1129,7 +1127,7 @@ class Tasks(object):
|
||||
if not ldif_out or not ldif_in:
|
||||
raise ValueError("Missing ldif_out and/or ldif_in")
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=automember map updates,cn=tasks,cn=config' % cn)
|
||||
|
||||
entry = Entry(dn)
|
||||
@@ -1175,7 +1173,7 @@ class Tasks(object):
|
||||
@return exit code
|
||||
'''
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=fixup linked attributes,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1219,7 +1217,7 @@ class Tasks(object):
|
||||
@return exit code
|
||||
'''
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=schema reload task,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1264,7 +1262,7 @@ class Tasks(object):
|
||||
@return exit code
|
||||
'''
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=memberuid task,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1311,7 +1309,7 @@ class Tasks(object):
|
||||
@return exit code
|
||||
'''
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=syntax validate,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1358,7 +1356,7 @@ class Tasks(object):
|
||||
@return exit code
|
||||
'''
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=USN tombstone cleanup task,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1413,7 +1411,7 @@ class Tasks(object):
|
||||
if not configfile:
|
||||
raise ValueError("Missing required paramter: configfile")
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=sysconfig reload,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1464,7 +1462,7 @@ class Tasks(object):
|
||||
if not suffix:
|
||||
raise ValueError("Missing required paramter: suffix")
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=cleanallruv,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1516,7 +1514,7 @@ class Tasks(object):
|
||||
if not suffix:
|
||||
raise ValueError("Missing required paramter: suffix")
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=abort cleanallruv,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1571,7 +1569,7 @@ class Tasks(object):
|
||||
if not nsArchiveDir:
|
||||
raise ValueError("Missing required paramter: nsArchiveDir")
|
||||
|
||||
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
|
||||
+ cn = 'task-' + Task.get_timestamp()
|
||||
dn = ('cn=%s,cn=upgradedb,cn=tasks,cn=config' % cn)
|
||||
entry = Entry(dn)
|
||||
entry.setValues('objectclass', 'top', 'extensibleObject')
|
||||
@@ -1616,6 +1614,6 @@ class LDAPIMappingReloadTask(Task):
|
||||
"""
|
||||
|
||||
def __init__(self, instance, dn=None):
|
||||
- self.cn = 'reload-' + Task._get_task_date()
|
||||
+ self.cn = 'reload-' + Task.get_timestamp()
|
||||
dn = f'cn={self.cn},cn=reload ldapi mappings,cn=tasks,cn=config'
|
||||
super(LDAPIMappingReloadTask, self).__init__(instance, dn)
|
||||
--
|
||||
2.48.0
|
||||
|
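
The task DN collision addressed by get_timestamp() is easy to reproduce: the old %m%d%Y_%H%M%S format names two tasks identically whenever they are created within the same second, while datetime.now().isoformat() keeps microsecond precision. A small, timing-dependent demonstration:

    import time
    from datetime import datetime

    old = ['backup_' + time.strftime("%m%d%Y_%H%M%S") for _ in range(3)]
    new = ['backup_' + datetime.now().isoformat() for _ in range(3)]
    print(len(set(old)), len(set(new)))  # typically "1 3": the old names collide, the new ones do not
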
165
0008-Issue-6554-During-import-of-entries-without-nsUnique.patch
Normal file
@ -0,0 +1,165 @@
|
||||
From b2511553590f0d9b41856d8baff5f3cd103dd46f Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Thu, 6 Feb 2025 18:25:36 +0100
|
||||
Subject: [PATCH] Issue 6554 - During import of entries without nsUniqueId, a
|
||||
supplier generates duplicate nsUniqueId (LMDB only) (#6582)
|
||||
|
||||
Bug description:
|
||||
During an import the entry is prepared (schema, operational
|
||||
attributes, password encryption,...) before starting the
|
||||
update of the database and indexes.
|
||||
One step of the preparation is to assign a value to the 'nsuniqueid'
|
||||
operational attribute. 'nsuniqueid' must be unique.
|
||||
In LMDB the preparation is done by multiple threads (workers).
|
||||
In that case the 'nsuniqueid' values are generated in parallel and,
|
||||
as they are time-based, several of them can be duplicated.
|
||||
|
||||
Fix description:
|
||||
To prevent that, the routine dbmdb_import_generate_uniqueid
|
||||
synchronizes the workers.
|
||||
|
||||
fixes: #6554
|
||||
|
||||
Reviewed by: Pierre Rogier
|
||||
---
|
||||
.../tests/suites/import/import_test.py | 79 ++++++++++++++++++-
|
||||
.../back-ldbm/db-mdb/mdb_import_threads.c | 11 +++
|
||||
2 files changed, 89 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py
|
||||
index b7cba32fd..18caec633 100644
|
||||
--- a/dirsrvtests/tests/suites/import/import_test.py
|
||||
+++ b/dirsrvtests/tests/suites/import/import_test.py
|
||||
@@ -14,11 +14,13 @@ import os
|
||||
import pytest
|
||||
import time
|
||||
import glob
|
||||
+import re
|
||||
import logging
|
||||
import subprocess
|
||||
from datetime import datetime
|
||||
from lib389.topologies import topology_st as topo
|
||||
-from lib389._constants import DEFAULT_SUFFIX, TaskWarning
|
||||
+from lib389.topologies import topology_m2 as topo_m2
|
||||
+from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX, TaskWarning
|
||||
from lib389.dbgen import dbgen_users
|
||||
from lib389.tasks import ImportTask
|
||||
from lib389.index import Indexes
|
||||
@@ -690,6 +692,81 @@ def test_online_import_under_load(topo):
|
||||
assert import_task.get_exit_code() == 0
|
||||
|
||||
|
||||
+def test_duplicate_nsuniqueid(topo_m2, request):
|
||||
+ """Test that after an offline import all
|
||||
+ nsuniqueid are different
|
||||
+
|
||||
+ :id: a2541677-a288-4633-bacf-4050cc56016d
|
||||
+ :setup: MMR with 2 suppliers
|
||||
+ :steps:
|
||||
+ 1. stop the instance to do offline operations
|
||||
+ 2. Generate a 5K users LDIF file
|
||||
+ 3. Check that no uniqueid are present in the generated file
|
||||
+ 4. import the generated LDIF
|
||||
+ 5. export the database
|
||||
+ 6. Check that the exported LDIF contains more than 5K nsuniqueid
|
||||
+ 7. Check that there is no duplicate nsuniqueid in the exported LDIF
|
||||
+ :expectedresults:
|
||||
+ 1. Should succeed
|
||||
+ 2. Should succeed
|
||||
+ 3. Should succeed
|
||||
+ 4. Should succeed
|
||||
+ 5. Should succeed
|
||||
+ 6. Should succeed
|
||||
+ 7. Should succeed
|
||||
+ """
|
||||
+ m1 = topo_m2.ms["supplier1"]
|
||||
+
|
||||
+ # Stop the instance
|
||||
+ m1.stop()
|
||||
+
|
||||
+ # Generate a test ldif (5k entries)
|
||||
+ log.info("Generating LDIF...")
|
||||
+ ldif_dir = m1.get_ldif_dir()
|
||||
+ import_ldif = ldif_dir + '/5k_users_import.ldif'
|
||||
+ dbgen_users(m1, 5000, import_ldif, DEFAULT_SUFFIX)
|
||||
+
|
||||
+ # Check that the generated LDIF does not contain nsuniqueid
|
||||
+ all_nsuniqueid = []
|
||||
+ with open(import_ldif, 'r') as file:
|
||||
+ for line in file:
|
||||
+ if line.lower().startswith("nsuniqueid: "):
|
||||
+ all_nsuniqueid.append(line.split(': ')[1])
|
||||
+ log.info("import file contains " + str(len(all_nsuniqueid)) + " nsuniqueid")
|
||||
+ assert len(all_nsuniqueid) == 0
|
||||
+
|
||||
+ # Import the "nsuniquied free" LDIF file
|
||||
+ if not m1.ldif2db('userRoot', None, None, None, import_ldif):
|
||||
+ assert False
|
||||
+
|
||||
+ # Export the DB that now should contain nsuniqueid
|
||||
+ export_ldif = ldif_dir + '/5k_user_export.ldif'
|
||||
+ log.info("export to file " + export_ldif)
|
||||
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
|
||||
+ excludeSuffixes=None, repl_data=False,
|
||||
+ outputfile=export_ldif, encrypt=False)
|
||||
+
|
||||
+ # Check that the export LDIF contain nsuniqueid
|
||||
+ all_nsuniqueid = []
|
||||
+ with open(export_ldif, 'r') as file:
|
||||
+ for line in file:
|
||||
+ if line.lower().startswith("nsuniqueid: "):
|
||||
+ all_nsuniqueid.append(line.split(': ')[1])
|
||||
+ log.info("export file " + export_ldif + " contains " + str(len(all_nsuniqueid)) + " nsuniqueid")
|
||||
+ assert len(all_nsuniqueid) >= 5000
|
||||
+
|
||||
+ # Check that the nsuniqueid are unique
|
||||
+ assert len(set(all_nsuniqueid)) == len(all_nsuniqueid)
|
||||
+
|
||||
+ def fin():
|
||||
+ if os.path.exists(import_ldif):
|
||||
+ os.remove(import_ldif)
|
||||
+ if os.path.exists(export_ldif):
|
||||
+ os.remove(export_ldif)
|
||||
+ m1.start()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
||||
index 707a110c5..0f445bb56 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
||||
@@ -610,10 +610,20 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e)
|
||||
{
|
||||
const char *uniqueid = slapi_entry_get_uniqueid(e);
|
||||
int rc = UID_SUCCESS;
|
||||
+ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
if (!uniqueid && (job->uuid_gen_type != SLAPI_UNIQUEID_GENERATE_NONE)) {
|
||||
char *newuniqueid;
|
||||
|
||||
+ /* With 'mdb' several workers generate nsuniqueid, so
|
||||
+ * we need to serialize them to prevent generating duplicate values.
|
||||
+ * From a performance POV this only impacts import.
|
||||
+ * The default value is SLAPI_UNIQUEID_GENERATE_TIME_BASED, so
|
||||
+ * the only syscall is clock_gettime followed by string formatting,
|
||||
+ * which should limit contention.
|
||||
+ */
|
||||
+ pthread_mutex_lock(&mutex);
|
||||
+
|
||||
/* generate id based on dn */
|
||||
if (job->uuid_gen_type == SLAPI_UNIQUEID_GENERATE_NAME_BASED) {
|
||||
char *dn = slapi_entry_get_dn(e);
|
||||
@@ -624,6 +634,7 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e)
|
||||
/* time based */
|
||||
rc = slapi_uniqueIDGenerateString(&newuniqueid);
|
||||
}
|
||||
+ pthread_mutex_unlock(&mutex);
|
||||
|
||||
if (rc == UID_SUCCESS) {
|
||||
slapi_entry_set_uniqueid(e, newuniqueid);
|
||||
--
|
||||
2.48.0
|
||||
|
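
The worker race fixed in mdb_import_threads.c can be modeled outside the server: several threads drawing time-based IDs in parallel may observe the same clock tick, and serializing the generator closes the window. A Python sketch of the pattern — the monotonic counter stands in for the time-based generator, not for slapi_uniqueIDGenerateString:

    import threading
    import time

    lock = threading.Lock()
    last = [0]
    ids = []

    def gen_time_based_id():
        # Hold the lock across read-clock-and-emit, mirroring the
        # pthread_mutex added around the uniqueid generator.
        with lock:
            stamp = max(time.monotonic_ns(), last[0] + 1)
            last[0] = stamp
            ids.append(stamp)

    workers = [threading.Thread(target=gen_time_based_id) for _ in range(100)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    assert len(set(ids)) == len(ids)  # serialized generation never duplicates
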
@ -47,7 +47,7 @@ ExcludeArch: i686
|
||||
Summary: 389 Directory Server (base)
|
||||
Name: 389-ds-base
|
||||
Version: 2.6.1
|
||||
-Release: 2%{?dist}
|
||||
+Release: 3%{?dist}
|
||||
License: GPL-3.0-or-later WITH GPL-3.0-389-ds-base-exception AND (0BSD OR Apache-2.0 OR MIT) AND (Apache-2.0 OR Apache-2.0 WITH LLVM-exception OR MIT) AND (Apache-2.0 OR BSD-2-Clause OR MIT) AND (Apache-2.0 OR BSL-1.0) AND (Apache-2.0 OR MIT OR Zlib) AND (Apache-2.0 OR MIT) AND (CC-BY-4.0 AND MIT) AND (MIT OR Apache-2.0) AND Unicode-3.0 AND (MIT OR CC0-1.0) AND (MIT OR Unlicense) AND 0BSD AND Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MIT AND ISC AND MPL-2.0 AND PSF-2.0
|
||||
URL: https://www.port389.org
|
||||
Conflicts: selinux-policy-base < 3.9.8
|
||||
@ -472,6 +472,12 @@ Source4: 389-ds-base.sysusers
|
||||
|
||||
Patch: 0001-Issue-6468-Fix-building-for-older-versions-of-Python.patch
|
||||
Patch: 0002-Issue-6489-After-log-rotation-refresh-the-FD-pointer.patch
|
||||
Patch: 0003-Issue-6374-nsslapd-mdb-max-dbs-autotuning-doesn-t-wo.patch
|
||||
Patch: 0004-Issue-6090-Fix-dbscan-options-and-man-pages-6315.patch
|
||||
Patch: 0005-Issue-6566-RI-plugin-failure-to-handle-a-modrdn-for-.patch
|
||||
Patch: 0006-Issue-6258-Mitigate-race-condition-in-paged_results_.patch
|
||||
Patch: 0007-Issue-6229-After-an-initial-failure-subsequent-onlin.patch
|
||||
Patch: 0008-Issue-6554-During-import-of-entries-without-nsUnique.patch
|
||||
|
||||
%description
|
||||
389 Directory Server is an LDAPv3 compliant server. The base package includes
|
||||
@ -914,6 +920,14 @@ exit 0
|
||||
%endif
|
||||
|
||||
%changelog
|
||||
* Wed Feb 12 2025 Viktor Ashirov <vashirov@redhat.com> - 2.6.1-3
|
||||
- Resolves: RHEL-18333 Can't rename users member of automember rule
|
||||
- Resolves: RHEL-61341 After an initial failure, subsequent online backups will not work.
|
||||
- Resolves: RHEL-63887 nsslapd-mdb-max-dbs autotuning doesn't work properly
|
||||
- Resolves: RHEL-63891 dbscan crashes when showing statistics for MDB
|
||||
- Resolves: RHEL-63998 dsconf should check for number of available named databases
|
||||
- Resolves: RHEL-78344 During import of entries without nsUniqueId, a supplier generates duplicate nsUniqueId (LMDB only) [rhel-9]
|
||||
|
||||
* Sat Feb 01 2025 Viktor Ashirov <vashirov@redhat.com> - 2.6.1-2
|
||||
- Resolves: RHEL-76748: ns-slapd crashes with data directory ≥ 2 days old
|
||||
|
||||
|
4
main.fmf
@ -10,8 +10,8 @@
|
||||
package: [389-ds-base, git, pytest]
|
||||
- name: clone repo
|
||||
how: shell
|
||||
- script: git clone https://github.com/389ds/389-ds-base /root/ds
|
||||
+ script: git clone -b 389-ds-base-3.0 https://github.com/389ds/389-ds-base /root/ds
|
||||
/test:
|
||||
/upstream_basic:
|
||||
test: pytest -v /root/ds/dirsrvtests/tests/suites/basic/basic_test.py
|
||||
- duration: 30m
|
||||
+ duration: 60m
|
||||
|