- Resolves: RHEL-18333 Can't rename users member of automember rule - Resolves: RHEL-61341 After an initial failure, subsequent online backups will not work. - Resolves: RHEL-63887 nsslapd-mdb-max-dbs autotuning doesn't work properly - Resolves: RHEL-63891 dbscan crashes when showing statistics for MDB - Resolves: RHEL-63998 dsconf should check for number of available named databases - Resolves: RHEL-78344 During import of entries without nsUniqueId, a supplier generates duplicate nsUniqueId (LMDB only) [rhel-9]
166 lines
6.2 KiB
Diff
166 lines
6.2 KiB
Diff
From b2511553590f0d9b41856d8baff5f3cd103dd46f Mon Sep 17 00:00:00 2001
|
|
From: tbordaz <tbordaz@redhat.com>
|
|
Date: Thu, 6 Feb 2025 18:25:36 +0100
|
|
Subject: [PATCH] Issue 6554 - During import of entries without nsUniqueId, a
|
|
supplier generates duplicate nsUniqueId (LMDB only) (#6582)
|
|
|
|
Bug description:
|
|
During an import the entry is prepared (schema, operational
|
|
attributes, password encryption,...) before starting the
|
|
update of the database and indexes.
|
|
A step of the preparation is to assign a value to 'nsuniqueid'
|
|
operational attribute. 'nsuniqueid' must be unique.
|
|
In LMDB the preparation is done by multiple threads (workers).
|
|
In such case the 'nsuniqueid' are generated in parallel and
|
|
as it is time based several values can be duplicated.
|
|
|
|
Fix description:
|
|
To prevent that the routine dbmdb_import_generate_uniqueid
|
|
should make sure to synchronize the workers.
|
|
|
|
fixes: #6554
|
|
|
|
Reviewed by: Pierre Rogier
|
|
---
|
|
.../tests/suites/import/import_test.py | 79 ++++++++++++++++++-
|
|
.../back-ldbm/db-mdb/mdb_import_threads.c | 11 +++
|
|
2 files changed, 89 insertions(+), 1 deletion(-)
|
|
|
|
diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py
|
|
index b7cba32fd..18caec633 100644
|
|
--- a/dirsrvtests/tests/suites/import/import_test.py
|
|
+++ b/dirsrvtests/tests/suites/import/import_test.py
|
|
@@ -14,11 +14,13 @@ import os
|
|
import pytest
|
|
import time
|
|
import glob
|
|
+import re
|
|
import logging
|
|
import subprocess
|
|
from datetime import datetime
|
|
from lib389.topologies import topology_st as topo
|
|
-from lib389._constants import DEFAULT_SUFFIX, TaskWarning
|
|
+from lib389.topologies import topology_m2 as topo_m2
|
|
+from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX, TaskWarning
|
|
from lib389.dbgen import dbgen_users
|
|
from lib389.tasks import ImportTask
|
|
from lib389.index import Indexes
|
|
@@ -690,6 +692,81 @@ def test_online_import_under_load(topo):
|
|
assert import_task.get_exit_code() == 0
|
|
|
|
|
|
+def test_duplicate_nsuniqueid(topo_m2, request):
|
|
+ """Test that after an offline import all
|
|
+ nsuniqueid are different
|
|
+
|
|
+ :id: a2541677-a288-4633-bacf-4050cc56016d
|
|
+ :setup: MMR with 2 suppliers
|
|
+ :steps:
|
|
+ 1. stop the instance to do offline operations
|
|
+ 2. Generate a 5K users LDIF file
|
|
+ 3. Check that no uniqueid are present in the generated file
|
|
+ 4. import the generated LDIF
|
|
+ 5. export the database
|
|
+ 6. Check that the exported LDIF contains more than 5K nsuniqueid
|
|
+ 7. Check that there is no duplicate nsuniqueid in the exported LDIF
|
|
+ :expectedresults:
|
|
+ 1. Should succeed
|
|
+ 2. Should succeed
|
|
+ 3. Should succeed
|
|
+ 4. Should succeed
|
|
+ 5. Should succeed
|
|
+ 6. Should succeed
|
|
+ 7. Should succeed
|
|
+ """
|
|
+ m1 = topo_m2.ms["supplier1"]
|
|
+
|
|
+ # Stop the instance
|
|
+ m1.stop()
|
|
+
|
|
+ # Generate a test ldif (5k entries)
|
|
+ log.info("Generating LDIF...")
|
|
+ ldif_dir = m1.get_ldif_dir()
|
|
+ import_ldif = ldif_dir + '/5k_users_import.ldif'
|
|
+ dbgen_users(m1, 5000, import_ldif, DEFAULT_SUFFIX)
|
|
+
|
|
+ # Check that the generated LDIF does not contain nsuniqueid
|
|
+ all_nsuniqueid = []
|
|
+ with open(import_ldif, 'r') as file:
|
|
+ for line in file:
|
|
+ if line.lower().startswith("nsuniqueid: "):
|
|
+ all_nsuniqueid.append(line.split(': ')[1])
|
|
+ log.info("import file contains " + str(len(all_nsuniqueid)) + " nsuniqueid")
|
|
+ assert len(all_nsuniqueid) == 0
|
|
+
|
|
+ # Import the "nsuniqueid free" LDIF file
|
|
+ if not m1.ldif2db('userRoot', None, None, None, import_ldif):
|
|
+ assert False
|
|
+
|
|
+ # Export the DB that now should contain nsuniqueid
|
|
+ export_ldif = ldif_dir + '/5k_user_export.ldif'
|
|
+ log.info("export to file " + export_ldif)
|
|
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
|
|
+ excludeSuffixes=None, repl_data=False,
|
|
+ outputfile=export_ldif, encrypt=False)
|
|
+
|
|
+ # Check that the export LDIF contain nsuniqueid
|
|
+ all_nsuniqueid = []
|
|
+ with open(export_ldif, 'r') as file:
|
|
+ for line in file:
|
|
+ if line.lower().startswith("nsuniqueid: "):
|
|
+ all_nsuniqueid.append(line.split(': ')[1])
|
|
+ log.info("export file " + export_ldif + " contains " + str(len(all_nsuniqueid)) + " nsuniqueid")
|
|
+ assert len(all_nsuniqueid) >= 5000
|
|
+
|
|
+ # Check that the nsuniqueid are unique
|
|
+ assert len(set(all_nsuniqueid)) == len(all_nsuniqueid)
|
|
+
|
|
+ def fin():
|
|
+ if os.path.exists(import_ldif):
|
|
+ os.remove(import_ldif)
|
|
+ if os.path.exists(export_ldif):
|
|
+ os.remove(export_ldif)
|
|
+ m1.start()
|
|
+
|
|
+ request.addfinalizer(fin)
|
|
+
|
|
if __name__ == '__main__':
|
|
# Run isolated
|
|
# -s for DEBUG mode
|
|
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
|
index 707a110c5..0f445bb56 100644
|
|
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
|
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
|
|
@@ -610,10 +610,20 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e)
|
|
{
|
|
const char *uniqueid = slapi_entry_get_uniqueid(e);
|
|
int rc = UID_SUCCESS;
|
|
+ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
|
|
|
|
if (!uniqueid && (job->uuid_gen_type != SLAPI_UNIQUEID_GENERATE_NONE)) {
|
|
char *newuniqueid;
|
|
|
|
+ /* With 'mdb' we have several workers generating nsuniqueid
|
|
+ * we need to serialize them to prevent generating duplicate values
|
|
+ * From performance pov it only impacts import
|
|
+ * The default value is SLAPI_UNIQUEID_GENERATE_TIME_BASED so
|
|
+ * the only syscall is clock_gettime and then string formatting
|
|
+ * that should limit contention
|
|
+ */
|
|
+ pthread_mutex_lock(&mutex);
|
|
+
|
|
/* generate id based on dn */
|
|
if (job->uuid_gen_type == SLAPI_UNIQUEID_GENERATE_NAME_BASED) {
|
|
char *dn = slapi_entry_get_dn(e);
|
|
@@ -624,6 +634,7 @@ dbmdb_import_generate_uniqueid(ImportJob *job, Slapi_Entry *e)
|
|
/* time based */
|
|
rc = slapi_uniqueIDGenerateString(&newuniqueid);
|
|
}
|
|
+ pthread_mutex_unlock(&mutex);
|
|
|
|
if (rc == UID_SUCCESS) {
|
|
slapi_entry_set_uniqueid(e, newuniqueid);
|
|
--
|
|
2.48.0
|
|
|