Import from CS git

commit 4c5c05b796 (parent 2090fec9cd)
@@ -0,0 +1,53 @@
From c9a1628de9f666a43911392220c0e83d051a51c6 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Thu, 6 Jun 2024 21:05:09 +0200
Subject: [PATCH] Issue 6192 - Test failure: test_match_large_valueset

Description:
When BDB backend is used, nsslapd-cache-autosize needs to be set to 0
first in order to change nsslapd-cachememsize.
Also increase the expected etime slightly, as it fails on slower VMs
both with BDB and MDB backends.

Fixes: https://github.com/389ds/389-ds-base/issues/6192

Reviewed by: @droideck, @tbordaz (Thanks!)
---
 dirsrvtests/tests/suites/filter/filter_test.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py
index 4baaf04a7..b17846c01 100644
--- a/dirsrvtests/tests/suites/filter/filter_test.py
+++ b/dirsrvtests/tests/suites/filter/filter_test.py
@@ -15,7 +15,7 @@ from lib389.tasks import *
 from lib389.backend import Backends, Backend
 from lib389.dbgen import dbgen_users, dbgen_groups
 from lib389.topologies import topology_st
-from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX
+from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX, DN_CONFIG_LDBM
 from lib389.utils import *

 pytestmark = pytest.mark.tier1
@@ -372,6 +372,9 @@ def test_match_large_valueset(topology_st):
         be_groups = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': groups_suffix, 'name': groups_backend})

         # set the entry cache to 200Mb as the 1K groups of 2K users require at least 170Mb
+        if get_default_db_lib() == "bdb":
+            config_ldbm = DSLdapObject(inst, DN_CONFIG_LDBM)
+            config_ldbm.set('nsslapd-cache-autosize', '0')
         be_groups.replace('nsslapd-cachememsize', groups_entrycache)
     except:
         raise
@@ -427,7 +430,7 @@ def test_match_large_valueset(topology_st):
         etime = float(search_result[1].split('etime=')[1])
         log.info("Duration of the search from access log was %f", etime)
         assert len(entries) == groups_number
-        assert (etime < 1)
+        assert (etime < 5)

 if __name__ == '__main__':
     # Run isolated
--
2.47.1
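The relaxed assertion above is driven by the operation time parsed out of the server access log. A minimal Python sketch of that extraction, assuming a RESULT line of the usual 389-ds access-log shape (the sample line below is invented for illustration):

    # Pull etime from an access-log RESULT line, the same way the test does.
    sample = ('[06/Jun/2024:21:05:09.123456789 +0200] conn=1 op=2 RESULT err=0 '
              'tag=101 nentries=1000 wtime=0.000081 optime=0.500042 etime=0.500123')
    etime = float(sample.split('etime=')[1])
    assert etime < 5  # the relaxed bound introduced by this patch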
@@ -0,0 +1,217 @@
From 58efd22cdefed368c97ef235f2abb66ac4aec234 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 5 Sep 2024 10:19:59 +0200
Subject: [PATCH] Issue 6307 - Wrong set of entries returned for some search
 filters (#6308)

Bug description:
When the server returns an entry to a search it
checks both access and matching of the filter.
When evaluating a '!' (NOT) logical expression, the server
in a first phase evaluates ONLY the right to access the
related component (and its subcomponents).
Then in a second phase it verifies the matching.
If the related component is an OR, in the first phase it
evaluates access AND matching, even if the call was
to evaluate only access.
This results in inconsistent results.

Fix description:
Make sure that when the function vattr_test_filter_list_or
is called to only check access, it does not evaluate the matching.

fixes: #6307

Reviewed by: Pierre Rogier (Thanks !!)
---
 .../tests/suites/filter/filter_test.py | 116 ++++++++++++++++++
 ldap/servers/slapd/filterentry.c       |  11 ++
 2 files changed, 127 insertions(+)

diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py
index b17846c01..e1d02f133 100644
--- a/dirsrvtests/tests/suites/filter/filter_test.py
+++ b/dirsrvtests/tests/suites/filter/filter_test.py
@@ -10,12 +10,14 @@ import logging

 import pytest
 import time
+from ldap import SCOPE_SUBTREE
 from lib389.dirsrv_log import DirsrvAccessLog
 from lib389.tasks import *
 from lib389.backend import Backends, Backend
 from lib389.dbgen import dbgen_users, dbgen_groups
 from lib389.topologies import topology_st
 from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX, DN_CONFIG_LDBM
+from lib389.idm.user import UserAccount, UserAccounts
 from lib389.utils import *

 pytestmark = pytest.mark.tier1
@@ -432,6 +434,120 @@ def test_match_large_valueset(topology_st):
         assert len(entries) == groups_number
         assert (etime < 5)

+def test_filter_not_operator(topology_st, request):
+    """Test ldapsearch with scope one gives only single entry
+
+    :id: b3711e02-7e76-444d-82f3-495c6dadd97f
+    :setup: Standalone instance
+    :steps:
+        1. Creating user1..user9
+        2. Adding specific 'telephonenumber' to the users
+        3. Check returned set (5 users) with the first filter
+        4. Check returned set (4 users) with the second filter
+    :expectedresults:
+        1. This should pass
+        2. This should pass
+        3. This should pass
+        4. This should pass
+    """
+
+    topology_st.standalone.start()
+    # Creating Users
+    log.info('Create users from user1 to user9')
+    users = UserAccounts(topology_st.standalone, "ou=people,%s" % DEFAULT_SUFFIX, rdn=None)
+
+    for user in ['user1',
+                 'user2',
+                 'user3',
+                 'user4',
+                 'user5',
+                 'user6',
+                 'user7',
+                 'user8',
+                 'user9']:
+        users.create(properties={
+            'mail': f'{user}@redhat.com',
+            'uid': user,
+            'givenName': user.title(),
+            'cn': f'bit {user}',
+            'sn': user.title(),
+            'manager': f'uid={user},{SUFFIX}',
+            'userpassword': PW_DM,
+            'homeDirectory': '/home/' + user,
+            'uidNumber': '1000',
+            'gidNumber': '2000',
+        })
+    # Adding specific values to the users
+    log.info('Adding telephonenumber values')
+    user = UserAccount(topology_st.standalone, 'uid=user1, ou=people, %s' % DEFAULT_SUFFIX)
+    user.add('telephonenumber', ['1234', '2345'])
+
+    user = UserAccount(topology_st.standalone, 'uid=user2, ou=people, %s' % DEFAULT_SUFFIX)
+    user.add('telephonenumber', ['1234', '4567'])
+
+    user = UserAccount(topology_st.standalone, 'uid=user3, ou=people, %s' % DEFAULT_SUFFIX)
+    user.add('telephonenumber', ['1234', '4567'])
+
+    user = UserAccount(topology_st.standalone, 'uid=user4, ou=people, %s' % DEFAULT_SUFFIX)
+    user.add('telephonenumber', ['1234'])
+
+    user = UserAccount(topology_st.standalone, 'uid=user5, ou=people, %s' % DEFAULT_SUFFIX)
+    user.add('telephonenumber', ['2345'])
+
+    user = UserAccount(topology_st.standalone, 'uid=user6, ou=people, %s' % DEFAULT_SUFFIX)
+    user.add('telephonenumber', ['3456'])
+
+    user = UserAccount(topology_st.standalone, 'uid=user7, ou=people, %s' % DEFAULT_SUFFIX)
+    user.add('telephonenumber', ['4567'])
+
+    user = UserAccount(topology_st.standalone, 'uid=user8, ou=people, %s' % DEFAULT_SUFFIX)
+    user.add('telephonenumber', ['1234'])
+
+    user = UserAccount(topology_st.standalone, 'uid=user9, ou=people, %s' % DEFAULT_SUFFIX)
+    user.add('telephonenumber', ['1234', '4567'])
+
+    # Do a first test of filter containing a NOT
+    # and check the expected values are retrieved
+    log.info('Search with filter containing NOT')
+    log.info('expect user2, user3, user6, user8 and user9')
+    filter1 = "(|(telephoneNumber=3456)(&(telephoneNumber=1234)(!(|(uid=user1)(uid=user4)))))"
+    entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, SCOPE_SUBTREE, filter1)
+    uids = []
+    for entry in entries:
+        assert entry.hasAttr('uid')
+        uids.append(entry.getValue('uid'))
+
+    assert len(uids) == 5
+    for uid in [b'user2', b'user3', b'user6', b'user8', b'user9']:
+        assert uid in uids
+
+    # Do a second test of filter containing a NOT
+    # and check the expected values are retrieved
+    log.info('Search with a second filter containing NOT')
+    log.info('expect user2, user3, user8 and user9')
+    filter1 = "(|(&(telephoneNumber=1234)(!(|(uid=user1)(uid=user4)))))"
+    entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, SCOPE_SUBTREE, filter1)
+    uids = []
+    for entry in entries:
+        assert entry.hasAttr('uid')
+        uids.append(entry.getValue('uid'))
+
+    assert len(uids) == 4
+    for uid in [b'user2', b'user3', b'user8', b'user9']:
+        assert uid in uids
+
+    def fin():
+        """
+        Deletes entries after the test.
+        """
+        for user in users.list():
+            pass
+            user.delete()
+
+
+    request.addfinalizer(fin)
+
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c
index d2c7e3082..f5604161d 100644
--- a/ldap/servers/slapd/filterentry.c
+++ b/ldap/servers/slapd/filterentry.c
@@ -948,14 +948,17 @@ slapi_vattr_filter_test_ext_internal(
         break;

     case LDAP_FILTER_NOT:
+        slapi_log_err(SLAPI_LOG_FILTER, "vattr_test_filter_list_NOT", "=>\n");
         rc = slapi_vattr_filter_test_ext_internal(pb, e, f->f_not, verify_access, only_check_access, access_check_done);
         if (verify_access && only_check_access) {
             /* dont play with access control return codes
              * do not negate return code */
+            slapi_log_err(SLAPI_LOG_FILTER, "vattr_test_filter_list_NOT only check access", "<= %d\n", rc);
             break;
         }
         if (rc > 0) {
             /* an error occurred or access denied, don't negate */
+            slapi_log_err(SLAPI_LOG_FILTER, "vattr_test_filter_list_NOT slapi_vattr_filter_test_ext_internal fails", "<= %d\n", rc);
             break;
         }
         if (verify_access) {
@@ -980,6 +983,7 @@ slapi_vattr_filter_test_ext_internal(
             /* filter verification only, no error */
             rc = (rc == 0) ? -1 : 0;
         }
+        slapi_log_err(SLAPI_LOG_FILTER, "vattr_test_filter_list_NOT", "<= %d\n", rc);
         break;

     default:
@@ -1084,6 +1088,13 @@ vattr_test_filter_list_or(
                 continue;
             }
         }
+        /* we are not evaluating if the entry matches
+         * but only that we have access to ALL components
+         * so check the next one
+         */
+        if (only_check_access) {
+            continue;
+        }
         /* now check if filter matches */
         /*
          * We can NOT skip this because we need to know if the item we matched on
--
2.47.1
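To make the expected result sets above concrete, here is a pure-Python model (not server code) of the two NOT filters, evaluated over the telephonenumber data the test creates; a correct server returns exactly these uids:

    # Model of the test data and the two filters from test_filter_not_operator.
    phones = {
        'user1': {'1234', '2345'}, 'user2': {'1234', '4567'},
        'user3': {'1234', '4567'}, 'user4': {'1234'},
        'user5': {'2345'}, 'user6': {'3456'}, 'user7': {'4567'},
        'user8': {'1234'}, 'user9': {'1234', '4567'},
    }

    # (|(telephoneNumber=3456)(&(telephoneNumber=1234)(!(|(uid=user1)(uid=user4)))))
    def f1(uid):
        return '3456' in phones[uid] or ('1234' in phones[uid] and uid not in ('user1', 'user4'))

    # (|(&(telephoneNumber=1234)(!(|(uid=user1)(uid=user4)))))
    def f2(uid):
        return '1234' in phones[uid] and uid not in ('user1', 'user4')

    assert sorted(u for u in phones if f1(u)) == ['user2', 'user3', 'user6', 'user8', 'user9']
    assert sorted(u for u in phones if f2(u)) == ['user2', 'user3', 'user8', 'user9']

Before the fix, the OR component under the NOT was also matched during the access-only phase, so the negation could be applied to the wrong intermediate result and the returned set differed from the one modeled here.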
@@ -0,0 +1,254 @@
From cd13c0e33d2f5c63e50af90c6841f6104c4dcdb9 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 24 Oct 2024 19:18:03 -0400
Subject: [PATCH] Issue 6381 - CleanAllRUV - move changelog purging to the very
 end of the task

Description:

There are deadlock situations that can occur when cleanAllRUV is removing the
clean task attribute (nsds5ReplicaCleanRUV) from the replica config, while
the changelog purging is occurring. Instead do the changelog purge after
everything else is done and have the changelog purging code remove the rid
from the cleaned list once it finishes.

Also improved the task logging.

Fixes: https://github.com/389ds/389-ds-base/issues/6381

Reviewed by: progier389 (Thanks!)
---
 ldap/servers/plugins/replication/cl5_api.c    | 55 +++++++++++--------
 ldap/servers/plugins/replication/repl5.h      |  2 +-
 .../plugins/replication/repl_cleanallruv.c    | 43 ++++++++-------
 3 files changed, 57 insertions(+), 43 deletions(-)

diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index 413e78a30..a944d3b29 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -246,7 +246,7 @@ static int _cl5CheckMissingCSN(const CSN *minCsn, const RUV *supplierRUV, cldb_H
 static int cldb_IsTrimmingEnabled(cldb_Handle *cldb);
 static int _cl5TrimMain(void *param);
 void _cl5TrimReplica(Replica *r);
-void _cl5PurgeRID(cldb_Handle *cldb, ReplicaId cleaned_rid);
+void _cl5PurgeRID(cleanruv_purge_data *data, cldb_Handle *cldb);
 static PRBool _cl5CanTrim(time_t time, long *numToTrim, Replica *replica, CL5Config *dbTrim);
 int _cl5ConstructRUVs (cldb_Handle *cldb);
 int _cl5ReadRUVs(cldb_Handle *cldb);
@@ -984,7 +984,7 @@ cl5CreateReplayIteratorEx(Private_Repl_Protocol *prp, const RUV *consumerRuv, CL
     pthread_mutex_unlock(&(cldb->stLock));

     /* iterate through the ruv in csn order to find first supplier for which
-       we can replay changes */
+       we can replay changes */
     rc = _cl5PositionCursorForReplay (consumerRID, consumerRuv, replica, iterator, NULL);

     if (rc != CL5_SUCCESS) {
@@ -1874,8 +1874,8 @@ _cl5Iterate(cldb_Handle *cldb, dbi_iterate_cb_t *action_cb, DBLCI_CTX *dblcictx,
                 continue;
             }
         } else {
-            /* read-only opertion on bdb are transactionless, so no reason to abort txn
-             * after having seen some number of records
+            /* read-only opertion on bdb are transactionless, so no reason to abort txn
+             * after having seen some number of records
              */
             dblcictx->seen.nbmax = 0;
         }
@@ -2552,21 +2552,19 @@ _cl5TrimMain(void *param)
 static void
 _cl5DoPurging(cleanruv_purge_data *purge_data)
 {
-    ReplicaId rid = purge_data->cleaned_rid;
-    const Slapi_DN *suffix_sdn = purge_data->suffix_sdn;
     cldb_Handle *cldb = replica_get_cl_info(purge_data->replica);
-
     if (cldb == NULL) {
         slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
                       "_cl5DoPurging - Changelog info was NULL - is your replication configuration valid?\n");
         return;
     }
+
     pthread_mutex_lock(&(cldb->clLock));
-    _cl5PurgeRID (cldb, rid);
-    slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
-                  "_cl5DoPurging - Purged rid (%d) from suffix (%s)\n",
-                  rid, slapi_sdn_get_dn(suffix_sdn));
+
+    _cl5PurgeRID(purge_data, cldb);
+
     pthread_mutex_unlock(&(cldb->clLock));
+
     return;
 }

@@ -2653,7 +2651,7 @@ _cl5PurgeRidOnEntry(dbi_val_t *key, dbi_val_t *data, void *ctx)
 }

 /*
- * _cl5PurgeRID(Object *obj, ReplicaId cleaned_rid)
+ * _cl5PurgeRID(cleanruv_purge_data, cleaned_rid)
  *
  * Clean the entire changelog of updates from the "cleaned rid" via CLEANALLRUV
  * Delete entries in batches so we don't consume too many db locks, and we don't
@@ -2662,18 +2660,30 @@ _cl5PurgeRidOnEntry(dbi_val_t *key, dbi_val_t *data, void *ctx)
  * beginning for each new iteration.
  */
 void
-_cl5PurgeRID(cldb_Handle *cldb, ReplicaId cleaned_rid)
+_cl5PurgeRID(cleanruv_purge_data *data, cldb_Handle *cldb)
 {
     DBLCI_CTX dblcictx = {0};
+    int32_t rc = 0;

     dblcictx.seen.nbmax = CL5_PURGE_MAX_LOOKUP_PER_TRANSACTION;
     dblcictx.changed.nbmax = CL5_PURGE_MAX_PER_TRANSACTION;
-    dblcictx.rid2purge = cleaned_rid;
-    _cl5Iterate(cldb, _cl5PurgeRidOnEntry, &dblcictx, PR_FALSE);
-
-    slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
-                  "_cl5PurgeRID - Removed (%ld entries) that originated from rid (%d)\n",
-                  dblcictx.changed.tot, cleaned_rid);
+    dblcictx.rid2purge = data->cleaned_rid;
+
+    rc = _cl5Iterate(cldb, _cl5PurgeRidOnEntry, &dblcictx, PR_FALSE);
+    if (rc != CL5_SUCCESS && rc != CL5_NOTFOUND) {
+        cleanruv_log(data->task, data->cleaned_rid, CLEANALLRUV_ID,
+                     SLAPI_LOG_ERR,
+                     "Purging failed to iterate through the entire changelog "
+                     "(error %d). There is a chance the rid was not fully "
+                     "removed, and you may have to run the cleanAllRUV task "
+                     "again.",
+                     rc);
+    } else {
+        cleanruv_log(data->task, data->cleaned_rid, CLEANALLRUV_ID,
+                     SLAPI_LOG_INFO,
+                     "Purged %ld records from the changelog",
+                     dblcictx.changed.tot);
+    }
 }

 /*
@@ -4459,11 +4469,10 @@ trigger_cl_purging_thread(void *arg)
     /* Purge the changelog */
     _cl5DoPurging(purge_data);

-    slapi_counter_decrement(cldb->clThreads);
+    /* Remove the rid from the internal list */
+    remove_cleaned_rid(purge_data->cleaned_rid);

-    slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
-                  "trigger_cl_purging_thread - purged changelog for (%s) rid (%d)\n",
-                  slapi_sdn_get_dn(purge_data->suffix_sdn), purge_data->cleaned_rid);
+    slapi_counter_decrement(cldb->clThreads);

 free_and_return:
     pthread_mutex_unlock(&(cldb->stLock));
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index f7fc74e82..45b42be0f 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -830,8 +830,8 @@ typedef struct _cleanruv_data
 typedef struct _cleanruv_purge_data
 {
     int cleaned_rid;
-    const Slapi_DN *suffix_sdn;
     Replica *replica;
+    Slapi_Task *task;
 } cleanruv_purge_data;

 typedef struct _csngen_test_data
diff --git a/ldap/servers/plugins/replication/repl_cleanallruv.c b/ldap/servers/plugins/replication/repl_cleanallruv.c
index 42877add5..a985e691f 100644
--- a/ldap/servers/plugins/replication/repl_cleanallruv.c
+++ b/ldap/servers/plugins/replication/repl_cleanallruv.c
@@ -1777,7 +1777,6 @@ replica_execute_cleanruv_task(Replica *replica, ReplicaId rid, char *returntext
 {
     Object *RUVObj;
     RUV *local_ruv = NULL;
-    cleanruv_purge_data *purge_data;
     int rc = 0;
     PR_ASSERT(replica);

@@ -1794,10 +1793,14 @@ replica_execute_cleanruv_task(Replica *replica, ReplicaId rid, char *returntext
         (ruv_replica_count(local_ruv) <= 1)) {
         return LDAP_UNWILLING_TO_PERFORM;
     }
-    rc = ruv_delete_replica(local_ruv, rid);
-    if (replica_write_ruv(replica)) {
+    if ((rc = ruv_delete_replica(local_ruv, rid))) {
+        slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "cleanAllRUV_task - "
+                      "Failed to remove rid from RUV (%d)\n", rc);
+        return LDAP_OPERATIONS_ERROR;
+    }
+    if ((rc = replica_write_ruv(replica))) {
         slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
-                      "cleanAllRUV_task - Could not write RUV\n");
+                      "cleanAllRUV_task - Could not write RUV (%d)\n", rc);
     }
     object_release(RUVObj);

@@ -1809,19 +1812,6 @@ replica_execute_cleanruv_task(Replica *replica, ReplicaId rid, char *returntext
      */
     cl5CleanRUV(rid, replica);

-    /*
-     * Now purge the changelog. The purging thread will free the purge_data
-     */
-    purge_data = (cleanruv_purge_data *)slapi_ch_calloc(1, sizeof(cleanruv_purge_data));
-    purge_data->cleaned_rid = rid;
-    purge_data->suffix_sdn = replica_get_root(replica);
-    purge_data->replica = replica;
-    trigger_cl_purging(purge_data);
-
-    if (rc != RUV_SUCCESS) {
-        slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "cleanAllRUV_task - Task failed(%d)\n", rc);
-        return LDAP_OPERATIONS_ERROR;
-    }
     slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "cleanAllRUV_task - Finished successfully\n");
     return LDAP_SUCCESS;
 }
@@ -2097,6 +2087,7 @@ static void
 replica_cleanallruv_thread(void *arg)
 {
     cleanruv_data *data = arg;
+    cleanruv_purge_data *purge_data = NULL;
     Object *agmt_obj = NULL;
     Object *ruv_obj = NULL;
     Repl_Agmt *agmt = NULL;
@@ -2377,7 +2368,20 @@ done:
                     "Propagated task does not delete Keep alive entry (%d).", data->rid);
     }
     clean_agmts(data);
-    remove_cleaned_rid(data->rid);
+
+    /*
+     * Now purge the changelog. The purging thread will free the
+     * purge_data and update the cleaned rid list
+     */
+    purge_data = (cleanruv_purge_data *)slapi_ch_calloc(1, sizeof(cleanruv_purge_data));
+    purge_data->cleaned_rid = data->rid;
+    purge_data->replica = data->replica;
+    purge_data->task = data->task;
+    cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO,
+                 "Triggering changelog purge thread. This might complete "
+                 "after the cleaning task finishes.");
+    trigger_cl_purging(purge_data);
+
     cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO,
                  "Successfully cleaned rid(%d)", data->rid);
 } else {
@@ -2436,7 +2440,8 @@ clean_agmts(cleanruv_data *data)
             agmt_obj = agmtlist_get_next_agreement_for_replica(data->replica, agmt_obj);
             continue;
         }
-        cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Cleaning agmt...");
+        cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO,
+                     "Cleaning agmt (%s) ...", agmt_get_long_name(agmt));
         agmt_stop(agmt);
         agmt_update_consumer_ruv(agmt);
         agmt_start(agmt);
--
2.47.1
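For context, the task whose ordering this patch changes is normally driven through lib389. A sketch assuming the lib389 CleanAllRUVTask helper and a replicated topology fixture named topo (both names are assumptions here, not part of the patch):

    from lib389.tasks import CleanAllRUVTask
    from lib389._constants import DEFAULT_SUFFIX

    # Launch cleanAllRUV against a retired replica id; with this patch the
    # changelog purge runs at the very end, and the purging code itself
    # removes the rid from the cleaned list when it finishes.
    task = CleanAllRUVTask(topo.ms['supplier1'])
    task.create(properties={
        'replica-id': '2',
        'replica-base-dn': DEFAULT_SUFFIX,
        'replica-force-cleaning': 'no',
    })
    task.wait()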
@@ -0,0 +1,460 @@
From 1ffc8c7c97b158e8cefd22bd0402aa0285b2baba Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 31 Oct 2024 08:09:09 -0400
Subject: [PATCH] Issue 6390 - Adjust cleanAllRUV max per txn and interval
 limits

The cleanAllRUV changelog purging and the "normal" changelog trimming are
using different limits. The cleanAllRUV "purging" holds the transaction 10
times longer than the "trimming" does. When we use the same/smaller limits
the CPU still gets high, but it is spread out and should not block other
threads from running.

Improved the overall workflow of cleanAllRUV - changed changelog purging to
be a function and not a thread, which simplifies other parts of the code.

Also fixed a typo in repl logs (issue 6369) and a deadlock with lmdb during
purging (issue 6396)

relates: https://github.com/389ds/389-ds-base/issues/6390
fixes: https://github.com/389ds/389-ds-base/issues/6396
fixes: https://github.com/389ds/389-ds-base/issues/6369

Reviewed by: jchapman & progier (Thanks!!)
---
 ldap/servers/plugins/replication/cl5_api.c    | 127 ++++++++----------
 ldap/servers/plugins/replication/cl5_api.h    |  41 +++---
 ldap/servers/plugins/replication/repl5.h      |   8 +-
 .../plugins/replication/repl5_protocol_util.c |   2 +-
 .../plugins/replication/repl_cleanallruv.c    |  30 ++---
 5 files changed, 91 insertions(+), 117 deletions(-)

diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index a944d3b29..92fb776dc 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -90,8 +90,6 @@
 #define MAX_RETRIES 10 /* Maximum number of retry in case of db retryable error */
 #define CL5_TRIM_MAX_PER_TRANSACTION 100
 #define CL5_TRIM_MAX_LOOKUP_PER_TRANSACTION 10000
-#define CL5_PURGE_MAX_PER_TRANSACTION 1000
-#define CL5_PURGE_MAX_LOOKUP_PER_TRANSACTION 10000

 /***** Data Definitions *****/

@@ -246,14 +244,13 @@ static int _cl5CheckMissingCSN(const CSN *minCsn, const RUV *supplierRUV, cldb_H
 static int cldb_IsTrimmingEnabled(cldb_Handle *cldb);
 static int _cl5TrimMain(void *param);
 void _cl5TrimReplica(Replica *r);
-void _cl5PurgeRID(cleanruv_purge_data *data, cldb_Handle *cldb);
+int32_t _cl5PurgeRID(cleanruv_data *data, cldb_Handle *cldb);
 static PRBool _cl5CanTrim(time_t time, long *numToTrim, Replica *replica, CL5Config *dbTrim);
 int _cl5ConstructRUVs (cldb_Handle *cldb);
 int _cl5ReadRUVs(cldb_Handle *cldb);
 static int _cl5WriteRUV(cldb_Handle *cldb, PRBool purge);
 static int _cl5UpdateRUV (cldb_Handle *cldb, CSN *csn, PRBool newReplica, PRBool purge);
 static int _cl5GetRUV2Purge2(Replica *r, RUV **ruv);
-void trigger_cl_purging_thread(void *rid);

 /* bakup/recovery, import/export */
 static int _cl5LDIF2Operation(char *ldifEntry, slapi_operation_parameters *op, char **replGen);
@@ -1844,9 +1841,13 @@ _cl5Iterate(cldb_Handle *cldb, dbi_iterate_cb_t *action_cb, DBLCI_CTX *dblcictx,

     dblcictx->finished = PR_FALSE;
     dblcictx->cldb = cldb;
-    while ( !slapi_is_shutting_down() &&
-            ((rc == CL5_SUCCESS && dblcictx->finished == PR_FALSE) ||
-             (rc == CL5_DB_RETRY && nbtries < MAX_RETRIES))) {
+    while ((rc == CL5_SUCCESS && dblcictx->finished == PR_FALSE) ||
+           (rc == CL5_DB_RETRY && nbtries < MAX_RETRIES))
+    {
+        if (slapi_is_shutting_down()) {
+            return CL5_SHUTDOWN;
+        }
+
         nbtries++;
         dblcictx->changed.nb = 0;
         dblcictx->seen.nb = 0;
@@ -2502,7 +2503,7 @@ _cl5TrimMain(void *param)
     cldb->trimmingOnGoing = 1;
     slapi_counter_increment(cldb->clThreads);

-    while (cldb->dbState == CL5_STATE_OPEN)
+    while (cldb->dbState == CL5_STATE_OPEN && !slapi_is_shutting_down())
     {
         pthread_mutex_unlock(&(cldb->stLock));

@@ -2548,24 +2549,28 @@ _cl5TrimMain(void *param)
  * We are purging a changelog after a cleanAllRUV task. Find the specific
  * changelog for the backend that is being cleaned, and purge all the records
  * with the cleaned rid.
+ *
+ * If we encounter a shutdown _cl5PurgeRID will return 1
  */
-static void
-_cl5DoPurging(cleanruv_purge_data *purge_data)
+static int32_t
+_cl5DoPurging(cleanruv_data *purge_data)
 {
     cldb_Handle *cldb = replica_get_cl_info(purge_data->replica);
+    int32_t rc = 0;
+
     if (cldb == NULL) {
         slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
                       "_cl5DoPurging - Changelog info was NULL - is your replication configuration valid?\n");
-        return;
+        return rc;
     }

     pthread_mutex_lock(&(cldb->clLock));

-    _cl5PurgeRID(purge_data, cldb);
+    rc = _cl5PurgeRID(purge_data, cldb);

     pthread_mutex_unlock(&(cldb->clLock));

-    return;
+    return rc;
 }

@@ -2651,7 +2656,7 @@ _cl5PurgeRidOnEntry(dbi_val_t *key, dbi_val_t *data, void *ctx)
 }

 /*
- * _cl5PurgeRID(cleanruv_purge_data, cleaned_rid)
+ * _cl5PurgeRID(cleanruv_data, cleaned_rid)
  *
  * Clean the entire changelog of updates from the "cleaned rid" via CLEANALLRUV
 * Delete entries in batches so we don't consume too many db locks, and we don't
@@ -2659,19 +2664,28 @@ _cl5PurgeRidOnEntry(dbi_val_t *key, dbi_val_t *data, void *ctx)
 * We save the key from the last iteration so we don't have to start from the
 * beginning for each new iteration.
 */
-void
-_cl5PurgeRID(cleanruv_purge_data *data, cldb_Handle *cldb)
+int32_t
+_cl5PurgeRID(cleanruv_data *data, cldb_Handle *cldb)
 {
     DBLCI_CTX dblcictx = {0};
     int32_t rc = 0;

-    dblcictx.seen.nbmax = CL5_PURGE_MAX_LOOKUP_PER_TRANSACTION;
-    dblcictx.changed.nbmax = CL5_PURGE_MAX_PER_TRANSACTION;
-    dblcictx.rid2purge = data->cleaned_rid;
+    if (dblayer_is_lmdb(cldb->be)) {
+        dblcictx.seen.nbmax = 5000;
+        dblcictx.changed.nbmax = 50;
+    } else {
+        dblcictx.seen.nbmax = 10000;
+        dblcictx.changed.nbmax = 50;
+    }
+    dblcictx.rid2purge = data->rid;

     rc = _cl5Iterate(cldb, _cl5PurgeRidOnEntry, &dblcictx, PR_FALSE);
-    if (rc != CL5_SUCCESS && rc != CL5_NOTFOUND) {
-        cleanruv_log(data->task, data->cleaned_rid, CLEANALLRUV_ID,
+    if (rc == CL5_SHUTDOWN) {
+        cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE,
+                     "Server shutting down. Process will resume at server "
+                     "startup");
+    } else if (rc != CL5_SUCCESS && rc != CL5_NOTFOUND) {
+        cleanruv_log(data->task, data->rid, CLEANALLRUV_ID,
                      SLAPI_LOG_ERR,
                      "Purging failed to iterate through the entire changelog "
                      "(error %d). There is a chance the rid was not fully "
@@ -2679,11 +2693,14 @@ _cl5PurgeRID(cleanruv_purge_data *data, cldb_Handle *cldb)
                      "again.",
                      rc);
     } else {
-        cleanruv_log(data->task, data->cleaned_rid, CLEANALLRUV_ID,
+        cleanruv_log(data->task, data->rid, CLEANALLRUV_ID,
                      SLAPI_LOG_INFO,
-                     "Purged %ld records from the changelog",
-                     dblcictx.changed.tot);
+                     "Scanned %ld records, and purged %ld records from the "
+                     "changelog",
+                     dblcictx.seen.tot, dblcictx.changed.tot);
     }
+
+    return rc;
 }

 /*
@@ -4292,7 +4309,7 @@ _cl5ExportFile(PRFileDesc *prFile, cldb_Handle *cldb)
     }
     slapi_write_buffer(prFile, "\n", strlen("\n"));

-    dblcictx.seen.nbmax = CL5_PURGE_MAX_LOOKUP_PER_TRANSACTION;
+    dblcictx.seen.nbmax = CL5_TRIM_MAX_LOOKUP_PER_TRANSACTION;
     dblcictx.exportFile = prFile;
     rc = _cl5Iterate(cldb, _cl5ExportEntry2File, &dblcictx, PR_TRUE);

@@ -4415,68 +4432,40 @@ cl5CleanRUV(ReplicaId rid, Replica *replica)
     ruv_delete_replica(cldb->maxRUV, rid);
 }

-static void
-free_purge_data(cleanruv_purge_data *purge_data)
-{
-    slapi_ch_free((void **)&purge_data);
-}
-
-/*
- * Create a thread to purge a changelog of cleaned RIDs
- */
-void
-trigger_cl_purging(cleanruv_purge_data *purge_data)
-{
-    PRThread *trim_tid = NULL;
-
-    trim_tid = PR_CreateThread(PR_USER_THREAD, (VFP)(void *)trigger_cl_purging_thread,
-                               (void *)purge_data, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
-                               PR_UNJOINABLE_THREAD, DEFAULT_THREAD_STACKSIZE);
-    if (NULL == trim_tid) {
-        slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
-                      "trigger_cl_purging - Failed to create cl purging "
-                      "thread; NSPR error - %d\n",
-                      PR_GetError());
-        free_purge_data(purge_data);
-    } else {
-        /* need a little time for the thread to get started */
-        DS_Sleep(PR_SecondsToInterval(1));
-    }
-}
-
 /*
  * Purge a changelog of entries that originated from a particular replica(rid)
  */
-void
-trigger_cl_purging_thread(void *arg)
+int32_t
+cldb_purge_rid(cleanruv_data *purge_data)
 {
-    cleanruv_purge_data *purge_data = (cleanruv_purge_data *)arg;
-    Replica *replica = purge_data->replica;
-    cldb_Handle *cldb = replica_get_cl_info(replica);
+    cldb_Handle *cldb = replica_get_cl_info(purge_data->replica);
+    int32_t rc = -1;

     if (cldb == NULL) {
-        return;
+        return rc;
     }

     pthread_mutex_lock(&(cldb->stLock));
+
     /* Make sure we have a change log, and we aren't closing it */
     if (cldb->dbState != CL5_STATE_OPEN) {
-        goto free_and_return;
+        pthread_mutex_unlock(&(cldb->stLock));
+        return rc;
     }
-
     slapi_counter_increment(cldb->clThreads);
+    pthread_mutex_unlock(&(cldb->stLock));

     /* Purge the changelog */
-    _cl5DoPurging(purge_data);
-
-    /* Remove the rid from the internal list */
-    remove_cleaned_rid(purge_data->cleaned_rid);
+    rc = _cl5DoPurging(purge_data);

     slapi_counter_decrement(cldb->clThreads);

-free_and_return:
-    pthread_mutex_unlock(&(cldb->stLock));
-    free_purge_data(purge_data);
+    /* Handle result code */
+    if (rc == CL5_SUCCESS || rc == CL5_NOTFOUND) {
+        return LDAP_SUCCESS;
+    } else {
+        return -1;
+    }
 }

 char *
diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
index a690eee37..e4b4f11ee 100644
--- a/ldap/servers/plugins/replication/cl5_api.h
+++ b/ldap/servers/plugins/replication/cl5_api.h
@@ -81,26 +81,27 @@ typedef enum {
 /* error codes */
 enum
 {
-    CL5_SUCCESS, /* successful operation */
-    CL5_BAD_DATA, /* invalid parameter passed to the function */
-    CL5_BAD_FORMAT, /* db data has unexpected format */
-    CL5_BAD_STATE, /* changelog is in an incorrect state for attempted operation */
-    CL5_BAD_DBVERSION, /* changelog has invalid dbversion */
-    CL5_DB_ERROR, /* database error */
-    CL5_NOTFOUND, /* requested entry or value was not found */
-    CL5_MEMORY_ERROR, /* memory allocation failed */
-    CL5_SYSTEM_ERROR, /* NSPR error occured, use PR_Error for furhter info */
-    CL5_CSN_ERROR, /* CSN API failed */
-    CL5_RUV_ERROR, /* RUV API failed */
-    CL5_OBJSET_ERROR, /* namedobjset api failed */
-    CL5_DB_LOCK_ERROR, /* bdb returns error 12 when the db runs out of locks,
+    CL5_SUCCESS,         /* successful operation */
+    CL5_BAD_DATA,        /* invalid parameter passed to the function */
+    CL5_BAD_FORMAT,      /* db data has unexpected format */
+    CL5_BAD_STATE,       /* changelog is in an incorrect state for attempted operation */
+    CL5_BAD_DBVERSION,   /* changelog has invalid dbversion */
+    CL5_DB_ERROR,        /* database error */
+    CL5_NOTFOUND,        /* requested entry or value was not found */
+    CL5_MEMORY_ERROR,    /* memory allocation failed */
+    CL5_SYSTEM_ERROR,    /* NSPR error occured, use PR_Error for furhter info */
+    CL5_CSN_ERROR,       /* CSN API failed */
+    CL5_RUV_ERROR,       /* RUV API failed */
+    CL5_OBJSET_ERROR,    /* namedobjset api failed */
+    CL5_DB_LOCK_ERROR,   /* bdb returns error 12 when the db runs out of locks,
                             this var needs to be in slot 12 of the list.
                             Do not re-order enum above! */
-    CL5_PURGED_DATA, /* requested data has been purged */
-    CL5_MISSING_DATA, /* data should be in the changelog, but is missing */
-    CL5_UNKNOWN_ERROR, /* unclassified error */
-    CL5_IGNORE_OP, /* ignore this updated - used by CLEANALLRUV task */
-    CL5_DB_RETRY, /* Retryable database error */
+    CL5_PURGED_DATA,     /* requested data has been purged */
+    CL5_MISSING_DATA,    /* data should be in the changelog, but is missing */
+    CL5_UNKNOWN_ERROR,   /* unclassified error */
+    CL5_IGNORE_OP,       /* ignore this updated - used by CLEANALLRUV task */
+    CL5_DB_RETRY,        /* Retryable database error */
+    CL5_SHUTDOWN,        /* server shutdown during changelog iteration */
     CL5_LAST_ERROR_CODE /* Should always be last in this enum */
 };

@@ -140,7 +141,7 @@ int cl5Open(void);
 int cl5Close(void);

 /* Name: cldb_RemoveReplicaDB
-   Description: Clear the cldb information from the replica
+   Description: Clear the cldb information from the replica
                 and delete the database file
 */
 int cldb_RemoveReplicaDB(Replica *replica);
@@ -328,7 +329,7 @@ int cl5NotifyRUVChange(Replica *replica);

 void cl5CleanRUV(ReplicaId rid, Replica *replica);
 void cl5NotifyCleanup(int rid);
-void trigger_cl_purging(cleanruv_purge_data *purge_data);
+int32_t cldb_purge_rid(cleanruv_data *purge_data);
 int cldb_SetReplicaDB(Replica *replica, void *arg);
 int cldb_UnSetReplicaDB(Replica *replica, void *arg);
 int cldb_StartTrimming(Replica *replica);
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 45b42be0f..2ba2cfaa7 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -827,13 +827,6 @@ typedef struct _cleanruv_data
     PRBool original_task;
 } cleanruv_data;

-typedef struct _cleanruv_purge_data
-{
-    int cleaned_rid;
-    Replica *replica;
-    Slapi_Task *task;
-} cleanruv_purge_data;
-
 typedef struct _csngen_test_data
 {
     Slapi_Task *task;
@@ -927,5 +920,6 @@ void cleanruv_log(Slapi_Task *task, int rid, char *task_type, int sev_level, cha
 char *replica_cleanallruv_get_local_maxcsn(ReplicaId rid, char *base_dn);
 int replica_execute_cleanruv_task(Replica *r, ReplicaId rid, char *returntext);
 int replica_execute_cleanall_ruv_task(Replica *r, ReplicaId rid, Slapi_Task *task, const char *force_cleaning, PRBool original_task, char *returntext);
+void delete_cleaned_rid_config(cleanruv_data *data);

 #endif /* _REPL5_H_ */
diff --git a/ldap/servers/plugins/replication/repl5_protocol_util.c b/ldap/servers/plugins/replication/repl5_protocol_util.c
index 78fa863bb..25173e5cd 100644
--- a/ldap/servers/plugins/replication/repl5_protocol_util.c
+++ b/ldap/servers/plugins/replication/repl5_protocol_util.c
@@ -361,7 +361,7 @@ acquire_replica(Private_Repl_Protocol *prp, char *prot_oid, RUV **ruv)
             /* remote replica detected a duplicate ReplicaID */
             slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
                           "acquire_replica - "
-                          "%s: Unable to aquire replica: the replica "
+                          "%s: Unable to acquire replica: the replica "
                           "has the same Replica ID as this one. "
                           "Replication is aborting.\n",
                           agmt_get_long_name(prp->agmt));
diff --git a/ldap/servers/plugins/replication/repl_cleanallruv.c b/ldap/servers/plugins/replication/repl_cleanallruv.c
index a985e691f..b62c2fae0 100644
--- a/ldap/servers/plugins/replication/repl_cleanallruv.c
+++ b/ldap/servers/plugins/replication/repl_cleanallruv.c
@@ -41,7 +41,7 @@ static int replica_cleanallruv_send_abort_extop(Repl_Agmt *ra, Slapi_Task *task,
 static int replica_cleanallruv_check_maxcsn(Repl_Agmt *agmt, char *basedn, char *rid_text, char *maxcsn, Slapi_Task *task);
 static int replica_cleanallruv_replica_alive(Repl_Agmt *agmt);
 static int replica_cleanallruv_check_ruv(char *repl_root, Repl_Agmt *ra, char *rid_text, Slapi_Task *task, char *force);
-static void delete_cleaned_rid_config(cleanruv_data *data);
+
 static int replica_cleanallruv_is_finished(Repl_Agmt *agmt, char *filter, Slapi_Task *task);
 static void check_replicas_are_done_cleaning(cleanruv_data *data);
 static void check_replicas_are_done_aborting(cleanruv_data *data);
@@ -791,7 +791,7 @@ delete_aborted_rid(Replica *r, ReplicaId rid, char *repl_root, char *certify_all
 /*
  * Just remove the dse.ldif config, but we need to keep the cleaned rids in memory until we know we are done
 */
-static void
+void
 delete_cleaned_rid_config(cleanruv_data *clean_data)
 {
     Slapi_PBlock *pb, *modpb;
@@ -2087,7 +2087,6 @@ static void
 replica_cleanallruv_thread(void *arg)
 {
     cleanruv_data *data = arg;
-    cleanruv_purge_data *purge_data = NULL;
     Object *agmt_obj = NULL;
     Object *ruv_obj = NULL;
     Repl_Agmt *agmt = NULL;
@@ -2351,13 +2350,11 @@ done:
     /*
      * Success - the rid has been cleaned!
      *
-     * Delete the cleaned rid config.
      * Make sure all the replicas have been "pre_cleaned"
      * Remove the keep alive entry if present
      * Clean the agreements' RUV
-     * Remove the rid from the internal clean list
+     * Purge the changelog
      */
-    delete_cleaned_rid_config(data);
     check_replicas_are_done_cleaning(data);
     if (data->original_task) {
         cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO,
@@ -2369,21 +2366,14 @@ done:
     }
     clean_agmts(data);

-    /*
-     * Now purge the changelog. The purging thread will free the
-     * purge_data and update the cleaned rid list
-     */
-    purge_data = (cleanruv_purge_data *)slapi_ch_calloc(1, sizeof(cleanruv_purge_data));
-    purge_data->cleaned_rid = data->rid;
-    purge_data->replica = data->replica;
-    purge_data->task = data->task;
     cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO,
-                 "Triggering changelog purge thread. This might complete "
-                 "after the cleaning task finishes.");
-    trigger_cl_purging(purge_data);
-
-    cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO,
-                 "Successfully cleaned rid(%d)", data->rid);
+                 "Purging changelog...");
+    if (cldb_purge_rid(data) == LDAP_SUCCESS) {
+        delete_cleaned_rid_config(data);
+        remove_cleaned_rid(data->rid);
+        cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO,
+                     "Successfully cleaned rid(%d)", data->rid);
+    }
 } else {
     /*
      * Shutdown or abort
--
2.47.1
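The purge loop these limits tune follows a batch-and-resume pattern: touch at most a fixed number of records per transaction, remember the last key, and reopen from there. A Python sketch of that pattern (an in-memory dict stands in for the real dbi cursor API; the 5000/50 defaults mirror the lmdb values in the hunk above):

    def purge_rid(changelog, is_target, seen_max=5000, changed_max=50):
        """Delete entries matching is_target in short batches, resuming by key."""
        resume_key, purged = None, 0
        while True:
            seen = changed = 0
            batch = []  # deletions staged for this "transaction"
            for key in sorted(k for k in changelog if resume_key is None or k > resume_key):
                seen += 1
                if is_target(changelog[key]):
                    batch.append(key)
                    changed += 1
                resume_key = key
                if seen >= seen_max or changed >= changed_max:
                    break  # commit early so one txn never holds locks for long
            for key in batch:  # "commit" the staged deletions
                del changelog[key]
            purged += len(batch)
            if seen < seen_max and changed < changed_max:
                return purged  # reached the end of the changelog

The design point is that smaller per-transaction quotas make the purge take more iterations, but the CPU and lock load is spread out instead of blocking other threads for long stretches.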
@@ -0,0 +1,112 @@
From 1217d4a2a4bd43ff572d91a6379501feb5d4c517 Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Mon, 5 Aug 2024 08:57:35 +1000
Subject: [PATCH] Issue 6284 - BUG - freelist ordering causes high wtime
 (#6285)

Bug Description: Multithread listeners were added to
support having multiple threads able to perform IO
for connections to reduce wtime. However, when the server
is first started, if the number of clients is less than
the size of a single listener's conntable, this causes
extremely high wtimes.

This is because the initial connection freelist
construction adds every connection in order of *listener*,
so new connections always fill the first listener thread;
only once that listener is full does it spill over to the
next.

If the conntable size is small, this isn't noticeable, but
an example is if your conntable is 16384 elements with 4
listeners, then the first 4096 connections will always
go to the first listener.

Fix Description: When the freelist is constructed, rather
than iterating over each listener *then* each slot, we
iterate over each slot and then each listener. This causes
the initial freelist to be interleaved between all
listeners so that even with a small number of connections
the work is spread fairly without lasering a single
listener.

A supplied test from a user shows a significant drop in
wtime and an increase in throughput of operations with
this change.

fixes: https://github.com/389ds/389-ds-base/issues/6284

Author: William Brown <william@blackhats.net.au>

Review by: @tbordaz and @jchapma (Thanks!)
---
 ldap/servers/slapd/conntable.c | 33 +++++++++++++++++++++++++------
 1 file changed, 27 insertions(+), 6 deletions(-)

diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c
index 90f08abd4..b8e2ce3b5 100644
--- a/ldap/servers/slapd/conntable.c
+++ b/ldap/servers/slapd/conntable.c
@@ -115,20 +115,26 @@ connection_table_new(int table_size)
 {
     Connection_Table *ct;
     size_t i = 0;
-    int ct_list = 0;
-    int free_idx = 0;
+    size_t ct_list = 0;
+    size_t free_idx = 0;
     ber_len_t maxbersize = config_get_maxbersize();
     ct = (Connection_Table *)slapi_ch_calloc(1, sizeof(Connection_Table));
     ct->list_num = config_get_num_listeners();
     ct->num_active = (int *)slapi_ch_calloc(1, ct->list_num * sizeof(int));
+    /* Connection table size must be divisible by the number of listeners */
     ct->size = table_size - (table_size % ct->list_num);
     /* Account for first slot of each list being used as head (c_next). */
     ct->list_size = (ct->size/ct->list_num)+1;
+    /*
+     * Since there are extra list heads, in the case ct->size was a multiple of list_num, we
+     * need to expand ct->size to reflect the true allocation amount.
+     */
+    ct->size = ct->list_size * ct->list_num;
     ct->c = (Connection **)slapi_ch_calloc(1, ct->size * sizeof(Connection *));
     ct->fd = (struct POLL_STRUCT **)slapi_ch_calloc(1, ct->list_num * sizeof(struct POLL_STRUCT*));
     ct->table_mutex = PR_NewLock();
     /* Allocate the freelist (a slot for each connection plus another slot for the final NULL pointer) */
-    ct->c_freelist = (Connection **)slapi_ch_calloc(1, (ct->size+1) * sizeof(Connection *));
+    ct->c_freelist = (Connection **)slapi_ch_calloc(1, (ct->size + 1) * sizeof(Connection *));
     ct->conn_next_offset = 0;

     slapi_log_err(SLAPI_LOG_INFO, "connection_table_new", "Number of connection sub-tables %d, each containing %d slots.\n",
@@ -184,11 +190,26 @@ connection_table_new(int table_size)

             /* Ready to rock, mark as such. */
             ct->c[ct_list][i].c_state = CONN_STATE_INIT;
+        }
+    }

+    /*
+     * The freelist is how new connections are inserted into our ct to be used. We need to ensure
+     * these are *spread* initially over the set of listeners so that we distributed between handlers
+     * from the start. Over time as things are re-added to the freelist it will "shuffle" and we
+     * will effectively have randomised listener assignment.
+     *
+     * Without this it means that a single listener will be "focused on" until it's conntable
+     * is full at which point we spill out into the next listener.
+     *
+     * This is why in the subsequent loops we INVERT the loop to iterate over list_size
+     * first, and then the ct_list as the inner component so that the freelist initially
+     * alternates between the listeners first to help distribute requests.
+     */
+    for (i = 1; i < ct->list_size; i++) {
+        for (ct_list = 0; ct_list < ct->list_num; ct_list++) {
             /* Map multiple ct lists to a single freelist, but skip slot 0 of each list. */
-            if (i != 0) {
-                ct->c_freelist[free_idx++] = &(ct->c[ct_list][i]);
-            }
+            ct->c_freelist[free_idx++] = &(ct->c[ct_list][i]);
         }
     }

--
2.47.1
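The effect of the loop inversion is easy to see numerically. A small Python sketch building both freelist orderings for 4 listeners on a 16384-slot table (list_size 4097 including the head slot) and checking which listeners the first 100 connections land on:

    list_num, list_size = 4, 4097  # 16384/4 data slots + 1 head slot per list

    # old order: every slot of listener 0, then listener 1, ...
    sequential = [(lst, i) for lst in range(list_num) for i in range(1, list_size)]
    # new order: slot 1 of every listener, then slot 2 of every listener, ...
    interleaved = [(lst, i) for i in range(1, list_size) for lst in range(list_num)]

    listeners_hit = lambda freelist: sorted({lst for lst, _ in freelist[:100]})
    print(listeners_hit(sequential))   # [0] - all 100 connections laser listener 0
    print(listeners_hit(interleaved))  # [0, 1, 2, 3] - spread across all listeners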
@@ -0,0 +1,54 @@
From 47709ccde40a50eb908bf43e11cce3dc68fa0ef5 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 21 Aug 2024 23:00:37 +0100
Subject: [PATCH] Issue 6296 - basic_test.py::test_conn_limits fails in main
 branch (#6300)

Description:
A CI test that validates connection management functionality by
configuring an instance with a very small connection table fails,
with the instance failing to start.

Fix description:
In a multi list connection table configuration, the connection table
code has been updated to expand the connection table size to include
a head for each list. This expanded size needs to be accounted for
when we determine if we can accept new connections and on the final
check at server startup time.

Relates: https://github.com/389ds/389-ds-base/issues/6296

Reviewed by: @droideck (Thank you)
---
 ldap/servers/slapd/daemon.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 9e2f32c43..56e20bd13 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -829,8 +829,8 @@ accept_thread(void *vports)
     num_poll = setup_pr_accept_pds(n_tcps, s_tcps, i_unix, &fds);

     while (!g_get_shutdown()) {
-        /* Do we need to accept new connections? */
-        int accept_new_connections = (ct->size > ct->conn_next_offset);
+        /* Do we need to accept new connections, account for ct->size including list heads. */
+        int accept_new_connections = ((ct->size - ct->list_num) > ct->conn_next_offset);
         if (!accept_new_connections) {
             if (last_accept_new_connections) {
                 slapi_log_err(SLAPI_LOG_ERR, "accept_thread",
@@ -2081,8 +2081,8 @@ unfurl_banners(Connection_Table *ct, daemon_ports_t *ports, PRFileDesc **n_tcps,
     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
     char addrbuf[256];
     int isfirsttime = 1;
-
-    if (ct->size > (slapdFrontendConfig->maxdescriptors - slapdFrontendConfig->reservedescriptors)) {
+    /* Take into account that ct->size includes a list head for each listener. */
+    if ((ct->size - ct->list_num) > (slapdFrontendConfig->maxdescriptors - slapdFrontendConfig->reservedescriptors)) {
         slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon",
                       "Not enough descriptors to accept any connections. "
                       "This may be because the maxdescriptors configuration "
--
2.47.1
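The arithmetic being corrected: since issue 6284, ct->size is rounded up to include one head slot per list, so the usable capacity is ct->size - ct->list_num. A worked Python example with assumed values table_size=20 and 4 listeners:

    table_size, list_num = 20, 4
    size = table_size - (table_size % list_num)  # 20, already divisible by list_num
    list_size = size // list_num + 1             # 6: 5 data slots + 1 head per list
    size = list_size * list_num                  # 24, the true allocation amount
    usable = size - list_num                     # 20, what the checks must compare
    print(size, usable)                          # 24 20

Since the allocated size (24) exceeds the requested table size (20), comparing raw ct->size against descriptor limits or conn_next_offset over-counts by list_num, which is exactly what made the small-table CI instance refuse to start.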
@ -0,0 +1,686 @@
|
||||
From 569628ce405f214a7e35deb96b4e6c22519a4268 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Wed, 4 Dec 2024 22:29:11 -0500
|
||||
Subject: [PATCH] Issue 5798 - Fix dsconf config multi-valued attr operations
|
||||
(#6426)
|
||||
|
||||
Description:
|
||||
Fix add operation to properly handle multi-valued attributes
|
||||
so they persist after server restart.
|
||||
Add support for multiple values at once via dsconf config.
|
||||
Handle delete operation in a more flexible way.
|
||||
Refactor attribute handling code for better error handling and consistency.
|
||||
*Add CI test suite
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/5798
|
||||
|
||||
Reviewed by: @progier389 (Thanks!)
|
||||
---
|
||||
.../tests/suites/clu/dsconf_config_test.py | 347 ++++++++++++++++++
|
||||
src/lib389/lib389/_mapped_object.py | 55 +--
|
||||
src/lib389/lib389/cli_conf/config.py | 172 +++++----
|
||||
3 files changed, 481 insertions(+), 93 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/clu/dsconf_config_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dsconf_config_test.py b/dirsrvtests/tests/suites/clu/dsconf_config_test.py
|
||||
new file mode 100644
|
||||
index 000000000..d67679adf
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsconf_config_test.py
|
||||
@@ -0,0 +1,347 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+
|
||||
+import subprocess
|
||||
+import logging
|
||||
+import pytest
|
||||
+from lib389._constants import DN_DM
|
||||
+from lib389.topologies import topology_st
|
||||
+
|
||||
+pytestmark = pytest.mark.tier1
|
||||
+
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+HAPROXY_IPS = {
|
||||
+ 'single': '192.168.1.1',
|
||||
+ 'multiple': ['10.0.0.1', '172.16.0.1', '192.168.2.1']
|
||||
+}
|
||||
+
|
||||
+REFERRALS = {
|
||||
+ 'single': 'ldap://primary.example.com',
|
||||
+ 'multiple': [
|
||||
+ 'ldap://server1.example.com',
|
||||
+ 'ldap://server2.example.com',
|
||||
+ 'ldap://server3.example.com'
|
||||
+ ]
|
||||
+}
|
||||
+
|
||||
+TEST_ATTRS = [
|
||||
+ ('nsslapd-haproxy-trusted-ip', HAPROXY_IPS),
|
||||
+ ('nsslapd-referral', REFERRALS)
|
||||
+]
|
||||
+
|
||||
+
|
||||
+def execute_dsconf_command(dsconf_cmd, subcommands):
|
||||
+ """Execute dsconf command and return output and return code"""
|
||||
+
|
||||
+ cmdline = dsconf_cmd + subcommands
|
||||
+ proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
|
||||
+ out, _ = proc.communicate()
|
||||
+ return out.decode('utf-8'), proc.returncode
|
||||
+
|
||||
+
|
||||
+def get_dsconf_base_cmd(topology):
|
||||
+ """Return base dsconf command list"""
|
||||
+
|
||||
+ return ['/usr/sbin/dsconf', topology.standalone.serverid,
|
||||
+ '-D', DN_DM, '-w', 'password']
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("attr_name,values_dict", TEST_ATTRS)
|
||||
+def test_single_value_add(topology_st, attr_name, values_dict):
|
||||
+ """Test adding a single value to an attribute
|
||||
+
|
||||
+ :id: ffc912a6-c188-413d-9c35-7f4b3774d946
|
||||
+ :setup: Standalone DS instance
|
||||
+ :steps:
|
||||
+ 1. Add a single value to the specified attribute
|
||||
+ 2. Verify the value was added correctly
|
||||
+ :expectedresults:
|
||||
+ 1. Command should execute successfully
|
||||
+ 2. Added value should be present in the configuration
|
||||
+ """
|
||||
+ dsconf_cmd = get_dsconf_base_cmd(topology_st)
|
||||
+
|
||||
+ try:
|
||||
+ value = values_dict['single']
|
||||
+ test_attr = f"{attr_name}={value}"
|
||||
+
|
||||
+ # Add single value
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd, ['config', 'add', test_attr])
|
||||
+ assert rc == 0
|
||||
+
|
||||
+ # Verify
|
||||
+ output, _ = execute_dsconf_command(dsconf_cmd, ['config', 'get', attr_name])
|
||||
+ assert value in output
|
||||
+
|
||||
+ finally:
|
||||
+ execute_dsconf_command(dsconf_cmd, ['config', 'delete', attr_name])
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("attr_name,values_dict", TEST_ATTRS)
|
||||
+def test_single_value_replace(topology_st, attr_name, values_dict):
|
||||
+ """Test replacing a single value in configuration attributes
|
||||
+
|
||||
+ :id: 112e3e5e-8db8-4974-9ea4-ed789c2d02f2
|
||||
+ :setup: Standalone DS instance
|
||||
+ :steps:
|
||||
+ 1. Add initial value to the specified attribute
|
||||
+ 2. Replace the value with a new one
|
||||
+ 3. Verify the replacement was successful
|
||||
+ :expectedresults:
|
||||
+ 1. Initial value should be added successfully
|
||||
+ 2. Replace command should execute successfully
|
||||
+ 3. New value should be present and old value should be absent
|
||||
+ """
|
||||
+ dsconf_cmd = get_dsconf_base_cmd(topology_st)
|
||||
+
|
||||
+ try:
|
||||
+ # Add initial value
|
||||
+ value = values_dict['single']
|
||||
+ test_attr = f"{attr_name}={value}"
|
||||
+ execute_dsconf_command(dsconf_cmd, ['config', 'add', test_attr])
|
||||
+
|
||||
+ # Replace with new value
|
||||
+ new_value = values_dict['multiple'][0]
|
||||
+ replace_attr = f"{attr_name}={new_value}"
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd, ['config', 'replace', replace_attr])
|
||||
+ assert rc == 0
|
||||
+
|
||||
+ # Verify
|
||||
+ output, _ = execute_dsconf_command(dsconf_cmd, ['config', 'get', attr_name])
|
||||
+ assert new_value in output
|
||||
+ assert value not in output
|
||||
+
|
||||
+ finally:
|
||||
+ execute_dsconf_command(dsconf_cmd, ['config', 'delete', attr_name])
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("attr_name,values_dict", TEST_ATTRS)
+def test_multi_value_batch_add(topology_st, attr_name, values_dict):
+    """Test adding multiple values in a single batch command
+
+    :id: 4c34c7f8-16cc-4ab6-938a-967537be5470
+    :setup: Standalone DS instance
+    :steps:
+        1. Add multiple values to the attribute in a single command
+        2. Verify all values were added correctly
+    :expectedresults:
+        1. Batch add command should execute successfully
+        2. All added values should be present in the configuration
+    """
+    dsconf_cmd = get_dsconf_base_cmd(topology_st)
+
+    try:
+        # Add multiple values in one command
+        attr_values = [f"{attr_name}={val}" for val in values_dict['multiple']]
+        output, rc = execute_dsconf_command(dsconf_cmd, ['config', 'add'] + attr_values)
+        assert rc == 0
+
+        # Verify all values
+        output, _ = execute_dsconf_command(dsconf_cmd, ['config', 'get', attr_name])
+        for value in values_dict['multiple']:
+            assert value in output
+
+    finally:
+        execute_dsconf_command(dsconf_cmd, ['config', 'delete', attr_name])
+
+
+@pytest.mark.parametrize("attr_name,values_dict", TEST_ATTRS)
+def test_multi_value_batch_replace(topology_st, attr_name, values_dict):
+    """Test replacing with multiple values in a single batch command
+
+    :id: 05cf28b8-000e-4856-a10b-7e1df012737d
+    :setup: Standalone DS instance
+    :steps:
+        1. Add initial single value
+        2. Replace with multiple values in a single command
+        3. Verify the replacement was successful
+    :expectedresults:
+        1. Initial value should be added successfully
+        2. Batch replace command should execute successfully
+        3. All new values should be present and initial value should be absent
+    """
+    dsconf_cmd = get_dsconf_base_cmd(topology_st)
+
+    try:
+        # Add initial value
+        initial_value = values_dict['single']
+        execute_dsconf_command(dsconf_cmd, ['config', 'add', f"{attr_name}={initial_value}"])
+
+        # Replace with multiple values
+        attr_values = [f"{attr_name}={val}" for val in values_dict['multiple']]
+        output, rc = execute_dsconf_command(dsconf_cmd, ['config', 'replace'] + attr_values)
+        assert rc == 0
+
+        # Verify
+        output, _ = execute_dsconf_command(dsconf_cmd, ['config', 'get', attr_name])
+        assert initial_value not in output
+        for value in values_dict['multiple']:
+            assert value in output
+
+    finally:
+        execute_dsconf_command(dsconf_cmd, ['config', 'delete', attr_name])
+
+
+@pytest.mark.parametrize("attr_name,values_dict", TEST_ATTRS)
+def test_multi_value_specific_delete(topology_st, attr_name, values_dict):
+    """Test deleting specific values from multi-valued attribute
+
+    :id: bb325c9a-eae8-438a-b577-bd63540b91cb
+    :setup: Standalone DS instance
+    :steps:
+        1. Add multiple values to the attribute
+        2. Delete a specific value
+        3. Verify the deletion was successful
+    :expectedresults:
+        1. Multiple values should be added successfully
+        2. Specific delete command should execute successfully
+        3. Deleted value should be absent while others remain present
+    """
+    dsconf_cmd = get_dsconf_base_cmd(topology_st)
+
+    try:
+        # Add multiple values
+        attr_values = [f"{attr_name}={val}" for val in values_dict['multiple']]
+        execute_dsconf_command(dsconf_cmd, ['config', 'add'] + attr_values)
+
+        # Delete middle value
+        delete_value = values_dict['multiple'][1]
+        output, rc = execute_dsconf_command(dsconf_cmd,
+                                            ['config', 'delete', f"{attr_name}={delete_value}"])
+        assert rc == 0
+
+        # Verify remaining values
+        output, _ = execute_dsconf_command(dsconf_cmd, ['config', 'get', attr_name])
+        assert delete_value not in output
+        assert values_dict['multiple'][0] in output
+        assert values_dict['multiple'][2] in output
+
+    finally:
+        execute_dsconf_command(dsconf_cmd, ['config', 'delete', attr_name])
+
+
+@pytest.mark.parametrize("attr_name,values_dict", TEST_ATTRS)
+def test_multi_value_batch_delete(topology_st, attr_name, values_dict):
+    """Test deleting multiple values in a single batch command
+
+    :id: 4b105824-b060-4f83-97d7-001a01dba1a5
+    :setup: Standalone DS instance
+    :steps:
+        1. Add multiple values to the attribute
+        2. Delete multiple values in a single command
+        3. Verify the batch deletion was successful
+    :expectedresults:
+        1. Multiple values should be added successfully
+        2. Batch delete command should execute successfully
+        3. Deleted values should be absent while others remain present
+    """
+    dsconf_cmd = get_dsconf_base_cmd(topology_st)
+
+    try:
+        # Add all values
+        attr_values = [f"{attr_name}={val}" for val in values_dict['multiple']]
+        execute_dsconf_command(dsconf_cmd, ['config', 'add'] + attr_values)
+
+        # Delete multiple values in one command
+        delete_values = [f"{attr_name}={val}" for val in values_dict['multiple'][:2]]
+        output, rc = execute_dsconf_command(dsconf_cmd, ['config', 'delete'] + delete_values)
+        assert rc == 0
+
+        # Verify
+        output, _ = execute_dsconf_command(dsconf_cmd, ['config', 'get', attr_name])
+        assert values_dict['multiple'][2] in output
+        assert values_dict['multiple'][0] not in output
+        assert values_dict['multiple'][1] not in output
+
+    finally:
+        execute_dsconf_command(dsconf_cmd, ['config', 'delete', attr_name])
+
+
+@pytest.mark.parametrize("attr_name,values_dict", TEST_ATTRS)
+def test_single_value_persists_after_restart(topology_st, attr_name, values_dict):
+    """Test single value persists after server restart
+
+    :id: be1a7e3d-a9ca-48a1-a3bc-062990d4f3e9
+    :setup: Standalone DS instance
+    :steps:
+        1. Add single value to the attribute
+        2. Verify the value is present
+        3. Restart the server
+        4. Verify the value persists after restart
+    :expectedresults:
+        1. Value should be added successfully
+        2. Value should be present before restart
+        3. Server should restart successfully
+        4. Value should still be present after restart
+    """
+    dsconf_cmd = get_dsconf_base_cmd(topology_st)
+
+    try:
+        # Add single value
+        value = values_dict['single']
+        output, rc = execute_dsconf_command(dsconf_cmd,
+                                            ['config', 'add', f"{attr_name}={value}"])
+        assert rc == 0
+
+        # Verify before restart
+        output, _ = execute_dsconf_command(dsconf_cmd, ['config', 'get', attr_name])
+        assert value in output
+
+        # Restart the server
+        topology_st.standalone.restart(timeout=10)
+
+        # Verify after restart
+        output, _ = execute_dsconf_command(dsconf_cmd, ['config', 'get', attr_name])
+        assert value in output
+
+    finally:
+        execute_dsconf_command(dsconf_cmd, ['config', 'delete', attr_name])
+
+
+@pytest.mark.parametrize("attr_name,values_dict", TEST_ATTRS)
+def test_multi_value_batch_persists_after_restart(topology_st, attr_name, values_dict):
+    """Test multiple values added in batch persist after server restart
+
+    :id: fd0435e2-90b1-465a-8968-d3a375c8fb22
+    :setup: Standalone DS instance
+    :steps:
+        1. Add multiple values in a single batch command
+        2. Verify all values are present
+        3. Restart the server
+        4. Verify all values persist after restart
+    :expectedresults:
+        1. Batch add command should execute successfully
+        2. All values should be present before restart
+        3. Server should restart successfully
+        4. All values should still be present after restart
+    """
+    dsconf_cmd = get_dsconf_base_cmd(topology_st)
+
+    try:
+        # Add multiple values
+        attr_values = [f"{attr_name}={val}" for val in values_dict['multiple']]
+        output, rc = execute_dsconf_command(dsconf_cmd, ['config', 'add'] + attr_values)
+        assert rc == 0
+
+        # Verify before restart
+        output, _ = execute_dsconf_command(dsconf_cmd, ['config', 'get', attr_name])
+        for value in values_dict['multiple']:
+            assert value in output
+
+        # Restart the server
+        topology_st.standalone.restart(timeout=10)
+
+        # Verify after restart
+        output, _ = execute_dsconf_command(dsconf_cmd, ['config', 'get', attr_name])
+        for value in values_dict['multiple']:
+            assert value in output
+
+    finally:
+        execute_dsconf_command(dsconf_cmd, ['config', 'delete', attr_name])
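
The helpers above boil down to a plain subprocess round-trip, so the same flows can be reproduced outside pytest. A minimal sketch, assuming a hypothetical instance name, bind credentials, and attribute values (none of these are taken from the patch):

    import subprocess

    # Mirrors get_dsconf_base_cmd() / execute_dsconf_command() with invented values
    base = ['/usr/sbin/dsconf', 'standalone1', '-D', 'cn=Directory Manager', '-w', 'password']

    def run(args):
        # Capture stdout and the process return code, like the test helper does
        proc = subprocess.Popen(base + args, stdout=subprocess.PIPE)
        out, _ = proc.communicate()
        return out.decode('utf-8'), proc.returncode

    # Batch add two values, delete one, and confirm the other survives
    out, rc = run(['config', 'add',
                   'nsslapd-haproxy-trusted-ip=192.168.0.1',
                   'nsslapd-haproxy-trusted-ip=192.168.0.2'])
    assert rc == 0
    out, rc = run(['config', 'delete', 'nsslapd-haproxy-trusted-ip=192.168.0.1'])
    out, _ = run(['config', 'get', 'nsslapd-haproxy-trusted-ip'])
    assert '192.168.0.2' in out
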
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
index 9799b8839..af5aab71e 100644
--- a/src/lib389/lib389/_mapped_object.py
+++ b/src/lib389/lib389/_mapped_object.py
@@ -321,25 +321,31 @@ class DSLdapObject(DSLogging, DSLint):
         This is useful for configuration changes that require
         atomic operation, and ease of use.
 
-        An example of usage is add_many((key, value), (key, value))
+        An example of usage is add_many((key, value), (key, [value1, value2]))
 
         No wrapping list is needed for the arguments.
 
-        :param *args: tuples of key,value to replace.
-        :type *args: (str, str)
+        :param *args: tuples of key,value to add. Value can be a single value
+                      or a collection (list, tuple, set) of values.
+        :type *args: (str, str) or (str, list/tuple/set)
         """
-
         mods = []
         for arg in args:
-            if isinstance(arg[1], list) or isinstance(arg[1], tuple):
-                value = ensure_list_bytes(arg[1])
+            key, value = arg
+            if isinstance(value, (list, tuple, set)):
+                value = ensure_list_bytes(list(value))
             else:
-                value = [ensure_bytes(arg[1])]
-            mods.append((ldap.MOD_ADD, ensure_str(arg[0]), value))
-        return _modify_ext_s(self._instance,self._dn, mods, serverctrls=self._server_controls,
-                             clientctrls=self._client_controls, escapehatch='i am sure')
+                value = [ensure_bytes(value)]
+
+            mods.append((ldap.MOD_ADD, ensure_str(key), value))
+
+        return _modify_ext_s(self._instance,
+                             self._dn,
+                             mods,
+                             serverctrls=self._server_controls,
+                             clientctrls=self._client_controls,
+                             escapehatch='i am sure')
 
-    # Basically what it means;
     def replace(self, key, value):
         """Replace an attribute with a value
 
@@ -355,23 +361,30 @@ class DSLdapObject(DSLogging, DSLint):
         This is useful for configuration changes that require
         atomic operation, and ease of use.
 
-        An example of usage is replace_many((key, value), (key, value))
+        An example of usage is replace_many((key, value), (key, [value1, value2]))
 
        No wrapping list is needed for the arguments.
 
-        :param *args: tuples of key,value to replace.
-        :type *args: (str, str)
+        :param *args: tuples of key,value to replace. Value can be a single value
+                      or a collection (list, tuple, set) of values.
+        :type *args: (str, str) or (str, list/tuple/set)
         """
-
         mods = []
         for arg in args:
-            if isinstance(arg[1], list) or isinstance(arg[1], tuple):
-                value = ensure_list_bytes(arg[1])
+            key, value = arg
+            if isinstance(value, (list, tuple, set)):
+                value = ensure_list_bytes(list(value))
             else:
-                value = [ensure_bytes(arg[1])]
-            mods.append((ldap.MOD_REPLACE, ensure_str(arg[0]), value))
-        return _modify_ext_s(self._instance,self._dn, mods, serverctrls=self._server_controls,
-                             clientctrls=self._client_controls, escapehatch='i am sure')
+                value = [ensure_bytes(value)]
+
+            mods.append((ldap.MOD_REPLACE, ensure_str(key), value))
+
+        return _modify_ext_s(self._instance,
+                             self._dn,
+                             mods,
+                             serverctrls=self._server_controls,
+                             clientctrls=self._client_controls,
+                             escapehatch='i am sure')
 
     # This needs to work on key + val, and key
     def remove(self, key, value):
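
In effect, add_many() and replace_many() now accept a collection as the value side of each tuple and fold it into one LDAP modification per tuple. A hedged usage sketch; the attribute names and values are illustrative only, and 'conf' is assumed to be a DSLdapObject such as Config(inst, basedn):

    # Single values and collections can be mixed in one atomic call
    conf.replace_many(
        ('nsslapd-referral', ['ldap://a.example.com', 'ldap://b.example.com']),  # collection -> multi-valued replace
        ('nsslapd-haproxy-trusted-ip', '192.168.0.1'),                           # plain string still works
    )
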
diff --git a/src/lib389/lib389/cli_conf/config.py b/src/lib389/lib389/cli_conf/config.py
index fb4c68f8b..6f4eddc8c 100644
--- a/src/lib389/lib389/cli_conf/config.py
+++ b/src/lib389/lib389/cli_conf/config.py
@@ -1,5 +1,5 @@
 # --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2023 Red Hat, Inc.
+# Copyright (C) 2024 Red Hat, Inc.
 # All rights reserved.
 #
 # License: GPL (version 3 or any later version).
@@ -12,12 +12,9 @@ from lib389.config import Config
 from lib389.cli_base import (
     _generic_get_entry,
     _generic_get_attr,
-    _generic_replace_attr,
     CustomHelpFormatter
 )
 
-OpType = Enum("OpType", "add delete")
-
 
 def _config_display_ldapimaprootdn_warning(log, args):
     """If we update the rootdn we need to update the ldapi settings too"""
@@ -28,44 +25,48 @@ def _config_display_ldapimaprootdn_warning(log, args):
                 "For LDAPI configuration, \"nsslapd-rootdn\" is used instead.")
 
 
-def _config_get_existing_attrs(conf, args, op_type):
-    """Get the existing attribute from the server and return them in a dict
-    so we can add them back after the operation is done.
-
-    For op_type == OpType.delete, we delete them from the server so we can add
-    back only those that are not specified in the command line.
-    (i.e delete nsslapd-haproxy-trusted-ip="192.168.0.1", but
-    nsslapd-haproxy-trusted-ip has 192.168.0.1 and 192.168.0.2 values.
-    So we want only 192.168.0.1 to be deleted in the end)
-    """
-
-    existing_attrs = {}
-    if args and args.attr:
-        for attr in args.attr:
-            if "=" in attr:
-                [attr_name, val] = attr.split("=", 1)
-                # We should process only multi-valued attributes this way
-                if attr_name.lower() == "nsslapd-haproxy-trusted-ip" or \
-                   attr_name.lower() == "nsslapd-referral":
-                    if attr_name not in existing_attrs.keys():
-                        existing_attrs[attr_name] = conf.get_attr_vals_utf8(attr_name)
-                    existing_attrs[attr_name] = [x for x in existing_attrs[attr_name] if x != val]
-
-                    if op_type == OpType.add:
-                        if existing_attrs[attr_name] == []:
-                            del existing_attrs[attr_name]
-
-                    if op_type == OpType.delete:
-                        conf.remove_all(attr_name)
-            else:
-                if op_type == OpType.delete:
-                    conf.remove_all(attr)
-                else:
-                    raise ValueError(f"You must specify a value to add for the attribute ({attr_name})")
-        return existing_attrs
-    else:
-        # Missing value
-        raise ValueError(f"Missing attribute to {op_type.name}")
+def _format_values_for_display(values):
+    """Format a set of values for display"""
+
+    if not values:
+        return ""
+    if len(values) == 1:
+        return f"'{next(iter(values))}'"
+    return ', '.join(f"'{value}'" for value in sorted(values))
+
+
+def _parse_attr_value_pairs(attrs, allow_no_value=False):
+    """Parse attribute value pairs from a list of strings in the format 'attr=value'"""
+
+    attr_values = {}
+    attrs_without_values = set()
+
+    for attr in attrs:
+        if "=" in attr:
+            attr_name, val = attr.split("=", 1)
+            if attr_name not in attr_values:
+                attr_values[attr_name] = set()
+            attr_values[attr_name].add(val)
+        elif allow_no_value:
+            attrs_without_values.add(attr)
+        else:
+            raise ValueError(f"Invalid attribute format: {attr}. Must be in format 'attr=value'")
+
+    return attr_values, attrs_without_values
+
+
+def _handle_attribute_operation(conf, operation_type, attr_values, log):
+    """Handle attribute operations (add, replace) with consistent error handling"""
+
+    if operation_type == "add":
+        conf.add_many(*[(k, v) for k, v in attr_values.items()])
+    elif operation_type == "replace":
+        conf.replace_many(*[(k, v) for k, v in attr_values.items()])
+
+    for attr_name, values in attr_values.items():
+        formatted_values = _format_values_for_display(values)
+        operation_past = "added" if operation_type == "add" else f"{operation_type}d"
+        log.info(f"Successfully {operation_past} value(s) for '{attr_name}': {formatted_values}")
 
 
 def config_get(inst, basedn, log, args):
@@ -77,49 +78,76 @@ def config_get(inst, basedn, log, args):
 
 
 def config_replace_attr(inst, basedn, log, args):
-    _generic_replace_attr(inst, basedn, log.getChild('config_replace_attr'), Config, args)
+    if not args or not args.attr:
+        raise ValueError("Missing attribute to replace")
+
+    conf = Config(inst, basedn)
+    attr_values, _ = _parse_attr_value_pairs(args.attr)
+    _handle_attribute_operation(conf, "replace", attr_values, log)
     _config_display_ldapimaprootdn_warning(log, args)
 
 
 def config_add_attr(inst, basedn, log, args):
+    if not args or not args.attr:
+        raise ValueError("Missing attribute to add")
+
     conf = Config(inst, basedn)
-    final_mods = []
-
-    existing_attrs = _config_get_existing_attrs(conf, args, OpType.add)
-
-    if args and args.attr:
-        for attr in args.attr:
-            if "=" in attr:
-                [attr_name, val] = attr.split("=", 1)
-                if attr_name in existing_attrs:
-                    for v in existing_attrs[attr_name]:
-                        final_mods.append((attr_name, v))
-                final_mods.append((attr_name, val))
-                try:
-                    conf.add_many(*set(final_mods))
-                except ldap.TYPE_OR_VALUE_EXISTS:
-                    pass
-            else:
-                raise ValueError(f"You must specify a value to add for the attribute ({attr_name})")
-    else:
-        # Missing value
-        raise ValueError("Missing attribute to add")
+    attr_values, _ = _parse_attr_value_pairs(args.attr)
 
+    # Validate no values already exist
+    for attr_name, values in attr_values.items():
+        existing_vals = set(conf.get_attr_vals_utf8(attr_name) or [])
+        duplicate_vals = values & existing_vals
+        if duplicate_vals:
+            raise ldap.ALREADY_EXISTS(
+                f"Values {duplicate_vals} already exist for attribute '{attr_name}'")
+
+    _handle_attribute_operation(conf, "add", attr_values, log)
     _config_display_ldapimaprootdn_warning(log, args)
 
 
 def config_del_attr(inst, basedn, log, args):
-    conf = Config(inst, basedn)
-    final_mods = []
+    if not args or not args.attr:
+        raise ValueError("Missing attribute to delete")
 
-    existing_attrs = _config_get_existing_attrs(conf, args, OpType.delete)
+    conf = Config(inst, basedn)
+    attr_values, attrs_to_remove = _parse_attr_value_pairs(args.attr, allow_no_value=True)
+
+    # Handle complete attribute removal
+    for attr in attrs_to_remove:
+        try:
+            if conf.get_attr_vals_utf8(attr):
+                conf.remove_all(attr)
+                log.info(f"Removed attribute '{attr}' completely")
+            else:
+                log.warning(f"Attribute '{attr}' does not exist - skipping")
+        except ldap.NO_SUCH_ATTRIBUTE:
+            log.warning(f"Attribute '{attr}' does not exist - skipping")
+
+    # Validate and handle value-specific deletion
+    if attr_values:
+        for attr_name, values in attr_values.items():
+            try:
+                existing_values = set(conf.get_attr_vals_utf8(attr_name) or [])
+                values_to_delete = values & existing_values
+                values_not_found = values - existing_values
+
+                if values_not_found:
+                    formatted_values = _format_values_for_display(values_not_found)
+                    log.warning(f"Values {formatted_values} do not exist for attribute '{attr_name}' - skipping")
+
+                if values_to_delete:
+                    remaining_values = existing_values - values_to_delete
+                    if not remaining_values:
+                        conf.remove_all(attr_name)
+                    else:
+                        conf.replace_many((attr_name, remaining_values))
+                    formatted_values = _format_values_for_display(values_to_delete)
+                    log.info(f"Successfully deleted values {formatted_values} from '{attr_name}'")
+            except ldap.NO_SUCH_ATTRIBUTE:
+                log.warning(f"Attribute '{attr_name}' does not exist - skipping")
 
-    # Then add the attributes back all except the one we need to remove
-    for attr_name in existing_attrs.keys():
-        for val in existing_attrs[attr_name]:
-            final_mods.append((attr_name, val))
-    conf.add_many(*set(final_mods))
+    _config_display_ldapimaprootdn_warning(log, args)
 
 
 def create_parser(subparsers):
--
2.48.0

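The value bookkeeping in config_del_attr() above is plain set arithmetic. A standalone sketch with invented values shows the same partition of requested values into deletable, missing, and surviving ones:

    existing = {'192.168.0.1', '192.168.0.2'}
    requested = {'192.168.0.1', '10.0.0.9'}

    values_to_delete = requested & existing         # {'192.168.0.1'} -> actually removed
    values_not_found = requested - existing         # {'10.0.0.9'}    -> logged and skipped
    remaining_values = existing - values_to_delete  # {'192.168.0.2'} -> written back via replace_many()
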
@@ -47,7 +47,7 @@ ExcludeArch: i686
Summary: 389 Directory Server (base)
Name: 389-ds-base
Version: 2.5.2
Release: 2%{?dist}
Release: 5%{?dist}
License: GPL-3.0-or-later AND (0BSD OR Apache-2.0 OR MIT) AND (Apache-2.0 OR Apache-2.0 WITH LLVM-exception OR MIT) AND (Apache-2.0 OR BSL-1.0) AND (Apache-2.0 OR MIT OR Zlib) AND (Apache-2.0 OR MIT) AND (CC-BY-4.0 AND MIT) AND (MIT OR Apache-2.0) AND Unicode-DFS-2016 AND (MIT OR CC0-1.0) AND (MIT OR Unlicense) AND 0BSD AND Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MIT AND ISC AND MPL-2.0 AND PSF-2.0
URL: https://www.port389.org
Conflicts: selinux-policy-base < 3.9.8
@@ -471,7 +471,13 @@ Source4: 389-ds-base.sysusers

Patch: 0001-Issue-6312-In-branch-2.5-healthcheck-report-an-inval.patch
Patch: 0002-Issue-6316-lmdb-reindex-is-broken-if-index-type-is-s.patch

Patch: 0003-Issue-6192-Test-failure-test_match_large_valueset.patch
Patch: 0004-Issue-6307-Wrong-set-of-entries-returned-for-some-se.patch
Patch: 0005-Issue-6381-CleanAllRUV-move-changelog-purging-to-the.patch
Patch: 0006-Issue-6390-Adjust-cleanAllRUV-max-per-txn-and-interv.patch
Patch: 0007-Issue-6284-BUG-freelist-ordering-causes-high-wtime-6.patch
Patch: 0008-Issue-6296-basic_test.py-test_conn_limits-fails-in-m.patch
Patch: 0009-Issue-5798-Fix-dsconf-config-multi-valued-attr-opera.patch

%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
@@ -573,7 +579,7 @@ A cockpit UI Plugin for configuring and administering the 389 Directory Server

%prep

%autosetup -p1 -v -n %{name}-%{version}
%autosetup -p1 -n %{name}-%{version}
%if %{bundle_jemalloc}
%setup -q -n %{name}-%{version} -T -D -b 3
%endif
@@ -914,6 +920,19 @@ exit 0
%endif

%changelog
* Fri Jan 24 2025 Viktor Ashirov <vashirov@redhat.com> - 2.5.2-5
- Resolves: RHEL-74350 - Some nsslapd-haproxy-trusted-ip values are discarded upon a restart. [rhel-9.5.z]

* Mon Dec 09 2024 Viktor Ashirov <vashirov@redhat.com> - 2.5.2-4
- Resolves: RHEL-70257 - Freelist ordering causes high wtime [rhel-9.5.z]

* Thu Dec 05 2024 James Chapman <jachapma@redhat.com> - 2.5.2-3
- Bump version to 2.5.2-3
- Resolves: RHEL-65775 - Wrong set of entries returned for some search filters [rhel-9.5.z]
- Resolves: RHEL-66138 - deadlock during cleanAllRuv [rhel-9.5.z]
- Resolves: RHEL-67163 - cleanallruv consumes CPU and is slow [rhel-9.5.z]
- Resolves: RHEL-70257 - Freelist ordering causes high wtime [rhel-9.5.z]

* Mon Sep 16 2024 Viktor Ashirov <vashirov@redhat.com> - 2.5.2-2
- Bump version to 2.5.2-2
- Resolves: RHEL-55744 - ipahealthcheck.ds.backends.BackendsCheck.DSBLE0006: BDB is deprecated and should not be used as a backend