import 389-ds-base-2.2.4-3.el9

This commit is contained in:
CentOS Sources 2023-05-09 05:28:16 +00:00 committed by Stepan Oksanichenko
parent d5a9d6db5d
commit 9a4a77d32d
7 changed files with 578 additions and 74 deletions


@@ -1,2 +1,2 @@
705d40272656ecd89e5ba648345dc63c47d79c11 SOURCES/389-ds-base-2.1.3.tar.bz2
e7345ce7d65766dc8d8a779d2661ec76a3913b63 SOURCES/389-ds-base-2.2.4.tar.bz2
1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2

.gitignore

@@ -1,2 +1,2 @@
SOURCES/389-ds-base-2.1.3.tar.bz2
SOURCES/389-ds-base-2.2.4.tar.bz2
SOURCES/jemalloc-5.3.0.tar.bz2


@@ -0,0 +1,294 @@
From 8b89bf22dea16956e4a21174f28ec11f32fc2db4 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 21 Nov 2022 11:41:15 +0100
Subject: [PATCH 1/3] Issue 3729 - (cont) RFE Extend log of operations
statistics in access log (#5538)
Bug description:
This is a continuation of #3729.
The previous fix did not handle internal SRCH operations, so
statistics for internal searches were not logged.

Fix description:
For internal operations, log_op_stat uses the
connid/op_id/op_internal_id/op_nested_count values that were
computed in log_result.
For direct operations, log_op_stat uses information from the
operation itself (o_connid and o_opid).
log_op_stat now relies on the operation type rather than
o_tag, which is not available for internal operations.
relates: #3729
Reviewed by: Pierre Rogier
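
For orientation, a minimal Python sketch (not part of the patch): the conn=/op= prefix
of the STAT lines takes one of three shapes, mirroring the STAT_LOG_CONN_OP_FMT_INT_INT
and STAT_LOG_CONN_OP_FMT_EXT_INT format strings the patch introduces. The helper name
below is hypothetical, and the exact spacing of the real access-log lines is defined by
the C code in result.c.

# Sketch only: how log_op_stat's conn=/op= prefix differs per operation kind.
def stat_prefix(connid, op_id, op_internal_id=0, op_nested_count=0, internal=False):
    if not internal:
        # Direct operation: plain connection and operation ids (o_connid/o_opid)
        return "conn=%d op=%d" % (connid, op_id)
    if connid == 0:
        # Internal operation with no originating client connection
        return "conn=Internal(%d) op=%d(%d)(%d)" % (connid, op_id, op_internal_id, op_nested_count)
    # Internal operation triggered while serving a client connection
    return "conn=%d (Internal) op=%d(%d)(%d)" % (connid, op_id, op_internal_id, op_nested_count)

print(stat_prefix(4, 2))                        # conn=4 op=2
print(stat_prefix(0, -1, 5, 0, internal=True))  # conn=Internal(0) op=-1(5)(0)
print(stat_prefix(4, 2, 3, 0, internal=True))   # conn=4 (Internal) op=2(3)(0)
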
---
.../tests/suites/ds_logs/ds_logs_test.py | 90 ++++++++++++++++++-
ldap/servers/slapd/proto-slap.h | 2 +-
ldap/servers/slapd/result.c | 74 +++++++++------
3 files changed, 136 insertions(+), 30 deletions(-)
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
index 865a6d0a3..67605438b 100644
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
@@ -13,7 +13,7 @@ import pytest
import subprocess
from lib389._mapped_object import DSLdapObject
from lib389.topologies import topology_st
-from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions
+from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions, MemberOfPlugin
from lib389.idm.user import UserAccounts
from lib389.idm.group import Groups
from lib389.idm.organizationalunit import OrganizationalUnits
@@ -1254,6 +1254,94 @@ def test_stat_index(topology_st, request):
request.addfinalizer(fin)
+def test_stat_internal_op(topology_st, request):
+ """Check that statistics can also be collected for internal operations
+
+ :id: 19f393bd-5866-425a-af7a-4dade06d5c77
+ :setup: Standalone Instance
+ :steps:
+ 1. Check that nsslapd-statlog-level is 0 (default)
+ 2. Enable memberof plugins
+ 3. Create a user
+ 4. Remove access log (to only detect new records)
+ 5. Enable statistic logging nsslapd-statlog-level=1
+ 6. Check that on a direct SRCH there are no 'Internal' Stat records
+ 7. Remove access log (to only detect new records)
+ 8. Add a group containing the user, so memberof triggers an internal search,
+ and check that 'Internal' Stat records exist
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ """
+
+ inst = topology_st.standalone
+
+ # Step 1
+ log.info("Assert nsslapd-statlog-level is by default 0")
+ assert topology_st.standalone.config.get_attr_val_int("nsslapd-statlog-level") == 0
+
+ # Step 2
+ memberof = MemberOfPlugin(inst)
+ memberof.enable()
+ inst.restart()
+
+ # Step 3 Add setup entries
+ users = UserAccounts(inst, DEFAULT_SUFFIX, rdn=None)
+ user = users.create(properties={'uid': 'test_1',
+ 'cn': 'test_1',
+ 'sn': 'test_1',
+ 'description': 'member',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/testuser'})
+ # Step 4 reset accesslog
+ topology_st.standalone.stop()
+ lpath = topology_st.standalone.ds_access_log._get_log_path()
+ os.unlink(lpath)
+ topology_st.standalone.start()
+
+ # Step 5 enable statistics
+ log.info("Set nsslapd-statlog-level: 1 to enable indexing statistics")
+ topology_st.standalone.config.set("nsslapd-statlog-level", "1")
+
+ # Step 6 for direct SRCH only non internal STAT records
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "uid=test_1")
+ topology_st.standalone.stop()
+ assert topology_st.standalone.ds_access_log.match('.*STAT read index.*')
+ assert topology_st.standalone.ds_access_log.match('.*STAT read index: attribute.*')
+ assert topology_st.standalone.ds_access_log.match('.*STAT read index: duration.*')
+ assert not topology_st.standalone.ds_access_log.match('.*Internal.*STAT.*')
+ topology_st.standalone.start()
+
+ # Step 7 reset accesslog
+ topology_st.standalone.stop()
+ lpath = topology_st.standalone.ds_access_log._get_log_path()
+ os.unlink(lpath)
+ topology_st.standalone.start()
+
+ # Step 8 trigger internal searches and check internal stat records
+ groups = Groups(inst, DEFAULT_SUFFIX, rdn=None)
+ group = groups.create(properties={'cn': 'mygroup',
+ 'member': 'uid=test_1,%s' % DEFAULT_SUFFIX,
+ 'description': 'group'})
+ topology_st.standalone.restart()
+ assert topology_st.standalone.ds_access_log.match('.*Internal.*STAT read index.*')
+ assert topology_st.standalone.ds_access_log.match('.*Internal.*STAT read index: attribute.*')
+ assert topology_st.standalone.ds_access_log.match('.*Internal.*STAT read index: duration.*')
+
+ def fin():
+ log.info('Deleting user/group')
+ user.delete()
+ group.delete()
+
+ request.addfinalizer(fin)
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 77832797b..c63ad8e74 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -515,7 +515,7 @@ long long config_get_pw_minage(void);
long long config_get_pw_warning(void);
int config_get_errorlog_level(void);
int config_get_accesslog_level(void);
-int config_get_statlog_level();
+int config_get_statlog_level(void);
int config_get_securitylog_level(void);
int config_get_auditlog_logging_enabled(void);
int config_get_auditfaillog_logging_enabled(void);
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index c8b363cce..2ba205e04 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -33,7 +33,7 @@ static long current_conn_count;
static PRLock *current_conn_count_mutex;
static int flush_ber(Slapi_PBlock *pb, Connection *conn, Operation *op, BerElement *ber, int type);
static char *notes2str(unsigned int notes, char *buf, size_t buflen);
-static void log_op_stat(Slapi_PBlock *pb);
+static void log_op_stat(Slapi_PBlock *pb, uint64_t connid, int32_t op_id, int32_t op_internal_id, int32_t op_nested_count);
static void log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries);
static void log_entry(Operation *op, Slapi_Entry *e);
static void log_referral(Operation *op);
@@ -2000,65 +2000,82 @@ notes2str(unsigned int notes, char *buf, size_t buflen)
return (buf);
}
+#define STAT_LOG_CONN_OP_FMT_INT_INT "conn=Internal(%" PRIu64 ") op=%d(%d)(%d)"
+#define STAT_LOG_CONN_OP_FMT_EXT_INT "conn=%" PRIu64 " (Internal) op=%d(%d)(%d)"
static void
-log_op_stat(Slapi_PBlock *pb)
+log_op_stat(Slapi_PBlock *pb, uint64_t connid, int32_t op_id, int32_t op_internal_id, int32_t op_nested_count)
{
-
- Connection *conn = NULL;
Operation *op = NULL;
Op_stat *op_stat;
struct timespec duration;
char stat_etime[ETIME_BUFSIZ] = {0};
+ int internal_op;
if (config_get_statlog_level() == 0) {
return;
}
- slapi_pblock_get(pb, SLAPI_CONNECTION, &conn);
slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ internal_op = operation_is_flag_set(op, OP_FLAG_INTERNAL);
op_stat = op_stat_get_operation_extension(pb);
- if (conn == NULL || op == NULL || op_stat == NULL) {
+ if (op == NULL || op_stat == NULL) {
return;
}
/* process the operation */
- switch (op->o_tag) {
- case LDAP_REQ_BIND:
- case LDAP_REQ_UNBIND:
- case LDAP_REQ_ADD:
- case LDAP_REQ_DELETE:
- case LDAP_REQ_MODRDN:
- case LDAP_REQ_MODIFY:
- case LDAP_REQ_COMPARE:
+ switch (operation_get_type(op)) {
+ case SLAPI_OPERATION_BIND:
+ case SLAPI_OPERATION_UNBIND:
+ case SLAPI_OPERATION_ADD:
+ case SLAPI_OPERATION_DELETE:
+ case SLAPI_OPERATION_MODRDN:
+ case SLAPI_OPERATION_MODIFY:
+ case SLAPI_OPERATION_COMPARE:
+ case SLAPI_OPERATION_EXTENDED:
break;
- case LDAP_REQ_SEARCH:
+ case SLAPI_OPERATION_SEARCH:
if ((LDAP_STAT_READ_INDEX & config_get_statlog_level()) &&
op_stat->search_stat) {
struct component_keys_lookup *key_info;
for (key_info = op_stat->search_stat->keys_lookup; key_info; key_info = key_info->next) {
- slapi_log_stat(LDAP_STAT_READ_INDEX,
- "conn=%" PRIu64 " op=%d STAT read index: attribute=%s key(%s)=%s --> count %d\n",
- op->o_connid, op->o_opid,
- key_info->attribute_type, key_info->index_type, key_info->key,
- key_info->id_lookup_cnt);
+ if (internal_op) {
+ slapi_log_stat(LDAP_STAT_READ_INDEX,
+ connid == 0 ? STAT_LOG_CONN_OP_FMT_INT_INT "STAT read index: attribute=%s key(%s)=%s --> count %d\n":
+ STAT_LOG_CONN_OP_FMT_EXT_INT "STAT read index: attribute=%s key(%s)=%s --> count %d\n",
+ connid, op_id, op_internal_id, op_nested_count,
+ key_info->attribute_type, key_info->index_type, key_info->key,
+ key_info->id_lookup_cnt);
+ } else {
+ slapi_log_stat(LDAP_STAT_READ_INDEX,
+ "conn=%" PRIu64 " op=%d STAT read index: attribute=%s key(%s)=%s --> count %d\n",
+ connid, op_id,
+ key_info->attribute_type, key_info->index_type, key_info->key,
+ key_info->id_lookup_cnt);
+ }
}
/* total elapsed time */
slapi_timespec_diff(&op_stat->search_stat->keys_lookup_end, &op_stat->search_stat->keys_lookup_start, &duration);
snprintf(stat_etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)duration.tv_sec, (int64_t)duration.tv_nsec);
- slapi_log_stat(LDAP_STAT_READ_INDEX,
- "conn=%" PRIu64 " op=%d STAT read index: duration %s\n",
- op->o_connid, op->o_opid, stat_etime);
+ if (internal_op) {
+ slapi_log_stat(LDAP_STAT_READ_INDEX,
+ connid == 0 ? STAT_LOG_CONN_OP_FMT_INT_INT "STAT read index: duration %s\n":
+ STAT_LOG_CONN_OP_FMT_EXT_INT "STAT read index: duration %s\n",
+ connid, op_id, op_internal_id, op_nested_count, stat_etime);
+ } else {
+ slapi_log_stat(LDAP_STAT_READ_INDEX,
+ "conn=%" PRIu64 " op=%d STAT read index: duration %s\n",
+ op->o_connid, op->o_opid, stat_etime);
+ }
}
break;
- case LDAP_REQ_ABANDON_30:
- case LDAP_REQ_ABANDON:
+ case SLAPI_OPERATION_ABANDON:
break;
default:
slapi_log_err(SLAPI_LOG_ERR,
- "log_op_stat", "Ignoring unknown LDAP request (conn=%" PRIu64 ", tag=0x%lx)\n",
- conn->c_connid, op->o_tag);
+ "log_op_stat", "Ignoring unknown LDAP request (conn=%" PRIu64 ", op_type=0x%lx)\n",
+ connid, operation_get_type(op));
break;
}
}
@@ -2218,7 +2235,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
} else {
ext_str = "";
}
- log_op_stat(pb);
+ log_op_stat(pb, op->o_connid, op->o_opid, 0, 0);
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
" tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s\n",
@@ -2233,6 +2250,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
}
} else {
int optype;
+ log_op_stat(pb, connid, op_id, op_internal_id, op_nested_count);
#define LOG_MSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s\n"
slapi_log_access(LDAP_DEBUG_ARGS,
connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_MSG_FMT :
--
2.38.1


@@ -1,34 +0,0 @@
From 88c1e83e02a59f4811f06757daced6c821fa54d9 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 11 Apr 2022 16:15:07 +0200
Subject: [PATCH] Issue 5254 - dscreate create-template regression due to
5a3bdc336 (#5255)
dscreate create-template regression introduced by 829ea4113..5a3bdc336
(the default value of the template_file parameter was inadvertently changed to the string 'None' instead of None)
Issue: 5254 https://github.com/389ds/389-ds-base/issues/5254
Reviewed by:
(cherry picked from commit 45af34013f8bdd34f939d36b16776413e13c0a51)
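
A minimal argparse sketch (illustrative, not taken from dscreate) of why the string
'None' and the value None behave differently for an optional positional argument;
the assumption here is that the CLI treats a None template_file as "print the
template to stdout" and any string as a file name to write.

import argparse

def parse(default):
    # Same shape as the create-template positional argument above
    p = argparse.ArgumentParser()
    p.add_argument('template_file', nargs='?', default=default,
                   help="Write example template to this file")
    return p.parse_args([])  # simulate running the command with no file argument

print(repr(parse('None').template_file))  # 'None' -> looks like a file literally named "None"
print(repr(parse(None).template_file))    # None   -> caller can fall back to stdout
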
---
src/lib389/cli/dscreate | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/lib389/cli/dscreate b/src/lib389/cli/dscreate
index 0b41166cc..aa3878ff9 100755
--- a/src/lib389/cli/dscreate
+++ b/src/lib389/cli/dscreate
@@ -51,7 +51,7 @@ interactive_parser.set_defaults(func=cli_instance.instance_create_interactive)
template_parser = subparsers.add_parser('create-template', help="Display an example inf answer file, or provide a file name to write it to disk.")
template_parser.add_argument('--advanced', action='store_true', default=False,
help="Add advanced options to the template - changing the advanced options may make your instance install fail")
-template_parser.add_argument('template_file', nargs="?", default='None', help="Write example template to this file")
+template_parser.add_argument('template_file', nargs="?", default=None, help="Write example template to this file")
template_parser.set_defaults(func=cli_instance.instance_example)
subtree_parser = subparsers.add_parser('ds-root', help="Prepare a root directory in which non root user can create, run and administer instances.")
--
2.37.3


@@ -0,0 +1,30 @@
From 3bc889b6564b70c5113f74e8add1a47b38fce04b Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 28 Nov 2022 09:47:09 -0500
Subject: [PATCH 2/3] Issue 5544 - Increase default task TTL
Description: Increase the Time To Live of tasks from 1 hour to 12 hours
relates: https://github.com/389ds/389-ds-base/issues/5544
Reviewed by: progier(Thanks!)
---
ldap/servers/slapd/task.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c
index c0e3dd7c4..1dc4f6b28 100644
--- a/ldap/servers/slapd/task.c
+++ b/ldap/servers/slapd/task.c
@@ -48,7 +48,7 @@ static uint64_t shutting_down = 0;
#define TASK_DATE_NAME "nsTaskCreated"
#define TASK_WARNING_NAME "nsTaskWarning"
-#define DEFAULT_TTL "3600" /* seconds */
+#define DEFAULT_TTL "43200" /* 12 hours in seconds */
#define TASK_SYSCONFIG_FILE_ATTR "sysconfigfile" /* sysconfig reload task file attr */
#define TASK_SYSCONFIG_LOGCHANGES_ATTR "logchanges"
#define TASK_TOMBSTONE_FIXUP "fixup tombstones task"
--
2.38.1


@@ -0,0 +1,219 @@
From 99dbba52eb45628c7f290e9ed3aeabb2a2a67db4 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 13 Dec 2022 09:41:34 -0500
Subject: [PATCH] Issue 5413 - Allow mutliple MemberOf fixup tasks with
different bases/filters
Description:
A change was made to only allow a single fixup task at a time, but there are
cases where you would want to run multiple tasks on different branches/filters.
Now we maintain a linked list of the bases/filters of the currently running tasks to
track this.
relates: https://github.com/389ds/389-ds-base/issues/5413
Reviewed by: tbordaz(Thanks!)
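
As a rough stand-in (Python sketch, not the plugin code): the patch effectively keeps
a registry of (base DN, filter) pairs for the running fixup tasks, rejects only an
identical pair, and removes the entry when the task thread finishes. All names below
are hypothetical.

# running_fixups stands in for the mo_fixup_ll linked list guarded by fixup_lock.
running_fixups = []

def fixup_task_add(base_dn, filter_str):
    key = (base_dn.lower(), filter_str.lower())
    if key in running_fixups:
        # Equivalent of returning LDAP_UNWILLING_TO_PERFORM in memberof_task_add
        raise RuntimeError("there is already an identical fixup task running: "
                           "base: %s filter: %s" % (base_dn, filter_str))
    running_fixups.append(key)

def fixup_task_done(base_dn, filter_str):
    # Equivalent of the cleanup at the end of memberof_fixup_task_thread
    running_fixups.remove((base_dn.lower(), filter_str.lower()))

fixup_task_add("dc=example,dc=com", "(objectclass=*)")
fixup_task_add("ou=people,dc=example,dc=com", "(objectclass=*)")  # different base: allowed
fixup_task_done("dc=example,dc=com", "(objectclass=*)")
fixup_task_add("dc=example,dc=com", "(objectclass=*)")            # previous one finished: allowed
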
---
.../suites/memberof_plugin/fixup_test.py | 5 +-
ldap/servers/plugins/memberof/memberof.c | 101 ++++++++++++++----
2 files changed, 85 insertions(+), 21 deletions(-)
diff --git a/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py b/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py
index 9566e144c..d5369439f 100644
--- a/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py
+++ b/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py
@@ -59,12 +59,15 @@ def test_fixup_task_limit(topo):
with pytest.raises(ldap.UNWILLING_TO_PERFORM):
memberof.fixup(DEFAULT_SUFFIX)
+ # Add second task but on different suffix which should be allowed
+ memberof.fixup("ou=people," + DEFAULT_SUFFIX)
+
# Wait for first task to complete
task.wait()
# Add new task which should be allowed now
memberof.fixup(DEFAULT_SUFFIX)
-
+
if __name__ == '__main__':
# Run isolated
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index 0b8cfe95c..a14617044 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -52,7 +52,6 @@ static Slapi_DN* _pluginDN = NULL;
MemberOfConfig *qsortConfig = 0;
static int usetxn = 0;
static int premodfn = 0;
-static PRBool fixup_running = PR_FALSE;
static PRLock *fixup_lock = NULL;
static int32_t fixup_progress_count = 0;
static int64_t fixup_progress_elapsed = 0;
@@ -65,6 +64,15 @@ typedef struct _memberofstringll
void *next;
} memberofstringll;
+typedef struct _fixup_ll
+{
+ Slapi_DN *sdn;
+ char *filter_str;
+ void *next;
+} mo_fixup_ll;
+
+static mo_fixup_ll *fixup_list = NULL;
+
typedef struct _memberof_get_groups_data
{
MemberOfConfig *config;
@@ -438,6 +446,15 @@ memberof_postop_close(Slapi_PBlock *pb __attribute__((unused)))
PR_DestroyLock(fixup_lock);
fixup_lock = NULL;
+ mo_fixup_ll *fixup_task = fixup_list;
+ while (fixup_task != NULL) {
+ mo_fixup_ll *tmp = fixup_task;
+ fixup_task = fixup_task->next;
+ slapi_sdn_free(&tmp->sdn);
+ slapi_ch_free_string(&tmp->filter_str);
+ slapi_ch_free((void**)&tmp);
+ }
+
slapi_log_err(SLAPI_LOG_TRACE, MEMBEROF_PLUGIN_SUBSYSTEM,
"<-- memberof_postop_close\n");
return 0;
@@ -2817,7 +2834,6 @@ memberof_fixup_task_thread(void *arg)
}
PR_Lock(fixup_lock);
- fixup_running = PR_TRUE;
fixup_progress_count = 0;
fixup_progress_elapsed = slapi_current_rel_time_t();
fixup_start_time = slapi_current_rel_time_t();
@@ -2849,11 +2865,10 @@ memberof_fixup_task_thread(void *arg)
/* Mark this as a task operation */
configCopy.fixup_task = 1;
configCopy.task = task;
-
+ Slapi_DN *sdn = slapi_sdn_new_dn_byref(td->dn);
if (usetxn) {
- Slapi_DN *sdn = slapi_sdn_new_dn_byref(td->dn);
Slapi_Backend *be = slapi_be_select_exact(sdn);
- slapi_sdn_free(&sdn);
+
if (be) {
fixup_pb = slapi_pblock_new();
slapi_pblock_set(fixup_pb, SLAPI_BACKEND, be);
@@ -2894,14 +2909,37 @@ done:
fixup_progress_count, slapi_current_rel_time_t() - fixup_start_time);
slapi_task_inc_progress(task);
+ /* Cleanup task linked list */
+ PR_Lock(fixup_lock);
+ mo_fixup_ll *prev = NULL;
+ for (mo_fixup_ll *curr = fixup_list; curr; curr = curr->next) {
+ mo_fixup_ll *next = curr->next;
+ if (slapi_sdn_compare(curr->sdn, sdn) == 0 &&
+ strcasecmp(curr->filter_str, td->filter_str) == 0)
+ {
+ /* free current code */
+ slapi_sdn_free(&curr->sdn);
+ slapi_ch_free_string(&curr->filter_str);
+ slapi_ch_free((void**)&curr);
+
+ /* update linked list */
+ if (prev == NULL) {
+ /* first node */
+ fixup_list = next;
+ } else {
+ prev->next = next;
+ }
+ break;
+ }
+ prev = curr;
+ }
+ PR_Unlock(fixup_lock);
+ slapi_sdn_free(&sdn);
+
/* this will queue the destruction of the task */
slapi_task_finish(task, rc);
slapi_task_dec_refcount(task);
- PR_Lock(fixup_lock);
- fixup_running = PR_FALSE;
- PR_Unlock(fixup_lock);
-
slapi_log_err(SLAPI_LOG_INFO, MEMBEROF_PLUGIN_SUBSYSTEM,
"memberof_fixup_task_thread - Memberof task finished (processed %d entries in %ld seconds)\n",
fixup_progress_count, slapi_current_rel_time_t() - fixup_start_time);
@@ -2919,23 +2957,13 @@ memberof_task_add(Slapi_PBlock *pb,
int rv = SLAPI_DSE_CALLBACK_OK;
task_data *mytaskdata = NULL;
Slapi_Task *task = NULL;
+ Slapi_DN *sdn = NULL;
char *bind_dn;
const char *filter;
const char *dn = 0;
*returncode = LDAP_SUCCESS;
- PR_Lock(fixup_lock);
- if (fixup_running) {
- PR_Unlock(fixup_lock);
- *returncode = LDAP_UNWILLING_TO_PERFORM;
- slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_task_add - there is already a fixup task running\n");
- rv = SLAPI_DSE_CALLBACK_ERROR;
- goto out;
- }
- PR_Unlock(fixup_lock);
-
/* get arg(s) */
if ((dn = slapi_entry_attr_get_ref(e, "basedn")) == NULL) {
*returncode = LDAP_OBJECT_CLASS_VIOLATION;
@@ -2949,6 +2977,39 @@ memberof_task_add(Slapi_PBlock *pb,
goto out;
}
+ PR_Lock(fixup_lock);
+ sdn = slapi_sdn_new_dn_byval(dn);
+ if (fixup_list == NULL) {
+ fixup_list = (mo_fixup_ll *)slapi_ch_calloc(1, sizeof(mo_fixup_ll));
+ fixup_list->sdn = sdn;
+ fixup_list->filter_str = slapi_ch_strdup(filter);
+ } else {
+ for (mo_fixup_ll *fixup_task = fixup_list; fixup_task; fixup_task = fixup_task->next) {
+ if (slapi_sdn_compare(sdn, fixup_task->sdn) == 0 &&
+ strcasecmp(filter, fixup_task->filter_str) == 0)
+ {
+ /* Found an identical running task, reject it */
+ PR_Unlock(fixup_lock);
+ slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_task_add - there is already an identical fixup task running: base: %s filter: %s\n",
+ slapi_sdn_get_dn(sdn), filter);
+ slapi_sdn_free(&sdn);
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
+ rv = SLAPI_DSE_CALLBACK_ERROR;
+ goto out;
+ }
+ }
+ /* Add the new task DN to the top of the list */
+ mo_fixup_ll *head = fixup_list;
+ mo_fixup_ll *new_task = (mo_fixup_ll *)slapi_ch_calloc(1, sizeof(mo_fixup_ll));
+ new_task->sdn = sdn;
+ new_task->filter_str = slapi_ch_strdup(filter);
+ new_task->next = head;
+ fixup_list = new_task;
+ }
+ PR_Unlock(fixup_lock);
+
+
/* setup our task data */
slapi_pblock_get(pb, SLAPI_REQUESTOR_DN, &bind_dn);
mytaskdata = (task_data *)slapi_ch_malloc(sizeof(task_data));
--
2.38.1


@@ -46,8 +46,8 @@ ExcludeArch: i686
Summary: 389 Directory Server (base)
Name: 389-ds-base
Version: 2.1.3
Release: 4%{?dist}
Version: 2.2.4
Release: 3%{?dist}
License: GPLv3+ and (ASL 2.0 or MIT)
URL: https://www.port389.org
Conflicts: selinux-policy-base < 3.9.8
@@ -157,6 +157,7 @@ BuildRequires: icu
BuildRequires: libicu-devel
BuildRequires: pcre-devel
BuildRequires: cracklib-devel
BuildRequires: json-c-devel
%if %{use_clang}
BuildRequires: libatomic
BuildRequires: clang
@@ -228,6 +229,8 @@ Requires: openldap-clients
Requires: /usr/bin/c_rehash
Requires: python%{python3_pkgversion}-ldap
Requires: acl
Requires: zlib
Requires: json-c
# this is needed to setup SSL if you are not using the
# administration server package
@@ -266,7 +269,10 @@ Source2: %{name}-devel.README
%if %{bundle_jemalloc}
Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2
%endif
Patch01: 0001-Issue-5254-dscreate-create-template-regression-due-t.patch
Patch01: 0001-Issue-3729-cont-RFE-Extend-log-of-operations-statist.patch
Patch02: 0002-Issue-5544-Increase-default-task-TTL.patch
Patch03: 0003-Issue-5413-Allow-mutliple-MemberOf-fixup-tasks-with-.patch
%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
@@ -717,42 +723,31 @@ exit 0
%endif
%changelog
* Thu Oct 6 2022 Mark Reynolds <mreynolds@redhat.com> - 2.1.3-4
- Bump version to 2.1.3-4
- Resolves: Bug 1872451 - Fix regression with dscreate template
* Tue Dec 13 2022 Mark Reynolds <mreynolds@redhat.com> - 2.2.4-3
- Bump version to 2.2.4-3
- Resolves: rhbz#2142636 - pam mutex lock causing high etimes, affecting red hat internal sso
- Resolves: rhbz#2093981 - RFE - Create Security Audit Log
- Resolves: rhbz#2132697 - [RFE] 389ds: run as non-root
- Resolves: rhbz#2124660 - Retro changelog trimming uses maxage incorrectly
- Resolves: rhbz#2114039 - Current pbkdf2 hardcoded parameters are no longer secure
- Resolves: rhbz#2112998 - performance search rate: checking if an entry is a referral is expensive
- Resolves: rhbz#2112361 - Supplier should do periodic update to avoid slow replication when a new direct update happen
- Resolves: rhbz#2109891 - Migrate 389 to pcre2
* Fri Aug 19 2022 Mark Reynolds <mreynolds@redhat.com> - 2.1.3-3
- Bump version to 2.1.3-3
- Resolves: Bug 2118765
* Thu Aug 18 2022 Mark Reynolds <mreynolds@redhat.com> - 2.1.3-2
- Bump version to 2.1.3-2
- Resolves: Bug 2118765 - SIGSEGV in sync_repl
* Mon Dec 12 2022 Mark Reynolds <mreynolds@redhat.com> - 2.2.4-2
- Bump version to 2.2.4-2
- Resolves: Bug 1859271 - RFE - Extend log of operations statistics in access log
- Resolves: Bug 2093981 - RFE - Create Security Audit Log
- Resolves: Bug 2109891 - Migrate 389 to pcre2
- Resolves: Bug 2112361 - Supplier should do periodic update to avoid slow replication when a new direct update happen
- Resolves: Bug 2112998 - performance search rate: checking if an entry is a referral is expensive
- Resolves: Bug 2114039 - Current pbkdf2 hardcoded parameters are no longer secure
- Resolves: Bug 2124660 - Retro changelog trimming uses maxage incorrectly
- Resolves: Bug 2132697 - RFE - run as non-root
- Resolves: Bug 2142636 - pam mutex lock causing high etimes, affecting red hat internal sso
* Mon Jul 11 2022 Mark Reynolds <mreynolds@redhat.com> - 2.1.3-1
- Bump version to 2.1.3-1
- Resolves: Bug 2061801 - rebase 389-ds-base to 2.1.3
- Resolves: Bug 1872451 - RFE - run as non-root
- Resolves: Bug 2052527 - RFE - Provide an option to abort an Auto Member rebuild task
- Resolves: Bug 2057056 - Import may break the replication because changelog starting csn may not be created
- Resolves: Bug 2057063 - Add support for recursively deleting subentries
- Resolves: Bug 2062778 - sending crafted message could result in DoS
- Resolves: Bug 2064781 - expired password was still allowed to access the database
- Resolves: Bug 2100337 - dsconf backend export userroot fails ldap.DECODING_ERROR
* Mon Jun 13 2022 Mark Reynolds <mreynolds@redhat.com> - 2.1.1-3
- Bump version to 2.1.1-3
- Resolves: Bug 2061801 - Fix nss-tools requirement
* Mon Jun 13 2022 Mark Reynolds <mreynolds@redhat.com> - 2.1.1-2
- Bump version to 2.1.1-2
- Resolves: Bug 2061801 - Fix lmdb-libs requirement
* Thu May 12 2022 Mark Reynolds <mreynolds@redhat.com> - 2.1.1-1
- Bump version to 2.1.1-1
- Resolves: Bug 2061801 - rebase 389-ds-base to 2.1.1
* Tue Mar 8 2022 Mark Reynolds <mreynolds@redhat.com> - 2.1.0-1
- Bump version to 2.1.0-1
- Resolves: Bug 2061801 - rebase 389-ds-base to 2.1.0
* Fri Nov 11 2022 Mark Reynolds <mreynolds@redhat.com> - 2.2.4-1
- Bump version to 2.2.4-1
- Resolves: Bug 1132524 - [RFE] Compression of log files